/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *     Copyright 2004 Motorola Inc.
 *
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "vme_user.h"

static DEFINE_MUTEX(vme_user_mutex);
static const char driver_name[] = "vme_user";

static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;

/* Currently Documentation/devices.txt defines the following for VME:
 *
 * 221 char	VME bus
 *		  0 = /dev/bus/vme/m0		First master image
 *		  1 = /dev/bus/vme/m1		Second master image
 *		  2 = /dev/bus/vme/m2		Third master image
 *		  3 = /dev/bus/vme/m3		Fourth master image
 *		  4 = /dev/bus/vme/s0		First slave image
 *		  5 = /dev/bus/vme/s1		Second slave image
 *		  6 = /dev/bus/vme/s2		Third slave image
 *		  7 = /dev/bus/vme/s3		Fourth slave image
 *		  8 = /dev/bus/vme/ctl		Control
 *
 * It is expected that all VME bus drivers will use the
 * same interface. For interface documentation see
 * http://www.vmelinux.org/.
 *
 * However, the VME driver at http://www.vmelinux.org/ is rather old and
 * doesn't even support the tsi148 chipset (which has 8 master and 8 slave
 * windows). We'll run with this for now as far as possible; however, it
 * probably makes sense to get rid of the old mappings and just do everything
 * dynamically.
 *
 * So for now we'll restrict the driver to providing 4 masters and 4 slaves as
 * defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/; as an alternative, the driver can be rewritten
 * to provide a saner interface later.
 *
 * The vmelinux.org driver never supported slave images; the devices reserved
 * for slaves were repurposed to support all 8 master images on the
 * UniverseII! We shall support 4 masters and 4 slaves with this driver.
 */
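
/*
 * For illustration: a minimal sketch of the userspace read flow against a
 * master image as described above. This is hypothetical userspace code, not
 * part of this driver; the window must first be configured with the
 * VME_SET_MASTER ioctl (see the example before vme_user_ioctl() below):
 *
 *	char buf[256];
 *	int fd = open("/dev/bus/vme/m0", O_RDWR);
 *
 *	lseek(fd, 0x100, SEEK_SET);
 *	read(fd, buf, sizeof(buf));
 *	close(fd);
 */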
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE	0x20000	/* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */
struct image_desc {
	void *kern_buf;		/* Buffer address in kernel space */
	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;	/* Mutex for locking image */
	struct device *device;	/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;		/* Number of current users */
};
static struct image_desc image[VME_DEVS];

struct driver_stats {
	unsigned long reads;
	unsigned long writes;
	unsigned long ioctls;
	unsigned long irqs;
	unsigned long berrs;
	unsigned long dmaerrors;
	unsigned long timeouts;
	unsigned long external;
};
static struct driver_stats statistics;

static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */


static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};


static int vme_user_open(struct inode *, struct file *);
static int vme_user_release(struct inode *, struct file *);
static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t vme_user_write(struct file *, const char __user *, size_t,
	loff_t *);
static loff_t vme_user_llseek(struct file *, loff_t, int);
static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);

static int vme_user_match(struct vme_dev *);
static int vme_user_probe(struct vme_dev *);
static int vme_user_remove(struct vme_dev *);

static const struct file_operations vme_user_fops = {
	.open = vme_user_open,
	.release = vme_user_release,
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = vme_user_unlocked_ioctl,
};


/*
 * Reset all the statistic counters
 */
static void reset_counters(void)
{
	statistics.reads = 0;
	statistics.writes = 0;
	statistics.ioctls = 0;
	statistics.irqs = 0;
	statistics.berrs = 0;
	statistics.dmaerrors = 0;
	statistics.timeouts = 0;
	statistics.external = 0;
}

static int vme_user_open(struct inode *inode, struct file *file)
{
	int err;
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);
	/* Allow device to be opened if a resource is needed and allocated. */
	if (minor < CONTROL_MINOR && image[minor].resource == NULL) {
		pr_err("No resources allocated for device\n");
		err = -EINVAL;
		goto err_res;
	}

	/* Increment user count */
	image[minor].users++;

	mutex_unlock(&image[minor].mutex);

	return 0;

err_res:
	mutex_unlock(&image[minor].mutex);

	return err;
}

static int vme_user_release(struct inode *inode, struct file *file)
{
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);

	/* Decrement user count */
	image[minor].users--;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

/*
 * We are going to alloc a page during init per window for small transfers.
 * Small transfers will go VME -> buffer -> user space. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly into the user space buffers (see the sketch
 * following resource_to_user() below).
 */
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
	loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		/* We copy to kernel buffer */
		copied = vme_master_read(image[minor].resource,
			image[minor].kern_buf, count, *ppos);
		if (copied < 0)
			return (int)copied;

		retval = __copy_to_user(buf, image[minor].kern_buf,
			(unsigned long)copied);
		if (retval != 0) {
			copied = (copied - retval);
			pr_info("User copy failed\n");
			return -EINVAL;
		}

	} else {
		/* XXX Need to write this */
		pr_info("Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_read to do the transfer */
		return -EINVAL;
	}

	return copied;
}
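
/*
 * A rough sketch of the unimplemented large-transfer path above (and the
 * mirror-image path in resource_from_user() below): pin the user pages and
 * transfer directly to/from them. This is only an assumption about how it
 * could be done, not tested code; error handling, partial pages and buffer
 * alignment are omitted:
 *
 *	struct page *pages[16];
 *	int i, nr = get_user_pages_fast((unsigned long)buf, 16, 1, pages);
 *
 *	for (i = 0; i < nr; i++) {
 *		void *vaddr = kmap(pages[i]);
 *
 *		vme_master_read(image[minor].resource, vaddr, PAGE_SIZE,
 *				*ppos + i * PAGE_SIZE);
 *		kunmap(pages[i]);
 *		put_page(pages[i]);
 *	}
 */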

/*
 * We are going to alloc a page during init per window for small transfers.
 * Small transfers will go user space -> buffer -> VME. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly from the user space buffers out to VME.
 */
static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		retval = __copy_from_user(image[minor].kern_buf, buf,
			(unsigned long)count);
		if (retval != 0)
			copied = (copied - retval);
		else
			copied = count;

		copied = vme_master_write(image[minor].resource,
			image[minor].kern_buf, copied, *ppos);
	} else {
		/* XXX Need to write this */
		pr_info("Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_write to do the transfer */
		return -EINVAL;
	}

	return copied;
}

static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	ssize_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		pr_warn("Partial copy to userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully read */
	return retval;
}

static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	size_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		pr_warn("Partial copy from userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully written */
	return retval;
}

static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
	loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);
	if (retval > 0)
		*ppos += retval;

	return retval;
}

static ssize_t vme_user_write(struct file *file, const char __user *buf,
	size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	loff_t absolute = -1;
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	size_t image_size;

	if (minor == CONTROL_MINOR)
		return -EINVAL;

	mutex_lock(&image[minor].mutex);
	image_size = vme_get_size(image[minor].resource);

	switch (whence) {
	case SEEK_SET:
		absolute = off;
		break;
	case SEEK_CUR:
		absolute = file->f_pos + off;
		break;
	case SEEK_END:
		absolute = image_size + off;
		break;
	default:
		mutex_unlock(&image[minor].mutex);
		return -EINVAL;
	}

	if ((absolute < 0) || (absolute >= image_size)) {
		mutex_unlock(&image[minor].mutex);
		return -EINVAL;
	}

	file->f_pos = absolute;

	mutex_unlock(&image[minor].mutex);

	return absolute;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges,
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level. However, we should use the definitions that
 * already exist.
 */
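
/*
 * For illustration: configuring a master window from userspace with the
 * structures handled below. Hypothetical userspace code, not part of this
 * driver; the constants match what vme_user_probe() requests for master
 * windows:
 *
 *	struct vme_master cfg = {
 *		.enable = 1,
 *		.vme_addr = 0x0,
 *		.size = 0x10000,
 *		.aspace = VME_A32,
 *		.cycle = VME_SCT,
 *		.dwidth = VME_D32,
 *	};
 *
 *	if (ioctl(fd, VME_SET_MASTER, &cfg) == -1)
 *		perror("VME_SET_MASTER");
 */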
static int vme_user_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	statistics.ioctls++;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(struct vme_irq_id));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			retval = vme_irq_generate(vme_user_bridge,
						  irq_req.level,
						  irq_req.statid);

			return retval;
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(struct vme_master));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
				&master.enable, &master.vme_addr,
				&master.size, &master.aspace,
				&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
				sizeof(struct vme_master));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);

		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(struct vme_slave));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
				&slave.enable, &slave.vme_addr,
				&slave.size, &pci_addr, &slave.aspace,
				&slave.cycle);

			copied = copy_to_user(argp, &slave,
				sizeof(struct vme_slave));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);

		}
		break;
	}

	return -EINVAL;
}

static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&vme_user_mutex);
	ret = vme_user_ioctl(file_inode(file), file, cmd, arg);
	mutex_unlock(&vme_user_mutex);

	return ret;
}


/*
 * Free a previously allocated buffer
 */
static void buf_unalloc(int num)
{
	if (image[num].kern_buf) {
#ifdef VME_DEBUG
		pr_debug("UniverseII: Releasing buffer at %p\n",
			 image[num].pci_buf);
#endif

		vme_free_consistent(image[num].resource, image[num].size_buf,
			image[num].kern_buf, image[num].pci_buf);

		image[num].kern_buf = NULL;
		image[num].pci_buf = 0;
		image[num].size_buf = 0;

#ifdef VME_DEBUG
	} else {
		pr_debug("UniverseII: Buffer not allocated\n");
#endif
	}
}

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};


static int __init vme_user_init(void)
{
	int retval = 0;

	pr_info("VME User Space Access Driver\n");

	if (bus_num == 0) {
		pr_err("No cards, skipping registration\n");
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Let's start by supporting one bus, we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > VME_USER_BUS_MAX) {
		pr_err("Driver only able to handle %d buses\n",
		       VME_USER_BUS_MAX);
		bus_num = VME_USER_BUS_MAX;
	}

	/*
	 * Here we just register the maximum number of devices we can and
	 * leave vme_user_match() to allow only 1 to go through to probe().
	 * This way, if we later want to allow multiple user access devices,
	 * we just change the code in vme_user_match().
	 */
	retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
	if (retval != 0)
		goto err_reg;

	return retval;

err_reg:
err_nocard:
	return retval;
}

static int vme_user_match(struct vme_dev *vdev)
{
	int i;

	int cur_bus = vme_bus_num(vdev);
	int cur_slot = vme_slot_num(vdev);

	for (i = 0; i < bus_num; i++)
		if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
			return 1;

	return 0;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char *name;

	/* Save pointer to the bridge device */
	if (vme_user_bridge != NULL) {
		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		mutex_init(&image[i].mutex);
		image[i].device = NULL;
		image[i].resource = NULL;
		image[i].users = 0;
	}

	/* Initialise statistics counters */
	reset_counters();

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
		driver_name);
	if (err) {
		dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
			 VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (!vme_user_cdev) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err) {
		dev_warn(&vdev->dev, "cdev_add failed\n");
		goto err_char;
	}

	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
			VME_A24, VME_SCT);
		if (image[i].resource == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate slave resource\n");
			err = -ENOMEM;
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
			image[i].size_buf, &image[i].pci_buf);
		if (image[i].kern_buf == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate memory for buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources and allocate page-sized buffers for small
	 * reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
			VME_A32, VME_SCT, VME_D32);
		if (image[i].resource == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate master resource\n");
			err = -ENOMEM;
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (image[i].kern_buf == NULL) {
			err = -ENOMEM;
			goto err_master_buf;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		dev_err(&vdev->dev, "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;

		switch (type[i]) {
		case MASTER_MINOR:
			name = "bus/vme/m%d";
			break;
		case CONTROL_MINOR:
			name = "bus/vme/ctl";
			break;
		case SLAVE_MINOR:
			name = "bus/vme/s%d";
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}

		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
			MKDEV(VME_MAJOR, i), NULL, name, num);
		if (IS_ERR(image[i].device)) {
			dev_info(&vdev->dev, "Error creating sysfs device\n");
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

	/* Ensure counter set correctly to destroy all sysfs devices */
	i = VME_DEVS;
err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master_buf:
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
		kfree(image[i].kern_buf);
err_master:
	while (i > MASTER_MINOR) {
		i--;
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to unalloc all slave windows and
	 * buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

static int vme_user_remove(struct vme_dev *dev)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		mutex_destroy(&image[i].mutex);
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}


MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);
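
/*
 * Example usage (assuming a single bridge registered as VME bus 0):
 *
 *	modprobe vme_user bus=0
 */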

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);