Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 1 | /* |
| 2 | * VMEbus User access driver |
| 3 | * |
Martyn Welch | 66bd8db | 2010-02-18 15:12:52 +0000 | [diff] [blame] | 4 | * Author: Martyn Welch <martyn.welch@ge.com> |
| 5 | * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 6 | * |
| 7 | * Based on work by: |
| 8 | * Tom Armistead and Ajit Prem |
| 9 | * Copyright 2004 Motorola Inc. |
| 10 | * |
| 11 | * |
| 12 | * This program is free software; you can redistribute it and/or modify it |
| 13 | * under the terms of the GNU General Public License as published by the |
| 14 | * Free Software Foundation; either version 2 of the License, or (at your |
| 15 | * option) any later version. |
| 16 | */ |
| 17 | |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 19 | |
Dmitry Kalinkin | c74a804 | 2015-02-26 18:53:10 +0300 | [diff] [blame] | 20 | #include <linux/atomic.h> |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 21 | #include <linux/cdev.h> |
| 22 | #include <linux/delay.h> |
| 23 | #include <linux/device.h> |
| 24 | #include <linux/dma-mapping.h> |
| 25 | #include <linux/errno.h> |
| 26 | #include <linux/init.h> |
| 27 | #include <linux/ioctl.h> |
| 28 | #include <linux/kernel.h> |
| 29 | #include <linux/mm.h> |
| 30 | #include <linux/module.h> |
| 31 | #include <linux/pagemap.h> |
| 32 | #include <linux/pci.h> |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 33 | #include <linux/mutex.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 34 | #include <linux/slab.h> |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 35 | #include <linux/spinlock.h> |
| 36 | #include <linux/syscalls.h> |
| 37 | #include <linux/types.h> |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 38 | |
Nanakos Chrysostomos | 45f9f01 | 2010-05-28 10:54:45 +0000 | [diff] [blame] | 39 | #include <linux/io.h> |
| 40 | #include <linux/uaccess.h> |
Greg Kroah-Hartman | db3b9e9 | 2012-04-26 12:34:58 -0700 | [diff] [blame] | 41 | #include <linux/vme.h> |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 42 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 43 | #include "vme_user.h" |
| 44 | |
static const char driver_name[] = "vme_user";

/* Buses serviced by this driver; presumably filled from a module
 * parameter at load time — confirm against the init code. */
static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;	/* Number of valid entries in bus[] */
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 49 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 50 | /* Currently Documentation/devices.txt defines the following for VME: |
| 51 | * |
| 52 | * 221 char VME bus |
Nanakos Chrysostomos | 45f9f01 | 2010-05-28 10:54:45 +0000 | [diff] [blame] | 53 | * 0 = /dev/bus/vme/m0 First master image |
| 54 | * 1 = /dev/bus/vme/m1 Second master image |
| 55 | * 2 = /dev/bus/vme/m2 Third master image |
| 56 | * 3 = /dev/bus/vme/m3 Fourth master image |
| 57 | * 4 = /dev/bus/vme/s0 First slave image |
| 58 | * 5 = /dev/bus/vme/s1 Second slave image |
| 59 | * 6 = /dev/bus/vme/s2 Third slave image |
| 60 | * 7 = /dev/bus/vme/s3 Fourth slave image |
| 61 | * 8 = /dev/bus/vme/ctl Control |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 62 | * |
Nanakos Chrysostomos | 45f9f01 | 2010-05-28 10:54:45 +0000 | [diff] [blame] | 63 | * It is expected that all VME bus drivers will use the |
| 64 | * same interface. For interface documentation see |
| 65 | * http://www.vmelinux.org/. |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 66 | * |
| 67 | * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't |
| 68 | * even support the tsi148 chipset (which has 8 master and 8 slave windows). |
Justin P. Mattock | 9560533 | 2012-08-13 10:28:22 -0700 | [diff] [blame] | 69 | * We'll run with this for now as far as possible, however it probably makes |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 70 | * sense to get rid of the old mappings and just do everything dynamically. |
| 71 | * |
| 72 | * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as |
| 73 | * defined above and try to support at least some of the interface from |
Justin P. Mattock | 9560533 | 2012-08-13 10:28:22 -0700 | [diff] [blame] | 74 | * http://www.vmelinux.org/ as an alternative the driver can be written |
| 75 | * providing a saner interface later. |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 76 | * |
| 77 | * The vmelinux.org driver never supported slave images, the devices reserved |
| 78 | * for slaves were repurposed to support all 8 master images on the UniverseII! |
| 79 | * We shall support 4 masters and 4 slaves with this driver. |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 80 | */ |
| 81 | #define VME_MAJOR 221 /* VME Major Device Number */ |
| 82 | #define VME_DEVS 9 /* Number of dev entries */ |
| 83 | |
| 84 | #define MASTER_MINOR 0 |
| 85 | #define MASTER_MAX 3 |
| 86 | #define SLAVE_MINOR 4 |
| 87 | #define SLAVE_MAX 7 |
| 88 | #define CONTROL_MINOR 8 |
| 89 | |
| 90 | #define PCI_BUF_SIZE 0x20000 /* Size of one slave image buffer */ |
| 91 | |
| 92 | /* |
| 93 | * Structure to handle image related parameters. |
| 94 | */ |
struct image_desc {
	void *kern_buf;	/* Buffer address in kernel space */
	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;	/* Mutex for locking image */
	struct device *device;	/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;	/* Number of current users */
	int mmap_count;	/* Number of current mmap's */
};

/* One descriptor per minor (4 masters, 4 slaves, 1 control); each entry
 * is protected by its own mutex. */
static struct image_desc image[VME_DEVS];
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 106 | |
static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 110 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 111 | |
| 112 | static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR, |
| 113 | MASTER_MINOR, MASTER_MINOR, |
| 114 | SLAVE_MINOR, SLAVE_MINOR, |
| 115 | SLAVE_MINOR, SLAVE_MINOR, |
| 116 | CONTROL_MINOR |
| 117 | }; |
| 118 | |
/* Per-mapping state shared by all VMAs cloned from one mmap(); freed by
 * vme_user_vm_close() when the last reference drops. */
struct vme_user_vma_priv {
	unsigned int minor;	/* Image backing this mapping */
	atomic_t refcnt;	/* VMAs currently sharing this state */
};
| 123 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 124 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 125 | static int vme_user_open(struct inode *inode, struct file *file) |
| 126 | { |
| 127 | int err; |
| 128 | unsigned int minor = MINOR(inode->i_rdev); |
| 129 | |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 130 | mutex_lock(&image[minor].mutex); |
Vincent Bossier | 05614fb | 2011-06-09 09:20:31 +0100 | [diff] [blame] | 131 | /* Allow device to be opened if a resource is needed and allocated. */ |
| 132 | if (minor < CONTROL_MINOR && image[minor].resource == NULL) { |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 133 | pr_err("No resources allocated for device\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 134 | err = -EINVAL; |
| 135 | goto err_res; |
| 136 | } |
| 137 | |
| 138 | /* Increment user count */ |
| 139 | image[minor].users++; |
| 140 | |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 141 | mutex_unlock(&image[minor].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 142 | |
| 143 | return 0; |
| 144 | |
| 145 | err_res: |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 146 | mutex_unlock(&image[minor].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 147 | |
| 148 | return err; |
| 149 | } |
| 150 | |
| 151 | static int vme_user_release(struct inode *inode, struct file *file) |
| 152 | { |
| 153 | unsigned int minor = MINOR(inode->i_rdev); |
| 154 | |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 155 | mutex_lock(&image[minor].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 156 | |
| 157 | /* Decrement user count */ |
| 158 | image[minor].users--; |
| 159 | |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 160 | mutex_unlock(&image[minor].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 161 | |
| 162 | return 0; |
| 163 | } |
| 164 | |
| 165 | /* |
 * We are going to alloc a page during init per window for small transfers.
| 167 | * Small transfers will go VME -> buffer -> user space. Larger (more than a |
| 168 | * page) transfers will lock the user space buffer into memory and then |
| 169 | * transfer the data directly into the user space buffers. |
| 170 | */ |
| 171 | static ssize_t resource_to_user(int minor, char __user *buf, size_t count, |
| 172 | loff_t *ppos) |
| 173 | { |
| 174 | ssize_t retval; |
| 175 | ssize_t copied = 0; |
| 176 | |
| 177 | if (count <= image[minor].size_buf) { |
| 178 | /* We copy to kernel buffer */ |
| 179 | copied = vme_master_read(image[minor].resource, |
| 180 | image[minor].kern_buf, count, *ppos); |
Nanakos Chrysostomos | 45f9f01 | 2010-05-28 10:54:45 +0000 | [diff] [blame] | 181 | if (copied < 0) |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 182 | return (int)copied; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 183 | |
| 184 | retval = __copy_to_user(buf, image[minor].kern_buf, |
| 185 | (unsigned long)copied); |
| 186 | if (retval != 0) { |
| 187 | copied = (copied - retval); |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 188 | pr_info("User copy failed\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 189 | return -EINVAL; |
| 190 | } |
| 191 | |
| 192 | } else { |
| 193 | /* XXX Need to write this */ |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 194 | pr_info("Currently don't support large transfers\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 195 | /* Map in pages from userspace */ |
| 196 | |
| 197 | /* Call vme_master_read to do the transfer */ |
| 198 | return -EINVAL; |
| 199 | } |
| 200 | |
| 201 | return copied; |
| 202 | } |
| 203 | |
| 204 | /* |
Justin P. Mattock | 9560533 | 2012-08-13 10:28:22 -0700 | [diff] [blame] | 205 | * We are going to alloc a page during init per window for small transfers. |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 206 | * Small transfers will go user space -> buffer -> VME. Larger (more than a |
| 207 | * page) transfers will lock the user space buffer into memory and then |
| 208 | * transfer the data directly from the user space buffers out to VME. |
| 209 | */ |
Emilio G. Cota | 1a85f20 | 2010-11-12 11:15:34 +0000 | [diff] [blame] | 210 | static ssize_t resource_from_user(unsigned int minor, const char __user *buf, |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 211 | size_t count, loff_t *ppos) |
| 212 | { |
| 213 | ssize_t retval; |
| 214 | ssize_t copied = 0; |
| 215 | |
| 216 | if (count <= image[minor].size_buf) { |
| 217 | retval = __copy_from_user(image[minor].kern_buf, buf, |
| 218 | (unsigned long)count); |
| 219 | if (retval != 0) |
| 220 | copied = (copied - retval); |
| 221 | else |
| 222 | copied = count; |
| 223 | |
| 224 | copied = vme_master_write(image[minor].resource, |
| 225 | image[minor].kern_buf, copied, *ppos); |
| 226 | } else { |
| 227 | /* XXX Need to write this */ |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 228 | pr_info("Currently don't support large transfers\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 229 | /* Map in pages from userspace */ |
| 230 | |
| 231 | /* Call vme_master_write to do the transfer */ |
| 232 | return -EINVAL; |
| 233 | } |
| 234 | |
| 235 | return copied; |
| 236 | } |
| 237 | |
| 238 | static ssize_t buffer_to_user(unsigned int minor, char __user *buf, |
| 239 | size_t count, loff_t *ppos) |
| 240 | { |
Emilio G. Cota | 0a81a0f | 2010-11-12 11:15:27 +0000 | [diff] [blame] | 241 | void *image_ptr; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 242 | ssize_t retval; |
| 243 | |
| 244 | image_ptr = image[minor].kern_buf + *ppos; |
| 245 | |
| 246 | retval = __copy_to_user(buf, image_ptr, (unsigned long)count); |
| 247 | if (retval != 0) { |
| 248 | retval = (count - retval); |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 249 | pr_warn("Partial copy to userspace\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 250 | } else |
| 251 | retval = count; |
| 252 | |
| 253 | /* Return number of bytes successfully read */ |
| 254 | return retval; |
| 255 | } |
| 256 | |
Emilio G. Cota | 1a85f20 | 2010-11-12 11:15:34 +0000 | [diff] [blame] | 257 | static ssize_t buffer_from_user(unsigned int minor, const char __user *buf, |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 258 | size_t count, loff_t *ppos) |
| 259 | { |
Emilio G. Cota | 0a81a0f | 2010-11-12 11:15:27 +0000 | [diff] [blame] | 260 | void *image_ptr; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 261 | size_t retval; |
| 262 | |
| 263 | image_ptr = image[minor].kern_buf + *ppos; |
| 264 | |
| 265 | retval = __copy_from_user(image_ptr, buf, (unsigned long)count); |
| 266 | if (retval != 0) { |
| 267 | retval = (count - retval); |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 268 | pr_warn("Partial copy to userspace\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 269 | } else |
| 270 | retval = count; |
| 271 | |
| 272 | /* Return number of bytes successfully read */ |
| 273 | return retval; |
| 274 | } |
| 275 | |
Emilio G. Cota | 1a85f20 | 2010-11-12 11:15:34 +0000 | [diff] [blame] | 276 | static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count, |
Nanakos Chrysostomos | 45f9f01 | 2010-05-28 10:54:45 +0000 | [diff] [blame] | 277 | loff_t *ppos) |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 278 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 279 | unsigned int minor = MINOR(file_inode(file)->i_rdev); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 280 | ssize_t retval; |
| 281 | size_t image_size; |
| 282 | size_t okcount; |
| 283 | |
Vincent Bossier | 05614fb | 2011-06-09 09:20:31 +0100 | [diff] [blame] | 284 | if (minor == CONTROL_MINOR) |
| 285 | return 0; |
| 286 | |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 287 | mutex_lock(&image[minor].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 288 | |
| 289 | /* XXX Do we *really* want this helper - we can use vme_*_get ? */ |
| 290 | image_size = vme_get_size(image[minor].resource); |
| 291 | |
| 292 | /* Ensure we are starting at a valid location */ |
| 293 | if ((*ppos < 0) || (*ppos > (image_size - 1))) { |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 294 | mutex_unlock(&image[minor].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 295 | return 0; |
| 296 | } |
| 297 | |
| 298 | /* Ensure not reading past end of the image */ |
| 299 | if (*ppos + count > image_size) |
| 300 | okcount = image_size - *ppos; |
| 301 | else |
| 302 | okcount = count; |
| 303 | |
Nanakos Chrysostomos | 45f9f01 | 2010-05-28 10:54:45 +0000 | [diff] [blame] | 304 | switch (type[minor]) { |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 305 | case MASTER_MINOR: |
| 306 | retval = resource_to_user(minor, buf, okcount, ppos); |
| 307 | break; |
| 308 | case SLAVE_MINOR: |
| 309 | retval = buffer_to_user(minor, buf, okcount, ppos); |
| 310 | break; |
| 311 | default: |
| 312 | retval = -EINVAL; |
| 313 | } |
| 314 | |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 315 | mutex_unlock(&image[minor].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 316 | if (retval > 0) |
| 317 | *ppos += retval; |
| 318 | |
| 319 | return retval; |
| 320 | } |
| 321 | |
Emilio G. Cota | 1a85f20 | 2010-11-12 11:15:34 +0000 | [diff] [blame] | 322 | static ssize_t vme_user_write(struct file *file, const char __user *buf, |
| 323 | size_t count, loff_t *ppos) |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 324 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 325 | unsigned int minor = MINOR(file_inode(file)->i_rdev); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 326 | ssize_t retval; |
| 327 | size_t image_size; |
| 328 | size_t okcount; |
| 329 | |
Vincent Bossier | 05614fb | 2011-06-09 09:20:31 +0100 | [diff] [blame] | 330 | if (minor == CONTROL_MINOR) |
| 331 | return 0; |
| 332 | |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 333 | mutex_lock(&image[minor].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 334 | |
| 335 | image_size = vme_get_size(image[minor].resource); |
| 336 | |
| 337 | /* Ensure we are starting at a valid location */ |
| 338 | if ((*ppos < 0) || (*ppos > (image_size - 1))) { |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 339 | mutex_unlock(&image[minor].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 340 | return 0; |
| 341 | } |
| 342 | |
| 343 | /* Ensure not reading past end of the image */ |
| 344 | if (*ppos + count > image_size) |
| 345 | okcount = image_size - *ppos; |
| 346 | else |
| 347 | okcount = count; |
| 348 | |
Nanakos Chrysostomos | 45f9f01 | 2010-05-28 10:54:45 +0000 | [diff] [blame] | 349 | switch (type[minor]) { |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 350 | case MASTER_MINOR: |
| 351 | retval = resource_from_user(minor, buf, okcount, ppos); |
| 352 | break; |
| 353 | case SLAVE_MINOR: |
| 354 | retval = buffer_from_user(minor, buf, okcount, ppos); |
| 355 | break; |
| 356 | default: |
| 357 | retval = -EINVAL; |
| 358 | } |
Toshiaki Yamane | 538a697 | 2012-08-21 20:12:33 +0900 | [diff] [blame] | 359 | |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 360 | mutex_unlock(&image[minor].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 361 | |
| 362 | if (retval > 0) |
| 363 | *ppos += retval; |
| 364 | |
| 365 | return retval; |
| 366 | } |
| 367 | |
| 368 | static loff_t vme_user_llseek(struct file *file, loff_t off, int whence) |
| 369 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 370 | unsigned int minor = MINOR(file_inode(file)->i_rdev); |
Arthur Benilov | 877de4b | 2010-02-16 15:40:30 +0100 | [diff] [blame] | 371 | size_t image_size; |
Al Viro | 5948229 | 2014-08-19 11:28:35 -0400 | [diff] [blame] | 372 | loff_t res; |
Arthur Benilov | 877de4b | 2010-02-16 15:40:30 +0100 | [diff] [blame] | 373 | |
Dmitry Kalinkin | 615c40d | 2015-05-28 15:07:02 +0300 | [diff] [blame] | 374 | switch (type[minor]) { |
| 375 | case MASTER_MINOR: |
| 376 | case SLAVE_MINOR: |
| 377 | mutex_lock(&image[minor].mutex); |
| 378 | image_size = vme_get_size(image[minor].resource); |
| 379 | res = fixed_size_llseek(file, off, whence, image_size); |
| 380 | mutex_unlock(&image[minor].mutex); |
| 381 | return res; |
| 382 | } |
Vincent Bossier | 05614fb | 2011-06-09 09:20:31 +0100 | [diff] [blame] | 383 | |
Dmitry Kalinkin | 615c40d | 2015-05-28 15:07:02 +0300 | [diff] [blame] | 384 | return -EINVAL; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 385 | } |
| 386 | |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 387 | /* |
| 388 | * The ioctls provided by the old VME access method (the one at vmelinux.org) |
| 389 | * are most certainly wrong as the effectively push the registers layout |
| 390 | * through to user space. Given that the VME core can handle multiple bridges, |
| 391 | * with different register layouts this is most certainly not the way to go. |
| 392 | * |
| 393 | * We aren't using the structures defined in the Motorola driver either - these |
| 394 | * are also quite low level, however we should use the definitions that have |
| 395 | * already been defined. |
| 396 | */ |
/*
 * Per-minor ioctl dispatch.  Called with image[minor].mutex held by
 * vme_user_unlocked_ioctl().
 *
 * CONTROL_MINOR: VME_IRQ_GEN generates a VME interrupt.
 * MASTER_MINOR:  VME_GET_MASTER / VME_SET_MASTER query or configure the
 *                master window.
 * SLAVE_MINOR:   VME_GET_SLAVE / VME_SET_SLAVE query or configure the
 *                slave window (always backed by the driver's pci_buf).
 * Any unrecognised minor/cmd combination returns -EINVAL.
 */
static int vme_user_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;	/* receives (and discards) the slave's bus address */
	void __user *argp = (void __user *)arg;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(struct vme_irq_id));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* Raise a VME interrupt at the requested level/statid */
			return vme_irq_generate(vme_user_bridge,
						irq_req.level,
						irq_req.statid);
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(struct vme_master));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
				&master.enable, &master.vme_addr,
				&master.size, &master.aspace,
				&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
				sizeof(struct vme_master));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:

			/* Refuse to resize a window that userspace has
			 * mmap'ed - it would invalidate the mapping. */
			if (image[minor].mmap_count != 0) {
				pr_warn("Can't adjust mapped window\n");
				return -EPERM;
			}

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);

			break;
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(struct vme_slave));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
				&slave.enable, &slave.vme_addr,
				&slave.size, &pci_addr, &slave.aspace,
				&slave.cycle);

			copied = copy_to_user(argp, &slave,
				sizeof(struct vme_slave));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			/* Slave windows are always backed by the driver's
			 * preallocated bounce buffer, not a user address. */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);

			break;
		}
		break;
	}

	return -EINVAL;
}
| 515 | |
Arnd Bergmann | b1f2ac0 | 2010-04-27 20:15:07 +0200 | [diff] [blame] | 516 | static long |
| 517 | vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| 518 | { |
| 519 | int ret; |
Dmitry Kalinkin | 0cd189a | 2015-02-26 18:53:09 +0300 | [diff] [blame] | 520 | struct inode *inode = file_inode(file); |
| 521 | unsigned int minor = MINOR(inode->i_rdev); |
Arnd Bergmann | b1f2ac0 | 2010-04-27 20:15:07 +0200 | [diff] [blame] | 522 | |
Dmitry Kalinkin | 0cd189a | 2015-02-26 18:53:09 +0300 | [diff] [blame] | 523 | mutex_lock(&image[minor].mutex); |
| 524 | ret = vme_user_ioctl(inode, file, cmd, arg); |
| 525 | mutex_unlock(&image[minor].mutex); |
Arnd Bergmann | b1f2ac0 | 2010-04-27 20:15:07 +0200 | [diff] [blame] | 526 | |
| 527 | return ret; |
| 528 | } |
| 529 | |
Dmitry Kalinkin | c74a804 | 2015-02-26 18:53:10 +0300 | [diff] [blame] | 530 | static void vme_user_vm_open(struct vm_area_struct *vma) |
| 531 | { |
| 532 | struct vme_user_vma_priv *vma_priv = vma->vm_private_data; |
| 533 | |
| 534 | atomic_inc(&vma_priv->refcnt); |
| 535 | } |
| 536 | |
| 537 | static void vme_user_vm_close(struct vm_area_struct *vma) |
| 538 | { |
| 539 | struct vme_user_vma_priv *vma_priv = vma->vm_private_data; |
| 540 | unsigned int minor = vma_priv->minor; |
| 541 | |
| 542 | if (!atomic_dec_and_test(&vma_priv->refcnt)) |
| 543 | return; |
| 544 | |
| 545 | mutex_lock(&image[minor].mutex); |
| 546 | image[minor].mmap_count--; |
| 547 | mutex_unlock(&image[minor].mutex); |
| 548 | |
| 549 | kfree(vma_priv); |
| 550 | } |
| 551 | |
/* VMA lifetime hooks used to keep image[].mmap_count accurate */
static const struct vm_operations_struct vme_user_vm_ops = {
	.open = vme_user_vm_open,
	.close = vme_user_vm_close,
};
| 556 | |
Dmitry Kalinkin | c74a804 | 2015-02-26 18:53:10 +0300 | [diff] [blame] | 557 | static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma) |
| 558 | { |
| 559 | int err; |
| 560 | struct vme_user_vma_priv *vma_priv; |
| 561 | |
| 562 | mutex_lock(&image[minor].mutex); |
| 563 | |
| 564 | err = vme_master_mmap(image[minor].resource, vma); |
| 565 | if (err) { |
| 566 | mutex_unlock(&image[minor].mutex); |
| 567 | return err; |
| 568 | } |
| 569 | |
| 570 | vma_priv = kmalloc(sizeof(struct vme_user_vma_priv), GFP_KERNEL); |
| 571 | if (vma_priv == NULL) { |
| 572 | mutex_unlock(&image[minor].mutex); |
| 573 | return -ENOMEM; |
| 574 | } |
| 575 | |
| 576 | vma_priv->minor = minor; |
| 577 | atomic_set(&vma_priv->refcnt, 1); |
| 578 | vma->vm_ops = &vme_user_vm_ops; |
| 579 | vma->vm_private_data = vma_priv; |
| 580 | |
| 581 | image[minor].mmap_count++; |
| 582 | |
| 583 | mutex_unlock(&image[minor].mutex); |
| 584 | |
| 585 | return 0; |
| 586 | } |
| 587 | |
| 588 | static int vme_user_mmap(struct file *file, struct vm_area_struct *vma) |
| 589 | { |
| 590 | unsigned int minor = MINOR(file_inode(file)->i_rdev); |
| 591 | |
| 592 | if (type[minor] == MASTER_MINOR) |
| 593 | return vme_user_master_mmap(minor, vma); |
| 594 | |
| 595 | return -ENODEV; |
| 596 | } |
| 597 | |
/*
 * Character device operations shared by all minors. The compat ioctl
 * path reuses the native handler.
 */
static const struct file_operations vme_user_fops = {
	.open = vme_user_open,
	.release = vme_user_release,
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = vme_user_unlocked_ioctl,
	.mmap = vme_user_mmap,
};
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 608 | |
/*
 * Free a previously allocated buffer
 */
Nanakos Chrysostomos | 45f9f01 | 2010-05-28 10:54:45 +0000 | [diff] [blame] | 612 | static void buf_unalloc(int num) |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 613 | { |
| 614 | if (image[num].kern_buf) { |
| 615 | #ifdef VME_DEBUG |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 616 | pr_debug("UniverseII:Releasing buffer at %p\n", |
| 617 | image[num].pci_buf); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 618 | #endif |
| 619 | |
| 620 | vme_free_consistent(image[num].resource, image[num].size_buf, |
| 621 | image[num].kern_buf, image[num].pci_buf); |
| 622 | |
| 623 | image[num].kern_buf = NULL; |
| 624 | image[num].pci_buf = 0; |
| 625 | image[num].size_buf = 0; |
| 626 | |
| 627 | #ifdef VME_DEBUG |
| 628 | } else { |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 629 | pr_debug("UniverseII: Buffer not allocated\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 630 | #endif |
| 631 | } |
| 632 | } |
| 633 | |
Manohar Vanga | 5d6abf3 | 2011-09-26 11:27:16 +0200 | [diff] [blame] | 634 | static int vme_user_match(struct vme_dev *vdev) |
| 635 | { |
Martyn Welch | 978f47d | 2013-11-08 11:58:34 +0000 | [diff] [blame] | 636 | int i; |
| 637 | |
| 638 | int cur_bus = vme_bus_num(vdev); |
Martyn Welch | d7729f0 | 2013-11-08 11:58:35 +0000 | [diff] [blame] | 639 | int cur_slot = vme_slot_num(vdev); |
Martyn Welch | 978f47d | 2013-11-08 11:58:34 +0000 | [diff] [blame] | 640 | |
| 641 | for (i = 0; i < bus_num; i++) |
| 642 | if ((cur_bus == bus[i]) && (cur_slot == vdev->num)) |
| 643 | return 1; |
| 644 | |
| 645 | return 0; |
Manohar Vanga | 5d6abf3 | 2011-09-26 11:27:16 +0200 | [diff] [blame] | 646 | } |
| 647 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 648 | /* |
| 649 | * In this simple access driver, the old behaviour is being preserved as much |
| 650 | * as practical. We will therefore reserve the buffers and request the images |
| 651 | * here so that we don't have to do it later. |
| 652 | */ |
Bill Pemberton | d7e530d | 2012-11-19 13:21:56 -0500 | [diff] [blame] | 653 | static int vme_user_probe(struct vme_dev *vdev) |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 654 | { |
| 655 | int i, err; |
Bojan Prtvar | f1552cb | 2014-04-03 18:56:10 +0200 | [diff] [blame] | 656 | char *name; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 657 | |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 658 | /* Save pointer to the bridge device */ |
| 659 | if (vme_user_bridge != NULL) { |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 660 | dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n"); |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 661 | err = -EINVAL; |
| 662 | goto err_dev; |
| 663 | } |
Manohar Vanga | 8f966dc | 2011-09-26 11:27:15 +0200 | [diff] [blame] | 664 | vme_user_bridge = vdev; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 665 | |
| 666 | /* Initialise descriptors */ |
| 667 | for (i = 0; i < VME_DEVS; i++) { |
| 668 | image[i].kern_buf = NULL; |
| 669 | image[i].pci_buf = 0; |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 670 | mutex_init(&image[i].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 671 | image[i].device = NULL; |
| 672 | image[i].resource = NULL; |
| 673 | image[i].users = 0; |
| 674 | } |
| 675 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 676 | /* Assign major and minor numbers for the driver */ |
| 677 | err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS, |
| 678 | driver_name); |
| 679 | if (err) { |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 680 | dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n", |
| 681 | VME_MAJOR); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 682 | goto err_region; |
| 683 | } |
| 684 | |
| 685 | /* Register the driver as a char device */ |
| 686 | vme_user_cdev = cdev_alloc(); |
Kumar Amit Mehta | d4113a6 | 2013-03-24 22:37:48 -0700 | [diff] [blame] | 687 | if (!vme_user_cdev) { |
| 688 | err = -ENOMEM; |
| 689 | goto err_char; |
| 690 | } |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 691 | vme_user_cdev->ops = &vme_user_fops; |
| 692 | vme_user_cdev->owner = THIS_MODULE; |
| 693 | err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS); |
| 694 | if (err) { |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 695 | dev_warn(&vdev->dev, "cdev_all failed\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 696 | goto err_char; |
| 697 | } |
| 698 | |
| 699 | /* Request slave resources and allocate buffers (128kB wide) */ |
| 700 | for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) { |
| 701 | /* XXX Need to properly request attributes */ |
Arthur Benilov | 5188d74 | 2010-02-16 15:40:58 +0100 | [diff] [blame] | 702 | /* For ca91cx42 bridge there are only two slave windows |
| 703 | * supporting A16 addressing, so we request A24 supported |
| 704 | * by all windows. |
| 705 | */ |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 706 | image[i].resource = vme_slave_request(vme_user_bridge, |
Arthur Benilov | 5188d74 | 2010-02-16 15:40:58 +0100 | [diff] [blame] | 707 | VME_A24, VME_SCT); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 708 | if (image[i].resource == NULL) { |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 709 | dev_warn(&vdev->dev, |
| 710 | "Unable to allocate slave resource\n"); |
Wei Yongjun | 465ff28 | 2013-05-13 14:05:38 +0800 | [diff] [blame] | 711 | err = -ENOMEM; |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 712 | goto err_slave; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 713 | } |
| 714 | image[i].size_buf = PCI_BUF_SIZE; |
| 715 | image[i].kern_buf = vme_alloc_consistent(image[i].resource, |
Emilio G. Cota | 886953e | 2010-11-12 11:14:07 +0000 | [diff] [blame] | 716 | image[i].size_buf, &image[i].pci_buf); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 717 | if (image[i].kern_buf == NULL) { |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 718 | dev_warn(&vdev->dev, |
| 719 | "Unable to allocate memory for buffer\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 720 | image[i].pci_buf = 0; |
| 721 | vme_slave_free(image[i].resource); |
| 722 | err = -ENOMEM; |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 723 | goto err_slave; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 724 | } |
| 725 | } |
| 726 | |
| 727 | /* |
| 728 | * Request master resources allocate page sized buffers for small |
| 729 | * reads and writes |
| 730 | */ |
| 731 | for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) { |
| 732 | /* XXX Need to properly request attributes */ |
| 733 | image[i].resource = vme_master_request(vme_user_bridge, |
| 734 | VME_A32, VME_SCT, VME_D32); |
| 735 | if (image[i].resource == NULL) { |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 736 | dev_warn(&vdev->dev, |
| 737 | "Unable to allocate master resource\n"); |
Wei Yongjun | 465ff28 | 2013-05-13 14:05:38 +0800 | [diff] [blame] | 738 | err = -ENOMEM; |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 739 | goto err_master; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 740 | } |
Arthur Benilov | 33e920d | 2010-02-16 15:41:21 +0100 | [diff] [blame] | 741 | image[i].size_buf = PCI_BUF_SIZE; |
| 742 | image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL); |
| 743 | if (image[i].kern_buf == NULL) { |
Arthur Benilov | 33e920d | 2010-02-16 15:41:21 +0100 | [diff] [blame] | 744 | err = -ENOMEM; |
Daeseok Youn | 1a52489 | 2014-03-26 12:01:48 +0900 | [diff] [blame] | 745 | vme_master_free(image[i].resource); |
| 746 | goto err_master; |
Arthur Benilov | 33e920d | 2010-02-16 15:41:21 +0100 | [diff] [blame] | 747 | } |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 748 | } |
| 749 | |
| 750 | /* Create sysfs entries - on udev systems this creates the dev files */ |
| 751 | vme_user_sysfs_class = class_create(THIS_MODULE, driver_name); |
| 752 | if (IS_ERR(vme_user_sysfs_class)) { |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 753 | dev_err(&vdev->dev, "Error creating vme_user class.\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 754 | err = PTR_ERR(vme_user_sysfs_class); |
| 755 | goto err_class; |
| 756 | } |
| 757 | |
| 758 | /* Add sysfs Entries */ |
Nanakos Chrysostomos | 45f9f01 | 2010-05-28 10:54:45 +0000 | [diff] [blame] | 759 | for (i = 0; i < VME_DEVS; i++) { |
Vincent Bossier | 584721c | 2011-06-03 10:07:39 +0100 | [diff] [blame] | 760 | int num; |
Bojan Prtvar | 938acb99 | 2014-04-03 00:24:10 +0200 | [diff] [blame] | 761 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 762 | switch (type[i]) { |
| 763 | case MASTER_MINOR: |
Bojan Prtvar | f1552cb | 2014-04-03 18:56:10 +0200 | [diff] [blame] | 764 | name = "bus/vme/m%d"; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 765 | break; |
| 766 | case CONTROL_MINOR: |
Bojan Prtvar | f1552cb | 2014-04-03 18:56:10 +0200 | [diff] [blame] | 767 | name = "bus/vme/ctl"; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 768 | break; |
| 769 | case SLAVE_MINOR: |
Bojan Prtvar | f1552cb | 2014-04-03 18:56:10 +0200 | [diff] [blame] | 770 | name = "bus/vme/s%d"; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 771 | break; |
| 772 | default: |
| 773 | err = -EINVAL; |
| 774 | goto err_sysfs; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 775 | } |
| 776 | |
Vincent Bossier | 584721c | 2011-06-03 10:07:39 +0100 | [diff] [blame] | 777 | num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i; |
| 778 | image[i].device = device_create(vme_user_sysfs_class, NULL, |
| 779 | MKDEV(VME_MAJOR, i), NULL, name, num); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 780 | if (IS_ERR(image[i].device)) { |
YAMANE Toshiaki | 0093e5f | 2012-11-09 12:23:14 +0900 | [diff] [blame] | 781 | dev_info(&vdev->dev, "Error creating sysfs device\n"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 782 | err = PTR_ERR(image[i].device); |
| 783 | goto err_sysfs; |
| 784 | } |
| 785 | } |
| 786 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 787 | return 0; |
| 788 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 789 | err_sysfs: |
Nanakos Chrysostomos | 45f9f01 | 2010-05-28 10:54:45 +0000 | [diff] [blame] | 790 | while (i > 0) { |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 791 | i--; |
| 792 | device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i)); |
| 793 | } |
| 794 | class_destroy(vme_user_sysfs_class); |
| 795 | |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 796 | /* Ensure counter set correcty to unalloc all master windows */ |
| 797 | i = MASTER_MAX + 1; |
| 798 | err_master: |
| 799 | while (i > MASTER_MINOR) { |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 800 | i--; |
Daeseok Youn | 1a52489 | 2014-03-26 12:01:48 +0900 | [diff] [blame] | 801 | kfree(image[i].kern_buf); |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 802 | vme_master_free(image[i].resource); |
| 803 | } |
| 804 | |
| 805 | /* |
| 806 | * Ensure counter set correcty to unalloc all slave windows and buffers |
| 807 | */ |
| 808 | i = SLAVE_MAX + 1; |
| 809 | err_slave: |
| 810 | while (i > SLAVE_MINOR) { |
| 811 | i--; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 812 | buf_unalloc(i); |
Emilio G. Cota | 1daa38d | 2010-12-03 09:05:08 +0000 | [diff] [blame] | 813 | vme_slave_free(image[i].resource); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 814 | } |
| 815 | err_class: |
| 816 | cdev_del(vme_user_cdev); |
| 817 | err_char: |
| 818 | unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS); |
| 819 | err_region: |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 820 | err_dev: |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 821 | return err; |
| 822 | } |
| 823 | |
Bill Pemberton | f21a824 | 2012-11-19 13:26:52 -0500 | [diff] [blame] | 824 | static int vme_user_remove(struct vme_dev *dev) |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 825 | { |
| 826 | int i; |
| 827 | |
| 828 | /* Remove sysfs Entries */ |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 829 | for (i = 0; i < VME_DEVS; i++) { |
| 830 | mutex_destroy(&image[i].mutex); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 831 | device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i)); |
Santosh Nayak | ecb3b80 | 2012-04-03 16:42:51 +0530 | [diff] [blame] | 832 | } |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 833 | class_destroy(vme_user_sysfs_class); |
| 834 | |
Emilio G. Cota | b62c99b | 2010-12-03 14:20:51 +0000 | [diff] [blame] | 835 | for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) { |
Arthur Benilov | 33e920d | 2010-02-16 15:41:21 +0100 | [diff] [blame] | 836 | kfree(image[i].kern_buf); |
Emilio G. Cota | b62c99b | 2010-12-03 14:20:51 +0000 | [diff] [blame] | 837 | vme_master_free(image[i].resource); |
| 838 | } |
Arthur Benilov | 33e920d | 2010-02-16 15:41:21 +0100 | [diff] [blame] | 839 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 840 | for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) { |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 841 | vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 842 | buf_unalloc(i); |
Emilio G. Cota | 1daa38d | 2010-12-03 09:05:08 +0000 | [diff] [blame] | 843 | vme_slave_free(image[i].resource); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 844 | } |
| 845 | |
| 846 | /* Unregister device driver */ |
| 847 | cdev_del(vme_user_cdev); |
| 848 | |
| 849 | /* Unregiser the major and minor device numbers */ |
| 850 | unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS); |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 851 | |
| 852 | return 0; |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 853 | } |
| 854 | |
/* Driver registration record handed to the VME core. */
static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};
| 861 | |
| 862 | static int __init vme_user_init(void) |
| 863 | { |
| 864 | int retval = 0; |
| 865 | |
| 866 | pr_info("VME User Space Access Driver\n"); |
| 867 | |
| 868 | if (bus_num == 0) { |
| 869 | pr_err("No cards, skipping registration\n"); |
| 870 | retval = -ENODEV; |
| 871 | goto err_nocard; |
| 872 | } |
| 873 | |
| 874 | /* Let's start by supporting one bus, we can support more than one |
| 875 | * in future revisions if that ever becomes necessary. |
| 876 | */ |
| 877 | if (bus_num > VME_USER_BUS_MAX) { |
| 878 | pr_err("Driver only able to handle %d buses\n", |
| 879 | VME_USER_BUS_MAX); |
| 880 | bus_num = VME_USER_BUS_MAX; |
| 881 | } |
| 882 | |
| 883 | /* |
| 884 | * Here we just register the maximum number of devices we can and |
| 885 | * leave vme_user_match() to allow only 1 to go through to probe(). |
| 886 | * This way, if we later want to allow multiple user access devices, |
| 887 | * we just change the code in vme_user_match(). |
| 888 | */ |
| 889 | retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS); |
| 890 | if (retval != 0) |
| 891 | goto err_reg; |
| 892 | |
| 893 | return retval; |
| 894 | |
| 895 | err_reg: |
| 896 | err_nocard: |
| 897 | return retval; |
| 898 | } |
| 899 | |
/* Module exit: unregister the driver from the VME core. */
static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}
| 904 | |
| 905 | |
| 906 | MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected"); |
| 907 | module_param_array(bus, int, &bus_num, 0); |
| 908 | |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 909 | MODULE_DESCRIPTION("VME User Space Access Driver"); |
Martyn Welch | 66bd8db | 2010-02-18 15:12:52 +0000 | [diff] [blame] | 910 | MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com"); |
Martyn Welch | f00a86d | 2009-07-31 09:28:17 +0100 | [diff] [blame] | 911 | MODULE_LICENSE("GPL"); |
| 912 | |
Martyn Welch | 238add5 | 2009-08-11 14:37:15 +0100 | [diff] [blame] | 913 | module_init(vme_user_init); |
| 914 | module_exit(vme_user_exit); |