/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *   Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "vme_user.h"

static const char driver_name[] = "vme_user";

static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;

/* Currently Documentation/devices.txt defines the following for VME:
 *
 * 221 char	VME bus
 *		  0 = /dev/bus/vme/m0		First master image
 *		  1 = /dev/bus/vme/m1		Second master image
 *		  2 = /dev/bus/vme/m2		Third master image
 *		  3 = /dev/bus/vme/m3		Fourth master image
 *		  4 = /dev/bus/vme/s0		First slave image
 *		  5 = /dev/bus/vme/s1		Second slave image
 *		  6 = /dev/bus/vme/s2		Third slave image
 *		  7 = /dev/bus/vme/s3		Fourth slave image
 *		  8 = /dev/bus/vme/ctl		Control
 *
 * It is expected that all VME bus drivers will use the same interface.
 * For interface documentation see http://www.vmelinux.org/.
 *
 * However, the VME driver at http://www.vmelinux.org/ is rather old and
 * doesn't even support the tsi148 chipset (which has 8 master and 8 slave
 * windows). We'll run with this for now as far as possible, however it
 * probably makes sense to get rid of the old mappings and just do everything
 * dynamically.
 *
 * So for now we'll restrict the driver to providing 4 masters and 4 slaves
 * as defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/; as an alternative, a driver providing a saner
 * interface can be written later.
 *
 * The vmelinux.org driver never supported slave images; the devices reserved
 * for slaves were repurposed to support all 8 master images on the
 * UniverseII! We shall support 4 masters and 4 slaves with this driver.
 */
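
/*
 * Illustrative sketch (not part of this driver): reading from the first
 * master window from user space. This assumes the window has already been
 * configured (see the VME_SET_MASTER ioctl below); small reads like this
 * are bounced through the per-window kernel buffer:
 *
 *	char buf[64];
 *	int fd = open("/dev/bus/vme/m0", O_RDWR);
 *
 *	lseek(fd, 0x100, SEEK_SET);
 *	read(fd, buf, sizeof(buf));
 *	close(fd);
 */
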
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE	0x20000	/* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */
struct image_desc {
	void *kern_buf;	/* Buffer address in kernel space */
	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;	/* Mutex for locking image */
	struct device *device;	/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;	/* Number of current users */
	int mmap_count;	/* Number of current mmap's */
};

static struct image_desc image[VME_DEVS];

static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */

static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};

struct vme_user_vma_priv {
	unsigned int minor;
	atomic_t refcnt;
};

static int vme_user_open(struct inode *inode, struct file *file)
{
	int err;
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);
	/* Only allow the device to be opened if any needed resource is allocated. */
	if (minor < CONTROL_MINOR && image[minor].resource == NULL) {
		pr_err("No resources allocated for device\n");
		err = -EINVAL;
		goto err_res;
	}

	/* Increment user count */
	image[minor].users++;

	mutex_unlock(&image[minor].mutex);

	return 0;

err_res:
	mutex_unlock(&image[minor].mutex);

	return err;
}

static int vme_user_release(struct inode *inode, struct file *file)
{
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);

	/* Decrement user count */
	image[minor].users--;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

/*
 * We are going to allocate a page during init per window for small
 * transfers. Small transfers will go VME -> buffer -> user space. Larger
 * (more than a page) transfers will lock the user space buffer into memory
 * and then transfer the data directly into the user space buffers.
 */
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
	loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		/* We copy to kernel buffer */
		copied = vme_master_read(image[minor].resource,
			image[minor].kern_buf, count, *ppos);
		if (copied < 0)
			return (int)copied;

		retval = __copy_to_user(buf, image[minor].kern_buf,
			(unsigned long)copied);
		if (retval != 0) {
			copied = (copied - retval);
			pr_info("User copy failed\n");
			return -EINVAL;
		}

	} else {
		/* XXX Need to write this */
		pr_info("Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_read to do the transfer */
		return -EINVAL;
	}

	return copied;
}

/*
 * We are going to allocate a page during init per window for small
 * transfers. Small transfers will go user space -> buffer -> VME. Larger
 * (more than a page) transfers will lock the user space buffer into memory
 * and then transfer the data directly from the user space buffers out to VME.
 */
static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		retval = __copy_from_user(image[minor].kern_buf, buf,
			(unsigned long)count);
		if (retval != 0)
			copied = (copied - retval);
		else
			copied = count;

		copied = vme_master_write(image[minor].resource,
			image[minor].kern_buf, copied, *ppos);
	} else {
		/* XXX Need to write this */
		pr_info("Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_write to do the transfer */
		return -EINVAL;
	}

	return copied;
}

static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	ssize_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		pr_warn("Partial copy to userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully read */
	return retval;
}

static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	size_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		pr_warn("Partial copy from userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully written */
	return retval;
}

static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
	loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);
	if (retval > 0)
		*ppos += retval;

	return retval;
}

static ssize_t vme_user_write(struct file *file, const char __user *buf,
	size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	size_t image_size;
	loff_t res;

	switch (type[minor]) {
	case MASTER_MINOR:
	case SLAVE_MINOR:
		mutex_lock(&image[minor].mutex);
		image_size = vme_get_size(image[minor].resource);
		res = fixed_size_llseek(file, off, whence, image_size);
		mutex_unlock(&image[minor].mutex);
		return res;
	}

	return -EINVAL;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges,
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either -
 * these are also quite low level. However, we should use the definitions
 * that have already been defined.
 */
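
/*
 * Illustrative sketch (not part of this driver): configuring the first
 * master window from user space via VME_SET_MASTER. struct vme_master and
 * the VME_* attribute constants come from vme_user.h and vme.h; the
 * address, size and attribute values below are example assumptions only:
 *
 *	struct vme_master master = {
 *		.enable = 1,
 *		.vme_addr = 0x10000,
 *		.size = 0x10000,
 *		.aspace = VME_A32,
 *		.cycle = VME_SCT,
 *		.dwidth = VME_D32,
 *	};
 *	int fd = open("/dev/bus/vme/m0", O_RDWR);
 *
 *	if (ioctl(fd, VME_SET_MASTER, &master))
 *		perror("VME_SET_MASTER");
 */
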
static int vme_user_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(struct vme_irq_id));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			return vme_irq_generate(vme_user_bridge,
						irq_req.level,
						irq_req.statid);
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(struct vme_master));

			/* XXX We do not want to push aspace, cycle and width
			 *     to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
				&master.enable, &master.vme_addr,
				&master.size, &master.aspace,
				&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
					      sizeof(struct vme_master));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:

			if (image[minor].mmap_count != 0) {
				pr_warn("Can't adjust mapped window\n");
				return -EPERM;
			}

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 *     to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);

			break;
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(struct vme_slave));

			/* XXX We do not want to push aspace, cycle and width
			 *     to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
				&slave.enable, &slave.vme_addr,
				&slave.size, &pci_addr, &slave.aspace,
				&slave.cycle);

			copied = copy_to_user(argp, &slave,
					      sizeof(struct vme_slave));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 *     to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);

			break;
		}
		break;
	}

	return -EINVAL;
}

static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct inode *inode = file_inode(file);
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);
	ret = vme_user_ioctl(inode, file, cmd, arg);
	mutex_unlock(&image[minor].mutex);

	return ret;
}

static void vme_user_vm_open(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;

	atomic_inc(&vma_priv->refcnt);
}

static void vme_user_vm_close(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
	unsigned int minor = vma_priv->minor;

	if (!atomic_dec_and_test(&vma_priv->refcnt))
		return;

	mutex_lock(&image[minor].mutex);
	image[minor].mmap_count--;
	mutex_unlock(&image[minor].mutex);

	kfree(vma_priv);
}

static const struct vm_operations_struct vme_user_vm_ops = {
	.open = vme_user_vm_open,
	.close = vme_user_vm_close,
};

static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
{
	int err;
	struct vme_user_vma_priv *vma_priv;

	mutex_lock(&image[minor].mutex);

	err = vme_master_mmap(image[minor].resource, vma);
	if (err) {
		mutex_unlock(&image[minor].mutex);
		return err;
	}

	vma_priv = kmalloc(sizeof(struct vme_user_vma_priv), GFP_KERNEL);
	if (vma_priv == NULL) {
		mutex_unlock(&image[minor].mutex);
		return -ENOMEM;
	}

	vma_priv->minor = minor;
	atomic_set(&vma_priv->refcnt, 1);
	vma->vm_ops = &vme_user_vm_ops;
	vma->vm_private_data = vma_priv;

	image[minor].mmap_count++;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);

	if (type[minor] == MASTER_MINOR)
		return vme_user_master_mmap(minor, vma);

	return -ENODEV;
}

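/*
 * Illustrative sketch (not part of this driver): mapping a configured
 * master window into user space; the length and offset are example
 * assumptions and must fit within the configured window:
 *
 *	void *p = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * While such a mapping exists, VME_SET_MASTER on the same window is
 * rejected with -EPERM (see the mmap_count check in vme_user_ioctl()).
 */
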
static const struct file_operations vme_user_fops = {
	.open = vme_user_open,
	.release = vme_user_release,
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = vme_user_unlocked_ioctl,
	.mmap = vme_user_mmap,
};

/*
 * Free a previously allocated buffer
 */
static void buf_unalloc(int num)
{
	if (image[num].kern_buf) {
#ifdef VME_DEBUG
		pr_debug("UniverseII: Releasing buffer at %pad\n",
			 &image[num].pci_buf);
#endif

		vme_free_consistent(image[num].resource, image[num].size_buf,
			image[num].kern_buf, image[num].pci_buf);

		image[num].kern_buf = NULL;
		image[num].pci_buf = 0;
		image[num].size_buf = 0;

#ifdef VME_DEBUG
	} else {
		pr_debug("UniverseII: Buffer not allocated\n");
#endif
	}
}

static int vme_user_match(struct vme_dev *vdev)
{
	int i;

	int cur_bus = vme_bus_num(vdev);
	int cur_slot = vme_slot_num(vdev);

	for (i = 0; i < bus_num; i++)
		if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
			return 1;

	return 0;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char *name;

	/* Save pointer to the bridge device */
	if (vme_user_bridge != NULL) {
		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		mutex_init(&image[i].mutex);
		image[i].device = NULL;
		image[i].resource = NULL;
		image[i].users = 0;
	}

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
		driver_name);
	if (err) {
		dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
			 VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (!vme_user_cdev) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err) {
		dev_warn(&vdev->dev, "cdev_add failed\n");
		goto err_char;
	}

	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
			VME_A24, VME_SCT);
		if (image[i].resource == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate slave resource\n");
			err = -ENOMEM;
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
			image[i].size_buf, &image[i].pci_buf);
		if (image[i].kern_buf == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate memory for buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources and allocate page sized buffers for
	 * small reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
			VME_A32, VME_SCT, VME_D32);
		if (image[i].resource == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate master resource\n");
			err = -ENOMEM;
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (image[i].kern_buf == NULL) {
			err = -ENOMEM;
			vme_master_free(image[i].resource);
			goto err_master;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		dev_err(&vdev->dev, "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;

		switch (type[i]) {
		case MASTER_MINOR:
			name = "bus/vme/m%d";
			break;
		case CONTROL_MINOR:
			name = "bus/vme/ctl";
			break;
		case SLAVE_MINOR:
			name = "bus/vme/s%d";
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}

		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
			MKDEV(VME_MAJOR, i), NULL, name, num);
		if (IS_ERR(image[i].device)) {
			dev_info(&vdev->dev, "Error creating sysfs device\n");
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter is set correctly to free all master windows */
	i = MASTER_MAX + 1;
err_master:
	while (i > MASTER_MINOR) {
		i--;
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter is set correctly to free all slave windows and
	 * buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

static int vme_user_remove(struct vme_dev *dev)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		mutex_destroy(&image[i].mutex);
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};

static int __init vme_user_init(void)
{
	int retval = 0;

	pr_info("VME User Space Access Driver\n");

	if (bus_num == 0) {
		pr_err("No cards, skipping registration\n");
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Let's start by supporting one bus, we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > VME_USER_BUS_MAX) {
		pr_err("Driver only able to handle %d buses\n",
		       VME_USER_BUS_MAX);
		bus_num = VME_USER_BUS_MAX;
	}

	/*
	 * Here we just register the maximum number of devices we can and
	 * leave vme_user_match() to allow only 1 to go through to probe().
	 * This way, if we later want to allow multiple user access devices,
	 * we just change the code in vme_user_match().
	 */
	retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
	if (retval != 0)
		goto err_reg;

	return retval;

err_reg:
err_nocard:
	return retval;
}

static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}

MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);
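
/*
 * Illustrative example: binding the driver to the first VME bridge when
 * loading the module:
 *
 *	modprobe vme_user bus=0
 *
 * Up to VME_USER_BUS_MAX bus numbers may be passed.
 */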

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);