/*
 * Support for hardware-assisted userspace interrupt masking.
 *
 * Copyright (C) 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "intc: " fmt

#include <linux/errno.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/stat.h>
#include <asm/sizes.h>
#include "internals.h"

static void __iomem *uimask;

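/*
 * Report the currently programmed cut-off level, taken from bits [7:4]
 * of the mapped USERIMASK register.
 */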
static ssize_t
show_intc_userimask(struct device *dev,
		    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
}

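/*
 * Set a new cut-off level. The value written packs the requested level
 * into bits [7:4] together with 0xa5 in bits [31:24], which serves as
 * the key the hardware expects to accompany an update. Levels at or
 * above the default priority level are rejected so that only the
 * opt-in low-priority IRQs can be masked from userspace.
 */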
static ssize_t
store_intc_userimask(struct device *dev,
		     struct device_attribute *attr,
		     const char *buf, size_t count)
{
	unsigned long level;

	level = simple_strtoul(buf, NULL, 10);

	/*
	 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
	 * these are chomped so as to not interfere with normal IRQs.
	 *
	 * Level 1 is a special case on some CPUs in that it's not
	 * directly settable, but given that USERIMASK cuts off below a
	 * certain level, we don't care about this limitation here.
	 * Level 0 on the other hand equates to user masking disabled.
	 *
	 * We use the default priority level as a cut off so that only
	 * special case opt-in IRQs can be mangled.
	 */
	if (level >= intc_get_dfl_prio_level())
		return -EINVAL;

	__raw_writel(0xa5 << 24 | level << 4, uimask);

	return count;
}

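/*
 * Sketch of the userspace side (the exact sysfs path depends on how
 * intc_subsys is registered elsewhere in this driver, typically ending
 * up under /sys/devices/system/intc):
 *
 *	# set the cut-off level to 1
 *	echo 1 > /sys/devices/system/intc/userimask
 *
 *	# level 0 disables user masking again
 *	echo 0 > /sys/devices/system/intc/userimask
 */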
static DEVICE_ATTR(userimask, S_IRUSR | S_IWUSR,
		   show_intc_userimask, store_intc_userimask);

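/*
 * The attribute is only exposed once a USERIMASK register has actually
 * been registered; otherwise the late initcall simply bails out.
 */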
static int __init userimask_sysdev_init(void)
{
	if (unlikely(!uimask))
		return -ENXIO;

	return device_create_file(intc_subsys.dev_root, &dev_attr_userimask);
}
late_initcall(userimask_sysdev_init);

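/*
 * register_intc_userimask - map the USERIMASK register block
 * @addr: physical base address of the USERIMASK register
 *
 * Intended to be called by CPU setup code that knows where the register
 * lives. Maps a 4K window at @addr; the sysfs attribute is created by
 * the late initcall above if the mapping is in place by then. Returns
 * -EBUSY if a mapping has already been established and -ENOMEM if the
 * ioremap fails.
 */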
int register_intc_userimask(unsigned long addr)
{
	if (unlikely(uimask))
		return -EBUSY;

	uimask = ioremap_nocache(addr, SZ_4K);
	if (unlikely(!uimask))
		return -ENOMEM;

	pr_info("userimask support registered for levels 0 -> %d\n",
		intc_get_dfl_prio_level() - 1);

	return 0;
}
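/*
 * Sketch of how CPU setup code might hook this up. The base address
 * below is purely illustrative; the real value comes from the CPU's
 * hardware manual:
 *
 *	static int __init cpu_intc_init(void)
 *	{
 *		return register_intc_userimask(0xfe410000);
 *	}
 */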