Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 1 | /* |
| 2 | Added support for the AMD Geode LX RNG |
| 3 | (c) Copyright 2004-2005 Advanced Micro Devices, Inc. |
| 4 | |
| 5 | derived from |
| 6 | |
| 7 | Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) |
| 8 | (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com> |
| 9 | |
| 10 | derived from |
| 11 | |
| 12 | Hardware driver for the AMD 768 Random Number Generator (RNG) |
| 13 | (c) Copyright 2001 Red Hat Inc <alan@redhat.com> |
| 14 | |
| 15 | derived from |
| 16 | |
| 17 | Hardware driver for Intel i810 Random Number Generator (RNG) |
| 18 | Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com> |
| 19 | Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com> |
| 20 | |
| 21 | Added generic RNG API |
Michael Büsch | eb032b9 | 2011-07-04 20:50:05 +0200 | [diff] [blame] | 22 | Copyright 2006 Michael Buesch <m@bues.ch> |
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 23 | Copyright 2005 (c) MontaVista Software, Inc. |
| 24 | |
| 25 | Please read Documentation/hw_random.txt for details on use. |
| 26 | |
| 27 | ---------------------------------------------------------- |
| 28 | This software may be used and distributed according to the terms |
| 29 | of the GNU General Public License, incorporated herein by reference. |
| 30 | |
| 31 | */ |
| 32 | |
| 33 | |
| 34 | #include <linux/device.h> |
| 35 | #include <linux/hw_random.h> |
| 36 | #include <linux/module.h> |
| 37 | #include <linux/kernel.h> |
| 38 | #include <linux/fs.h> |
Al Viro | 914e263 | 2006-10-18 13:55:46 -0400 | [diff] [blame] | 39 | #include <linux/sched.h> |
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 40 | #include <linux/miscdevice.h> |
Torsten Duwe | be4000b | 2014-06-14 23:46:03 -0400 | [diff] [blame] | 41 | #include <linux/kthread.h> |
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 42 | #include <linux/delay.h> |
Rusty Russell | f7f154f | 2013-03-05 10:07:08 +1030 | [diff] [blame] | 43 | #include <linux/slab.h> |
Kees Cook | d9e7972 | 2014-03-03 15:51:48 -0800 | [diff] [blame] | 44 | #include <linux/random.h> |
Rusty Russell | 3a2c0ba | 2014-12-08 16:50:37 +0800 | [diff] [blame] | 45 | #include <linux/err.h> |
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 46 | #include <asm/uaccess.h> |
| 47 | |
| 48 | |
| 49 | #define RNG_MODULE_NAME "hw_random" |
| 50 | #define PFX RNG_MODULE_NAME ": " |
| 51 | #define RNG_MISCDEV_MINOR 183 /* official */ |
| 52 | |
| 53 | |
| 54 | static struct hwrng *current_rng; |
Torsten Duwe | be4000b | 2014-06-14 23:46:03 -0400 | [diff] [blame] | 55 | static struct task_struct *hwrng_fill; |
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 56 | static LIST_HEAD(rng_list); |
Rusty Russell | 9372b35 | 2014-12-08 16:50:35 +0800 | [diff] [blame] | 57 | /* Protects rng_list and current_rng */ |
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 58 | static DEFINE_MUTEX(rng_mutex); |
Rusty Russell | 9372b35 | 2014-12-08 16:50:35 +0800 | [diff] [blame] | 59 | /* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */ |
| 60 | static DEFINE_MUTEX(reading_mutex); |
Ian Molton | 9996508 | 2009-12-01 14:47:32 +0800 | [diff] [blame] | 61 | static int data_avail; |
Torsten Duwe | be4000b | 2014-06-14 23:46:03 -0400 | [diff] [blame] | 62 | static u8 *rng_buffer, *rng_fillbuf; |
Torsten Duwe | 0f734e6 | 2014-06-14 23:48:41 -0400 | [diff] [blame] | 63 | static unsigned short current_quality; |
| 64 | static unsigned short default_quality; /* = 0; default to "off" */ |
Torsten Duwe | be4000b | 2014-06-14 23:46:03 -0400 | [diff] [blame] | 65 | |
| 66 | module_param(current_quality, ushort, 0644); |
| 67 | MODULE_PARM_DESC(current_quality, |
| 68 | "current hwrng entropy estimation per mill"); |
Torsten Duwe | 0f734e6 | 2014-06-14 23:48:41 -0400 | [diff] [blame] | 69 | module_param(default_quality, ushort, 0644); |
| 70 | MODULE_PARM_DESC(default_quality, |
| 71 | "default entropy content of hwrng per mill"); |
Torsten Duwe | be4000b | 2014-06-14 23:46:03 -0400 | [diff] [blame] | 72 | |
Herbert Xu | ff77c15 | 2014-12-23 16:40:21 +1100 | [diff] [blame] | 73 | static void drop_current_rng(void); |
Herbert Xu | 90ac41b | 2014-12-23 16:40:22 +1100 | [diff] [blame] | 74 | static int hwrng_init(struct hwrng *rng); |
Torsten Duwe | be4000b | 2014-06-14 23:46:03 -0400 | [diff] [blame] | 75 | static void start_khwrngd(void); |
Rusty Russell | f7f154f | 2013-03-05 10:07:08 +1030 | [diff] [blame] | 76 | |
Amit Shah | d3cc799 | 2014-07-10 15:42:34 +0530 | [diff] [blame] | 77 | static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, |
| 78 | int wait); |
| 79 | |
Rusty Russell | f7f154f | 2013-03-05 10:07:08 +1030 | [diff] [blame] | 80 | static size_t rng_buffer_size(void) |
| 81 | { |
| 82 | return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; |
| 83 | } |
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 84 | |
/*
 * Feed a small sample from @rng into the kernel's entropy pool at device
 * discovery time.  Reads at most 16 bytes (bounded by the shared buffer
 * size) and credits it only as device randomness, since the quality of a
 * brand-new device is not yet established.
 */
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
	size_t size = min_t(size_t, 16, rng_buffer_size());

	/* rng_buffer is shared with the read path; reading_mutex guards it */
	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_buffer, bytes_read);
}
| 96 | |
/*
 * kref release callback: runs when the last reference to an rng drops.
 * Invokes the driver's optional ->cleanup() hook and then signals
 * hwrng_unregister(), which waits on cleanup_done.
 */
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}
| 106 | |
/*
 * Make @rng the device served through /dev/hwrng.  @rng is initialized
 * (or re-referenced) *before* the old current_rng is dropped, so a failed
 * init leaves the previous device in place.  Caller must hold rng_mutex.
 */
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}
| 122 | |
| 123 | static void drop_current_rng(void) |
| 124 | { |
| 125 | BUG_ON(!mutex_is_locked(&rng_mutex)); |
| 126 | if (!current_rng) |
| 127 | return; |
| 128 | |
| 129 | /* decrease last reference for triggering the cleanup */ |
| 130 | kref_put(¤t_rng->ref, cleanup_rng); |
| 131 | current_rng = NULL; |
| 132 | } |
| 133 | |
| 134 | /* Returns ERR_PTR(), NULL or refcounted hwrng */ |
| 135 | static struct hwrng *get_current_rng(void) |
| 136 | { |
| 137 | struct hwrng *rng; |
| 138 | |
| 139 | if (mutex_lock_interruptible(&rng_mutex)) |
| 140 | return ERR_PTR(-ERESTARTSYS); |
| 141 | |
| 142 | rng = current_rng; |
| 143 | if (rng) |
| 144 | kref_get(&rng->ref); |
| 145 | |
| 146 | mutex_unlock(&rng_mutex); |
| 147 | return rng; |
| 148 | } |
| 149 | |
| 150 | static void put_rng(struct hwrng *rng) |
| 151 | { |
| 152 | /* |
| 153 | * Hold rng_mutex here so we serialize in case they set_current_rng |
| 154 | * on rng again immediately. |
| 155 | */ |
| 156 | mutex_lock(&rng_mutex); |
| 157 | if (rng) |
| 158 | kref_put(&rng->ref, cleanup_rng); |
| 159 | mutex_unlock(&rng_mutex); |
| 160 | } |
| 161 | |
/*
 * Prepare @rng for use as current_rng.  If the device is still live
 * (refcount non-zero) only an extra reference is taken; otherwise the
 * driver's optional ->init() hook runs and the refcount/completion are
 * re-armed.  In both cases the entropy pool is seeded and the khwrngd
 * fill thread is started or stopped according to the quality estimate.
 * Caller holds rng_mutex.  Returns 0 or the ->init() error.
 */
static int hwrng_init(struct hwrng *rng)
{
	/* already initialized: just grab another reference, skip ->init() */
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	/* per-mille entropy estimate; driver-provided value beats the param */
	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	/* quality 0 means "no entropy credit": no fill thread needed */
	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}
| 192 | |
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 193 | static int rng_dev_open(struct inode *inode, struct file *filp) |
| 194 | { |
| 195 | /* enforce read-only access to this chrdev */ |
| 196 | if ((filp->f_mode & FMODE_READ) == 0) |
| 197 | return -EINVAL; |
| 198 | if (filp->f_mode & FMODE_WRITE) |
| 199 | return -EINVAL; |
| 200 | return 0; |
| 201 | } |
| 202 | |
/*
 * Fetch up to @size bytes from @rng into @buffer.  Prefers the modern
 * ->read() interface; otherwise falls back to the legacy
 * ->data_present()/->data_read() pair (which delivers a single u32).
 * @wait selects blocking behaviour.  Returns the number of bytes read,
 * 0 if no data was available, or a negative error.  Caller must hold
 * reading_mutex (asserted below).
 */
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			int wait) {
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;	/* legacy drivers without data_present */

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}
| 221 | |
/*
 * /dev/hwrng read(): copy random bytes to userspace, refilling the shared
 * rng_buffer from the current rng as it drains.  Re-acquires the current
 * rng on every loop pass so a device switch takes effect mid-read.
 * Honours O_NONBLOCK (-EAGAIN when the device has nothing) and returns a
 * partial count in preference to an error once any bytes were delivered.
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			/* no hardware rng registered at the moment */
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			/* buffer empty: pull a fresh chunk from the device */
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			/* consume from the tail of the buffered data */
			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		/* be nice to other tasks during long reads */
		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;	/* partial read wins over a late error */

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
| 298 | |
| 299 | |
/* File operations for the read-only /dev/hwrng character device. */
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};
| 306 | |
static const struct attribute_group *rng_dev_groups[];

/* Misc device node /dev/hwrng, official minor 183 (see RNG_MISCDEV_MINOR). */
static struct miscdevice rng_miscdev = {
	.minor		= RNG_MISCDEV_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,	/* rng_current / rng_available sysfs attrs */
};
| 316 | |
| 317 | |
/*
 * sysfs "rng_current" store: write a registered rng's name to switch the
 * active device.  Returns the input length on success, -ENODEV if no rng
 * with that name is registered, or the set_current_rng() error.
 */
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (sysfs_streq(rng->name, buf)) {
			err = 0;
			/* re-selecting the current rng is a no-op */
			if (rng != current_rng)
				err = set_current_rng(rng);
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}
| 341 | |
/* sysfs "rng_current" show: name of the active rng, or "none". */
static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}
| 358 | |
/*
 * sysfs "rng_available" show: space-separated list of all registered rng
 * names, newline-terminated.  strlcat() bounds everything to PAGE_SIZE.
 */
static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}
| 379 | |
/* rng_current is root-writable (device selection); rng_available read-only. */
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);

/* Attribute group attached to the misc device via rng_miscdev.groups. */
static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 394 | |
/* Remove the /dev/hwrng node on module exit. */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}
| 399 | |
/* Create the /dev/hwrng node; sysfs attributes come along via .groups. */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}
| 404 | |
/*
 * khwrngd worker: while a non-zero quality estimate is configured, keep
 * pulling data from the current rng and feed it to the input pool with
 * proportional entropy credit (current_quality is per-mille, hence the
 * "* 8 >> 10" bits-credited computation).  Exits when stopped or when
 * no rng is available.
 */
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			/* device starved: back off before retrying */
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;	/* thread is exiting on its own; clear handle */
	return 0;
}
| 432 | |
| 433 | static void start_khwrngd(void) |
| 434 | { |
| 435 | hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); |
Martin Schwidefsky | 17fb874 | 2015-07-24 13:13:30 +0200 | [diff] [blame] | 436 | if (IS_ERR(hwrng_fill)) { |
Torsten Duwe | be4000b | 2014-06-14 23:46:03 -0400 | [diff] [blame] | 437 | pr_err("hwrng_fill thread creation failed"); |
| 438 | hwrng_fill = NULL; |
| 439 | } |
| 440 | } |
| 441 | |
/**
 * hwrng_register - register a hardware random number generator
 * @rng: device to register; must supply ->name and at least one of
 *	 ->data_read (legacy) or ->read
 *
 * Adds @rng to the global list and, if no rng is currently active,
 * makes it the current one.  Returns 0 on success, -EINVAL for a
 * malformed @rng, -EEXIST on a duplicate name, or an init error.
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (rng->name == NULL ||
	    (rng->data_read == NULL && rng->read == NULL))
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	/* pre-complete cleanup_done so unregister never blocks spuriously */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = 0;
	if (!old_rng) {
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
| 488 | |
/**
 * hwrng_unregister - remove a previously registered rng
 * @rng: device to remove
 *
 * If @rng was the current device, falls back to the most recently
 * registered remaining rng (if any).  Stops khwrngd once the list is
 * empty, then waits for @rng's cleanup to finish before returning, so
 * the caller may free @rng afterwards.
 */
void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		if (!list_empty(&rng_list)) {
			struct hwrng *tail;

			tail = list_entry(rng_list.prev, struct hwrng, list);

			set_current_rng(tail);
		}
	}

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		/* stop the fill thread outside the lock to avoid deadlock */
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	/* wait until every outstanding reference to @rng has been dropped */
	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 515 | |
/* devres destructor: unregister the rng when its owning device goes away. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}
| 520 | |
| 521 | static int devm_hwrng_match(struct device *dev, void *res, void *data) |
| 522 | { |
| 523 | struct hwrng **r = res; |
| 524 | |
| 525 | if (WARN_ON(!r || !*r)) |
| 526 | return 0; |
| 527 | |
| 528 | return *r == data; |
| 529 | } |
| 530 | |
| 531 | int devm_hwrng_register(struct device *dev, struct hwrng *rng) |
| 532 | { |
| 533 | struct hwrng **ptr; |
| 534 | int error; |
| 535 | |
| 536 | ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL); |
| 537 | if (!ptr) |
| 538 | return -ENOMEM; |
| 539 | |
| 540 | error = hwrng_register(rng); |
| 541 | if (error) { |
| 542 | devres_free(ptr); |
| 543 | return error; |
| 544 | } |
| 545 | |
| 546 | *ptr = rng; |
| 547 | devres_add(dev, ptr); |
| 548 | return 0; |
| 549 | } |
| 550 | EXPORT_SYMBOL_GPL(devm_hwrng_register); |
| 551 | |
| 552 | void devm_hwrng_unregister(struct device *dev, struct hwrng *rng) |
| 553 | { |
| 554 | devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng); |
| 555 | } |
| 556 | EXPORT_SYMBOL_GPL(devm_hwrng_unregister); |
| 557 | |
Herbert Xu | ac3a497 | 2014-12-23 16:40:19 +1100 | [diff] [blame] | 558 | static int __init hwrng_modinit(void) |
| 559 | { |
PrasannaKumar Muralidharan | 58b022a | 2016-09-07 20:18:02 +0530 | [diff] [blame] | 560 | int ret = -ENOMEM; |
| 561 | |
| 562 | /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ |
| 563 | rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); |
| 564 | if (!rng_buffer) |
| 565 | return -ENOMEM; |
| 566 | |
| 567 | rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL); |
| 568 | if (!rng_fillbuf) { |
| 569 | kfree(rng_buffer); |
| 570 | return -ENOMEM; |
| 571 | } |
| 572 | |
| 573 | ret = register_miscdev(); |
| 574 | if (ret) { |
| 575 | kfree(rng_fillbuf); |
| 576 | kfree(rng_buffer); |
| 577 | } |
| 578 | |
| 579 | return ret; |
Herbert Xu | ac3a497 | 2014-12-23 16:40:19 +1100 | [diff] [blame] | 580 | } |
| 581 | |
/*
 * Module exit: all rngs must already be unregistered (BUG otherwise),
 * so just free the shared buffers and remove the device node.
 */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}
| 592 | |
Herbert Xu | ac3a497 | 2014-12-23 16:40:19 +1100 | [diff] [blame] | 593 | module_init(hwrng_modinit); |
| 594 | module_exit(hwrng_modexit); |
Michael Buesch | 844dd05 | 2006-06-26 00:24:59 -0700 | [diff] [blame] | 595 | |
| 596 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); |
| 597 | MODULE_LICENSE("GPL"); |