blob: d2d2c89de5b4428e627eb06d9733ec1bcf2c2b0d [file] [log] [blame]
Michael Buesch844dd052006-06-26 00:24:59 -07001/*
2 Added support for the AMD Geode LX RNG
3 (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
4
5 derived from
6
7 Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
8 (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
9
10 derived from
11
12 Hardware driver for the AMD 768 Random Number Generator (RNG)
13 (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
14
15 derived from
16
17 Hardware driver for Intel i810 Random Number Generator (RNG)
18 Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
19 Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
20
21 Added generic RNG API
Michael Büscheb032b92011-07-04 20:50:05 +020022 Copyright 2006 Michael Buesch <m@bues.ch>
Michael Buesch844dd052006-06-26 00:24:59 -070023 Copyright 2005 (c) MontaVista Software, Inc.
24
25 Please read Documentation/hw_random.txt for details on use.
26
27 ----------------------------------------------------------
28 This software may be used and distributed according to the terms
29 of the GNU General Public License, incorporated herein by reference.
30
31 */
32
33
34#include <linux/device.h>
35#include <linux/hw_random.h>
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/fs.h>
Al Viro914e2632006-10-18 13:55:46 -040039#include <linux/sched.h>
Michael Buesch844dd052006-06-26 00:24:59 -070040#include <linux/miscdevice.h>
Torsten Duwebe4000b2014-06-14 23:46:03 -040041#include <linux/kthread.h>
Michael Buesch844dd052006-06-26 00:24:59 -070042#include <linux/delay.h>
Rusty Russellf7f154f2013-03-05 10:07:08 +103043#include <linux/slab.h>
Kees Cookd9e79722014-03-03 15:51:48 -080044#include <linux/random.h>
Rusty Russell3a2c0ba2014-12-08 16:50:37 +080045#include <linux/err.h>
Michael Buesch844dd052006-06-26 00:24:59 -070046#include <asm/uaccess.h>
47
48
49#define RNG_MODULE_NAME "hw_random"
50#define PFX RNG_MODULE_NAME ": "
51#define RNG_MISCDEV_MINOR 183 /* official */
52
53
/* Currently selected RNG backend; NULL when none is active. */
static struct hwrng *current_rng;
/* Kernel thread feeding the input pool (see hwrng_fillfn); NULL when stopped. */
static struct task_struct *hwrng_fill;
/* All registered RNG backends, in registration order. */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
/* Number of unread bytes left in rng_buffer (guarded by reading_mutex). */
static int data_avail;
/* rng_buffer: staging for userspace reads; rng_fillbuf: for the fill thread. */
static u8 *rng_buffer, *rng_fillbuf;
/* Entropy estimate (per mill) of the currently selected RNG. */
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");

/* Forward declarations for helpers defined below. */
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);
79
Rusty Russellf7f154f2013-03-05 10:07:08 +103080static size_t rng_buffer_size(void)
81{
82 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
83}
Michael Buesch844dd052006-06-26 00:24:59 -070084
/*
 * Pull up to 16 bytes from @rng and mix them into the kernel's device
 * randomness pool.  Takes reading_mutex because rng_buffer and the
 * backend read path are shared with the chrdev reader and fill thread.
 * Best-effort: a failed or empty read is silently ignored.
 */
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
	size_t size = min_t(size_t, 16, rng_buffer_size());

	mutex_lock(&reading_mutex);
	/* wait=1: allow the backend to block briefly for data */
	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_buffer, bytes_read);
}
96
/*
 * kref release callback: runs when the last reference to a hwrng is
 * dropped.  Invokes the backend's optional ->cleanup() and then signals
 * cleanup_done so hwrng_unregister() can safely return.
 */
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}
106
/*
 * Make @rng the active backend.  Caller must hold rng_mutex.
 * Initializes (or re-references) @rng first; only on success is the
 * previous current_rng dropped, so failure leaves the old RNG active.
 * Returns 0 or the error from hwrng_init().
 */
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}
122
/*
 * Release the core's own reference on current_rng and clear it.
 * Caller must hold rng_mutex.  No-op when nothing is selected.
 */
static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}
133
/* Returns ERR_PTR(), NULL or refcounted hwrng */
/*
 * Grab a reference to the currently selected RNG under rng_mutex.
 * ERR_PTR(-ERESTARTSYS) if interrupted while waiting for the lock,
 * NULL if no RNG is selected; otherwise the caller owns a kref and
 * must release it with put_rng().
 */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}
149
/*
 * Drop a reference previously taken via get_current_rng().
 * Accepts NULL (then only takes/releases the mutex).
 */
static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
161
/*
 * Initialize @rng for use as the current backend and take a reference.
 * If the kref is still live (device was recently current), just re-grab
 * it and skip ->init().  Also seeds the randomness pool from the device,
 * recomputes current_quality (clamped to 1024 per mill), and starts or
 * stops the background fill thread accordingly.
 * Returns 0 or the error from the backend's ->init().
 */
static int hwrng_init(struct hwrng *rng)
{
	/* Reference still held from a previous selection: skip re-init. */
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	/* Fresh first reference; arm cleanup_done for the next teardown. */
	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	/* Device-declared quality wins; fall back to the module default. */
	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	/* Quality 0 means "no trusted entropy": no fill thread needed. */
	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}
192
Michael Buesch844dd052006-06-26 00:24:59 -0700193static int rng_dev_open(struct inode *inode, struct file *filp)
194{
195 /* enforce read-only access to this chrdev */
196 if ((filp->f_mode & FMODE_READ) == 0)
197 return -EINVAL;
198 if (filp->f_mode & FMODE_WRITE)
199 return -EINVAL;
200 return 0;
201}
202
/*
 * Fetch random bytes from @rng into @buffer (at most @size bytes).
 * Caller must hold reading_mutex.  Prefers the modern ->read() hook;
 * otherwise falls back to the legacy ->data_present()/->data_read()
 * pair, which yields at most one u32 per call.
 * @wait: whether the backend may block for data.
 * Returns number of bytes produced, 0 if none available, or negative errno.
 */
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			int wait) {
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	/* Legacy interface: no ->data_present means data is always ready. */
	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}
221
/*
 * read() handler for /dev/hwrng.  Loops refilling rng_buffer from the
 * current backend and copying out to userspace until @size is satisfied,
 * an error occurs, or (for O_NONBLOCK) no data is available.
 * Each iteration re-resolves the current RNG so a concurrent backend
 * switch is picked up.  Returns bytes copied, or a negative errno if
 * nothing was copied (the `ret ? : err` idiom at "out").
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			/* No backend registered/selected. */
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		/* Refill the staging buffer only once it is fully drained. */
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			/* Backend had nothing; only an error for NONBLOCK. */
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			/* Consume from the tail of the staging buffer. */
			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		/* Be polite on long reads; also honor pending signals. */
		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
298
299
/* File operations for the /dev/hwrng character device (read-only). */
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,	/* device is a stream; seeking is a no-op */
};
306
/* Defined below via ATTRIBUTE_GROUPS(); needed here for rng_miscdev. */
static const struct attribute_group *rng_dev_groups[];

/* Misc character device backing /dev/hwrng (fixed minor 183). */
static struct miscdevice rng_miscdev = {
	.minor		= RNG_MISCDEV_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
316
317
/*
 * sysfs store for "rng_current": select the named backend from rng_list.
 * Returns @len on success, -ENODEV if no backend matches @buf,
 * -ERESTARTSYS if interrupted, or the error from set_current_rng().
 */
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (sysfs_streq(rng->name, buf)) {
			err = 0;
			/* Selecting the already-current RNG is a no-op. */
			if (rng != current_rng)
				err = set_current_rng(rng);
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}
341
/*
 * sysfs show for "rng_current": name of the selected backend, or "none".
 * Takes a temporary reference so the name stays valid while printing.
 */
static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}
358
/*
 * sysfs show for "rng_available": space-separated list of all registered
 * backend names, newline-terminated.  strlcat() bounds everything to
 * PAGE_SIZE, silently truncating an (unlikely) overlong list.
 */
static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}
379
/* Sysfs attributes: rng_current is root-writable, rng_available read-only. */
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	NULL
};

/* Generates rng_dev_groups[], referenced by rng_miscdev above. */
ATTRIBUTE_GROUPS(rng_dev);
Michael Buesch844dd052006-06-26 00:24:59 -0700394
/* Tear down the /dev/hwrng misc device (module exit only). */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}
399
/* Register the /dev/hwrng misc device; returns 0 or negative errno. */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}
404
/*
 * Body of the "hwrng" kernel thread: continuously reads from the current
 * backend and feeds the kernel entropy pool, crediting entropy scaled by
 * current_quality (per-mill -> bits: rc * quality * 8 / 1024).
 * Exits when stopped, or when no backend is available; clears hwrng_fill
 * on the way out so a new thread can be started later.
 */
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			/* Back off rather than spinning on a dry device. */
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}
432
/*
 * Launch the background fill thread.  On failure, log and leave
 * hwrng_fill NULL so the system still works without the thread.
 */
static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed");
		hwrng_fill = NULL;
	}
}
441
/**
 * hwrng_register - register a new hardware RNG backend
 * @rng: device descriptor; must have a name and at least one of
 *       ->read() or ->data_read()
 *
 * Names must be unique (-EEXIST otherwise).  The first registered device
 * automatically becomes current.  Returns 0 or a negative errno.
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (rng->name == NULL ||
	    (rng->data_read == NULL && rng->read == NULL))
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	/*
	 * Start with cleanup_done completed so an unregister before any
	 * use does not block; hwrng_init() re-arms it when taking over.
	 */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = 0;
	if (!old_rng) {
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
488
/**
 * hwrng_unregister - remove a previously registered RNG backend
 * @rng: device to remove
 *
 * If @rng was current, the most recently registered remaining device
 * takes over.  When the list becomes empty, the fill thread is stopped
 * — after releasing rng_mutex, since hwrng_fillfn() takes that mutex
 * via get_current_rng()/put_rng().  Blocks until @rng's cleanup has
 * completed, so the caller may free it on return.
 */
void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		if (!list_empty(&rng_list)) {
			struct hwrng *tail;

			tail = list_entry(rng_list.prev, struct hwrng, list);

			set_current_rng(tail);
		}
	}

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
Michael Buesch844dd052006-06-26 00:24:59 -0700515
/* devres release callback: unregister the hwrng stored in the devres slot. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}
520
/*
 * devres match callback: true when the devres slot holds @data.
 * A slot without a valid hwrng pointer indicates a bug (warn, no match).
 */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **rng_res = res;

	if (WARN_ON(!rng_res || !*rng_res))
		return 0;

	return *rng_res == data;
}
530
/**
 * devm_hwrng_register - resource-managed hwrng_register()
 * @dev: device that owns the registration
 * @rng: RNG descriptor to register
 *
 * The RNG is automatically unregistered when @dev is unbound.
 * Returns 0 or a negative errno (-ENOMEM, or hwrng_register() error).
 */
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);
551
/* Explicitly undo a devm_hwrng_register() before device unbind. */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
557
Herbert Xuac3a4972014-12-23 16:40:19 +1100558static int __init hwrng_modinit(void)
559{
PrasannaKumar Muralidharan58b022a2016-09-07 20:18:02 +0530560 int ret = -ENOMEM;
561
562 /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
563 rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
564 if (!rng_buffer)
565 return -ENOMEM;
566
567 rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
568 if (!rng_fillbuf) {
569 kfree(rng_buffer);
570 return -ENOMEM;
571 }
572
573 ret = register_miscdev();
574 if (ret) {
575 kfree(rng_fillbuf);
576 kfree(rng_buffer);
577 }
578
579 return ret;
Herbert Xuac3a4972014-12-23 16:40:19 +1100580}
581
/*
 * Module exit: all backends must already be unregistered (BUG_ON
 * otherwise).  Frees the staging buffers under rng_mutex, then removes
 * the misc device.
 */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}
592
/* Module entry/exit points and metadata. */
module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");