hwrng: core - move add_early_randomness() out of rng_mutex
drivers/char/hw_random/core.c
/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/admin-guide/hw_random.rst for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"

static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per 1024 bits of input");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

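/*
 * Pull a small amount of data from the newly selected rng and mix it into
 * the system entropy pool via add_device_randomness() (no entropy credit).
 */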
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
	size_t size = min_t(size_t, 16, rng_buffer_size());

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_buffer, size, 0);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_buffer, bytes_read);
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

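/*
 * current_rng lifetime: set_current_rng()/drop_current_rng() run under
 * rng_mutex and own one reference; get_current_rng()/put_rng() hand out
 * and return temporary references for readers.
 */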
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng_nolock(void)
{
	if (current_rng)
		kref_get(&current_rng->ref);

	return current_rng;
}

static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = get_current_rng_nolock();

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

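/*
 * Take a reference on an already-initialized rng, or call its ->init hook
 * and set up refcounting on first use; then recompute current_quality and
 * start/stop the khwrngd fill thread accordingly.
 */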
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

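/*
 * Read from a driver: prefer the ->read callback, otherwise fall back to
 * the legacy ->data_present/->data_read pair. Caller must hold
 * reading_mutex.
 */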
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

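/*
 * read() handler for /dev/hwrng: fill rng_buffer from the current rng and
 * copy it out to userspace, honouring O_NONBLOCK and pending signals.
 */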
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
					rng_buffer_size(),
					!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
					 len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};

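/*
 * Switch to the highest-quality rng on rng_list (its head), or drop the
 * current one if the list is empty. Caller must hold rng_mutex.
 */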
static int enable_best_rng(void)
{
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* rng_list is sorted by quality, use the best (=first) one */
	if (!list_empty(&rng_list)) {
		struct hwrng *new_rng;

		new_rng = list_entry(rng_list.next, struct hwrng, list);
		ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
		if (!ret)
			cur_rng_set_by_user = 0;
	} else {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		ret = 0;
	}

	return ret;
}

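/*
 * sysfs "rng_current" store: an empty string re-enables automatic selection
 * via enable_best_rng(); a device name pins that rng and sets
 * cur_rng_set_by_user.
 */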
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err = -ENODEV;
	struct hwrng *rng, *old_rng, *new_rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;

	old_rng = current_rng;
	if (sysfs_streq(buf, "")) {
		err = enable_best_rng();
	} else {
		list_for_each_entry(rng, &rng_list, list) {
			if (sysfs_streq(rng->name, buf)) {
				cur_rng_set_by_user = 1;
				err = set_current_rng(rng);
				break;
			}
		}
	}
	new_rng = get_current_rng_nolock();
	mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (new_rng != old_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static ssize_t hwrng_attr_selected_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);
static DEVICE_ATTR(rng_selected, S_IRUGO,
		   hwrng_attr_selected_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

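/*
 * khwrngd worker: keep reading from the current rng and feed the data to
 * the input pool via add_hwgenerator_randomness(), crediting entropy in
 * proportion to current_quality (per 1024 bits of input).
 */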
static int hwrng_fillfn(void *unused)
{
	long rc;

	set_freezable();

	while (!kthread_freezable_should_stop(NULL)) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed\n");
		hwrng_fill = NULL;
	}
}

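/*
 * hwrng_register - register a hardware RNG source with the core
 *
 * The driver must set @name and at least one of the ->read or ->data_read
 * callbacks (see include/linux/hw_random.h for the exact prototypes); the
 * new rng becomes current if nothing better is already selected. Rough,
 * illustrative sketch only -- the "foo" names below are hypothetical:
 *
 *	static struct hwrng foo_rng = {
 *		.name	= "foo",
 *		.read	= foo_rng_read,
 *	};
 *
 *	err = hwrng_register(&foo_rng);   (or devm_hwrng_register())
 */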
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *new_rng, *tmp;
	struct list_head *rng_list_ptr;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	old_rng = current_rng;
	new_rng = NULL;

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	/* rng_list is sorted by decreasing quality */
	list_for_each(rng_list_ptr, &rng_list) {
		tmp = list_entry(rng_list_ptr, struct hwrng, list);
		if (tmp->quality < rng->quality)
			break;
	}
	list_add_tail(&rng->list, rng_list_ptr);

	err = 0;
	if (!old_rng ||
	    (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}

	new_rng = rng;
	kref_get(&new_rng->ref);
out_unlock:
	mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (new_rng != old_rng || !rng->init) {
			/*
			 * Use a new device's input to add some randomness to
			 * the system. If this rng device isn't going to be
			 * used right away, its init function hasn't been
			 * called yet by set_current_rng(); so only use the
			 * randomness from devices that don't need an init
			 * callback.
			 */
			add_early_randomness(new_rng);
		}
		put_rng(new_rng);
	}
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

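/*
 * hwrng_unregister - remove a hardware RNG source; switches to the next
 * best rng (or stops khwrngd if none is left) and waits for any pending
 * cleanup of @rng to finish before returning.
 */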
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *old_rng, *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	old_rng = current_rng;
	list_del(&rng->list);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (old_rng != new_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

static int __init hwrng_modinit(void)
{
	int ret = -ENOMEM;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");