GNU Linux-libre 4.19.264-gnu1
drivers/char/hw_random/core.c
/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/hw_random.txt for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME         "hw_random"

static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
                 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
                 "default entropy content of hwrng per mill");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
                               int wait);

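/* Size of rng_buffer and rng_fillbuf: SMP_CACHE_BYTES, but at least 32 bytes. */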
static size_t rng_buffer_size(void)
{
        return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

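/*
 * Mix a small, non-blocking sample from a freshly initialized device into
 * the kernel's entropy pool via add_device_randomness() (no entropy credit).
 */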
static void add_early_randomness(struct hwrng *rng)
{
        int bytes_read;
        size_t size = min_t(size_t, 16, rng_buffer_size());

        mutex_lock(&reading_mutex);
        bytes_read = rng_get_data(rng, rng_buffer, size, 0);
        mutex_unlock(&reading_mutex);
        if (bytes_read > 0)
                add_device_randomness(rng_buffer, bytes_read);
}

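/*
 * kref release callback: run the driver's ->cleanup hook, if any, and let
 * hwrng_unregister() know that teardown has finished.
 */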
static inline void cleanup_rng(struct kref *kref)
{
        struct hwrng *rng = container_of(kref, struct hwrng, ref);

        if (rng->cleanup)
                rng->cleanup(rng);

        complete(&rng->cleanup_done);
}

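/*
 * Make @rng the device backing /dev/hwrng and khwrngd: initialize it, then
 * drop the reference held on the previous device. Caller holds rng_mutex.
 */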
static int set_current_rng(struct hwrng *rng)
{
        int err;

        BUG_ON(!mutex_is_locked(&rng_mutex));

        err = hwrng_init(rng);
        if (err)
                return err;

        drop_current_rng();
        current_rng = rng;

        return 0;
}

static void drop_current_rng(void)
{
        BUG_ON(!mutex_is_locked(&rng_mutex));
        if (!current_rng)
                return;

        /* decrease last reference for triggering the cleanup */
        kref_put(&current_rng->ref, cleanup_rng);
        current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
        struct hwrng *rng;

        if (mutex_lock_interruptible(&rng_mutex))
                return ERR_PTR(-ERESTARTSYS);

        rng = current_rng;
        if (rng)
                kref_get(&rng->ref);

        mutex_unlock(&rng_mutex);
        return rng;
}

static void put_rng(struct hwrng *rng)
{
        /*
         * Hold rng_mutex here so we serialize in case they set_current_rng
         * on rng again immediately.
         */
        mutex_lock(&rng_mutex);
        if (rng)
                kref_put(&rng->ref, cleanup_rng);
        mutex_unlock(&rng_mutex);
}

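/*
 * Grab a reference on an already-initialized device, or run its ->init hook
 * and set up refcounting. Also updates current_quality (capped at 1024) and
 * starts or stops the khwrngd fill thread accordingly.
 */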
static int hwrng_init(struct hwrng *rng)
{
        if (kref_get_unless_zero(&rng->ref))
                goto skip_init;

        if (rng->init) {
                int ret;

                ret = rng->init(rng);
                if (ret)
                        return ret;
        }

        kref_init(&rng->ref);
        reinit_completion(&rng->cleanup_done);

skip_init:
        add_early_randomness(rng);

        current_quality = rng->quality ? : default_quality;
        if (current_quality > 1024)
                current_quality = 1024;

        if (current_quality == 0 && hwrng_fill)
                kthread_stop(hwrng_fill);
        if (current_quality > 0 && !hwrng_fill)
                start_khwrngd();

        return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
        /* enforce read-only access to this chrdev */
        if ((filp->f_mode & FMODE_READ) == 0)
                return -EINVAL;
        if (filp->f_mode & FMODE_WRITE)
                return -EINVAL;
        return 0;
}

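/*
 * Pull bytes from the device: prefer the ->read callback, otherwise fall
 * back to the legacy ->data_present/->data_read pair (at most 4 bytes per
 * call). Caller must hold reading_mutex.
 */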
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
                        int wait)
{
        int present;

        BUG_ON(!mutex_is_locked(&reading_mutex));
        if (rng->read)
                return rng->read(rng, (void *)buffer, size, wait);

        if (rng->data_present)
                present = rng->data_present(rng, wait);
        else
                present = 1;

        if (present)
                return rng->data_read(rng, (u32 *)buffer);

        return 0;
}

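/*
 * read() handler for /dev/hwrng: refill rng_buffer from the current device
 * as needed and copy it to userspace, honouring O_NONBLOCK and signals.
 */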
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
                            size_t size, loff_t *offp)
{
        ssize_t ret = 0;
        int err = 0;
        int bytes_read, len;
        struct hwrng *rng;

        while (size) {
                rng = get_current_rng();
                if (IS_ERR(rng)) {
                        err = PTR_ERR(rng);
                        goto out;
                }
                if (!rng) {
                        err = -ENODEV;
                        goto out;
                }

                if (mutex_lock_interruptible(&reading_mutex)) {
                        err = -ERESTARTSYS;
                        goto out_put;
                }
                if (!data_avail) {
                        bytes_read = rng_get_data(rng, rng_buffer,
                                rng_buffer_size(),
                                !(filp->f_flags & O_NONBLOCK));
                        if (bytes_read < 0) {
                                err = bytes_read;
                                goto out_unlock_reading;
                        }
                        data_avail = bytes_read;
                }

                if (!data_avail) {
                        if (filp->f_flags & O_NONBLOCK) {
                                err = -EAGAIN;
                                goto out_unlock_reading;
                        }
                } else {
                        len = data_avail;
                        if (len > size)
                                len = size;

                        data_avail -= len;

                        if (copy_to_user(buf + ret, rng_buffer + data_avail,
                                                        len)) {
                                err = -EFAULT;
                                goto out_unlock_reading;
                        }

                        size -= len;
                        ret += len;
                }

                mutex_unlock(&reading_mutex);
                put_rng(rng);

                if (need_resched())
                        schedule_timeout_interruptible(1);

                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        goto out;
                }
        }
out:
        return ret ? : err;

out_unlock_reading:
        mutex_unlock(&reading_mutex);
out_put:
        put_rng(rng);
        goto out;
}

static const struct file_operations rng_chrdev_ops = {
        .owner          = THIS_MODULE,
        .open           = rng_dev_open,
        .read           = rng_dev_read,
        .llseek         = noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
        .minor          = HWRNG_MINOR,
        .name           = RNG_MODULE_NAME,
        .nodename       = "hwrng",
        .fops           = &rng_chrdev_ops,
        .groups         = rng_dev_groups,
};

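/*
 * Switch to the highest-quality registered device (the head of rng_list),
 * or drop the current one if the list is empty. On success, clears
 * cur_rng_set_by_user. Caller holds rng_mutex.
 */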
static int enable_best_rng(void)
{
        int ret = -ENODEV;

        BUG_ON(!mutex_is_locked(&rng_mutex));

        /* rng_list is sorted by quality, use the best (=first) one */
        if (!list_empty(&rng_list)) {
                struct hwrng *new_rng;

                new_rng = list_entry(rng_list.next, struct hwrng, list);
                ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
                if (!ret)
                        cur_rng_set_by_user = 0;
        } else {
                drop_current_rng();
                cur_rng_set_by_user = 0;
                ret = 0;
        }

        return ret;
}

static ssize_t hwrng_attr_current_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t len)
{
        int err = -ENODEV;
        struct hwrng *rng;

        err = mutex_lock_interruptible(&rng_mutex);
        if (err)
                return -ERESTARTSYS;

        if (sysfs_streq(buf, "")) {
                err = enable_best_rng();
        } else {
                list_for_each_entry(rng, &rng_list, list) {
                        if (sysfs_streq(rng->name, buf)) {
                                cur_rng_set_by_user = 1;
                                err = set_current_rng(rng);
                                break;
                        }
                }
        }

        mutex_unlock(&rng_mutex);

        return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        ssize_t ret;
        struct hwrng *rng;

        rng = get_current_rng();
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
        put_rng(rng);

        return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        int err;
        struct hwrng *rng;

        err = mutex_lock_interruptible(&rng_mutex);
        if (err)
                return -ERESTARTSYS;
        buf[0] = '\0';
        list_for_each_entry(rng, &rng_list, list) {
                strlcat(buf, rng->name, PAGE_SIZE);
                strlcat(buf, " ", PAGE_SIZE);
        }
        strlcat(buf, "\n", PAGE_SIZE);
        mutex_unlock(&rng_mutex);

        return strlen(buf);
}

static ssize_t hwrng_attr_selected_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
                   hwrng_attr_current_show,
                   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
                   hwrng_attr_available_show,
                   NULL);
static DEVICE_ATTR(rng_selected, S_IRUGO,
                   hwrng_attr_selected_show,
                   NULL);

static struct attribute *rng_dev_attrs[] = {
        &dev_attr_rng_current.attr,
        &dev_attr_rng_available.attr,
        &dev_attr_rng_selected.attr,
        NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
        misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
        return misc_register(&rng_miscdev);
}

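/*
 * Body of the khwrngd kernel thread: repeatedly read from the current
 * device and feed the data to the input pool, crediting entropy in
 * proportion to current_quality (current_quality/1024 bits per data bit).
 */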
static int hwrng_fillfn(void *unused)
{
        long rc;

        while (!kthread_should_stop()) {
                struct hwrng *rng;

                rng = get_current_rng();
                if (IS_ERR(rng) || !rng)
                        break;
                mutex_lock(&reading_mutex);
                rc = rng_get_data(rng, rng_fillbuf,
                                  rng_buffer_size(), 1);
                mutex_unlock(&reading_mutex);
                put_rng(rng);
                if (rc <= 0) {
                        pr_warn("hwrng: no data available\n");
                        msleep_interruptible(10000);
                        continue;
                }
                /* Outside lock, sure, but y'know: randomness. */
                add_hwgenerator_randomness((void *)rng_fillbuf, rc,
                                           rc * current_quality * 8 >> 10);
        }
        hwrng_fill = NULL;
        return 0;
}

static void start_khwrngd(void)
{
        hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
        if (IS_ERR(hwrng_fill)) {
                pr_err("hwrng_fill thread creation failed\n");
                hwrng_fill = NULL;
        }
}

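/**
 * hwrng_register - register a hardware random number generator
 * @rng: the device to register; needs a name and either a ->read or a
 *       ->data_read callback
 *
 * Inserts the device into rng_list (sorted by descending quality) and makes
 * it current if no device is current yet, or if it beats the current one's
 * quality and the user has not pinned a device via sysfs.
 */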
int hwrng_register(struct hwrng *rng)
{
        int err = -EINVAL;
        struct hwrng *old_rng, *tmp;
        struct list_head *rng_list_ptr;

        if (!rng->name || (!rng->data_read && !rng->read))
                goto out;

        mutex_lock(&rng_mutex);
        /* Must not register two RNGs with the same name. */
        err = -EEXIST;
        list_for_each_entry(tmp, &rng_list, list) {
                if (strcmp(tmp->name, rng->name) == 0)
                        goto out_unlock;
        }

        init_completion(&rng->cleanup_done);
        complete(&rng->cleanup_done);

        /* rng_list is sorted by decreasing quality */
        list_for_each(rng_list_ptr, &rng_list) {
                tmp = list_entry(rng_list_ptr, struct hwrng, list);
                if (tmp->quality < rng->quality)
                        break;
        }
        list_add_tail(&rng->list, rng_list_ptr);

        old_rng = current_rng;
        err = 0;
        if (!old_rng ||
            (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
                /*
                 * Set new rng as current as the new rng source
                 * provides better entropy quality and was not
                 * chosen by userspace.
                 */
                err = set_current_rng(rng);
                if (err)
                        goto out_unlock;
        }

        if (old_rng && !rng->init) {
                /*
                 * Use a new device's input to add some randomness to
                 * the system.  If this rng device isn't going to be
                 * used right away, its init function hasn't been
                 * called yet; so only use the randomness from devices
                 * that don't need an init callback.
                 */
                add_early_randomness(rng);
        }

out_unlock:
        mutex_unlock(&rng_mutex);
out:
        return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

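/**
 * hwrng_unregister - unregister a hardware random number generator
 * @rng: the device to remove
 *
 * Takes the device off rng_list, switches to the next-best device if this
 * one was current, stops khwrngd if no devices remain, and blocks until the
 * last reference is dropped and ->cleanup has run.
 */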
void hwrng_unregister(struct hwrng *rng)
{
        int err;

        mutex_lock(&rng_mutex);

        list_del(&rng->list);
        if (current_rng == rng) {
                err = enable_best_rng();
                if (err) {
                        drop_current_rng();
                        cur_rng_set_by_user = 0;
                }
        }

        if (list_empty(&rng_list)) {
                mutex_unlock(&rng_mutex);
                if (hwrng_fill)
                        kthread_stop(hwrng_fill);
        } else
                mutex_unlock(&rng_mutex);

        wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
        hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
        struct hwrng **r = res;

        if (WARN_ON(!r || !*r))
                return 0;

        return *r == data;
}

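/*
 * Device-managed variant of hwrng_register(): the RNG is unregistered
 * automatically when @dev is unbound.
 */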
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
        struct hwrng **ptr;
        int error;

        ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        error = hwrng_register(rng);
        if (error) {
                devres_free(ptr);
                return error;
        }

        *ptr = rng;
        devres_add(dev, ptr);
        return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
        devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

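/*
 * Module init: allocate the shared transfer buffers, then register the
 * /dev/hwrng misc device together with its sysfs attributes (rng_dev_groups).
 */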
static int __init hwrng_modinit(void)
{
        int ret = -ENOMEM;

        /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
        rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
        if (!rng_buffer)
                return -ENOMEM;

        rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
        if (!rng_fillbuf) {
                kfree(rng_buffer);
                return -ENOMEM;
        }

        ret = register_miscdev();
        if (ret) {
                kfree(rng_fillbuf);
                kfree(rng_buffer);
        }

        return ret;
}

static void __exit hwrng_modexit(void)
{
        mutex_lock(&rng_mutex);
        BUG_ON(current_rng);
        kfree(rng_buffer);
        kfree(rng_fillbuf);
        mutex_unlock(&rng_mutex);

        unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");