GNU Linux-libre 4.9.309-gnu1
1 /*
2  *  linux/drivers/char/mem.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  Added devfs support.
7  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9  */
10
11 #include <linux/mm.h>
12 #include <linux/miscdevice.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mman.h>
16 #include <linux/random.h>
17 #include <linux/init.h>
18 #include <linux/raw.h>
19 #include <linux/tty.h>
20 #include <linux/capability.h>
21 #include <linux/ptrace.h>
22 #include <linux/device.h>
23 #include <linux/highmem.h>
24 #include <linux/backing-dev.h>
25 #include <linux/shmem_fs.h>
26 #include <linux/splice.h>
27 #include <linux/pfn.h>
28 #include <linux/export.h>
29 #include <linux/io.h>
30 #include <linux/uio.h>
31
32 #include <linux/uaccess.h>
33
34 #ifdef CONFIG_IA64
35 # include <linux/efi.h>
36 #endif
37
38 #define DEVPORT_MINOR   4
39
40 static inline unsigned long size_inside_page(unsigned long start,
41                                              unsigned long size)
42 {
43         unsigned long sz;
44
45         sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
46
47         return min(sz, size);
48 }
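
/*
 * Editor's illustration (not in the original source): size_inside_page()
 * returns how many of the requested bytes fit before the next page
 * boundary, so the copy loops below never cross a page in one step.
 * Assuming 4 KiB pages:
 *
 *	size_inside_page(0x1ff0, 0x100) == 0x10;   16 bytes left in the page
 *	size_inside_page(0x2000, 0x100) == 0x100;  the request fits entirely
 */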
49
50 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
51 static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
52 {
53         return addr + count <= __pa(high_memory);
54 }
55
56 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
57 {
58         return 1;
59 }
60 #endif
61
62 #ifdef CONFIG_STRICT_DEVMEM
63 static inline int page_is_allowed(unsigned long pfn)
64 {
65         return devmem_is_allowed(pfn);
66 }
67 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
68 {
69         u64 from = ((u64)pfn) << PAGE_SHIFT;
70         u64 to = from + size;
71         u64 cursor = from;
72
73         while (cursor < to) {
74                 if (!devmem_is_allowed(pfn))
75                         return 0;
76                 cursor += PAGE_SIZE;
77                 pfn++;
78         }
79         return 1;
80 }
81 #else
82 static inline int page_is_allowed(unsigned long pfn)
83 {
84         return 1;
85 }
86 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
87 {
88         return 1;
89 }
90 #endif
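
/*
 * Editor's note (an assumption based on the CONFIG_STRICT_DEVMEM help text,
 * not on this file): with STRICT_DEVMEM enabled, devmem_is_allowed() is
 * expected to reject pages that belong to normal system RAM, so read_mem(),
 * write_mem() and mmap_mem() below return -EPERM for such ranges, while
 * I/O-memory ranges (e.g. PCI BARs) remain reachable through /dev/mem.
 */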
91
92 #ifndef unxlate_dev_mem_ptr
93 #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
94 void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
95 {
96 }
97 #endif
98
99 static inline bool should_stop_iteration(void)
100 {
101         if (need_resched())
102                 cond_resched();
103         return fatal_signal_pending(current);
104 }
105
106 /*
107  * This function reads the *physical* memory. The f_pos points directly to the
108  * memory location.
109  */
110 static ssize_t read_mem(struct file *file, char __user *buf,
111                         size_t count, loff_t *ppos)
112 {
113         phys_addr_t p = *ppos;
114         ssize_t read, sz;
115         void *ptr;
116
117         if (p != *ppos)
118                 return 0;
119
120         if (!valid_phys_addr_range(p, count))
121                 return -EFAULT;
122         read = 0;
123 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
124         /* we don't have page 0 mapped on sparc and m68k.. */
125         if (p < PAGE_SIZE) {
126                 sz = size_inside_page(p, count);
127                 if (sz > 0) {
128                         if (clear_user(buf, sz))
129                                 return -EFAULT;
130                         buf += sz;
131                         p += sz;
132                         count -= sz;
133                         read += sz;
134                 }
135         }
136 #endif
137
138         while (count > 0) {
139                 unsigned long remaining;
140                 int allowed;
141
142                 sz = size_inside_page(p, count);
143
144                 allowed = page_is_allowed(p >> PAGE_SHIFT);
145                 if (!allowed)
146                         return -EPERM;
147                 if (allowed == 2) {
148                         /* Show zeros for restricted memory. */
149                         remaining = clear_user(buf, sz);
150                 } else {
151                         /*
152                          * On ia64 if a page has been mapped somewhere as
153                          * uncached, then it must also be accessed uncached
154                          * by the kernel or data corruption may occur.
155                          */
156                         ptr = xlate_dev_mem_ptr(p);
157                         if (!ptr)
158                                 return -EFAULT;
159
160                         remaining = copy_to_user(buf, ptr, sz);
161
162                         unxlate_dev_mem_ptr(p, ptr);
163                 }
164
165                 if (remaining)
166                         return -EFAULT;
167
168                 buf += sz;
169                 p += sz;
170                 count -= sz;
171                 read += sz;
172                 if (should_stop_iteration())
173                         break;
174         }
175
176         *ppos += read;
177         return read;
178 }
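
/*
 * Usage sketch (editor's addition, not part of the original driver): a
 * user-space reader positions the /dev/mem file offset at the physical
 * address and reads from there, which is exactly how *ppos is interpreted
 * above.  The 0xf0000 address (legacy BIOS area on x86) is only an example.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int dump_phys(char *buf, size_t len)
 *	{
 *		int fd = open("/dev/mem", O_RDONLY);
 *		ssize_t n = -1;
 *
 *		if (fd < 0)
 *			return -1;
 *		if (lseek(fd, 0xf0000, SEEK_SET) == 0xf0000)
 *			n = read(fd, buf, len);
 *		close(fd);
 *		return n < 0 ? -1 : 0;
 *	}
 */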
179
180 static ssize_t write_mem(struct file *file, const char __user *buf,
181                          size_t count, loff_t *ppos)
182 {
183         phys_addr_t p = *ppos;
184         ssize_t written, sz;
185         unsigned long copied;
186         void *ptr;
187
188         if (p != *ppos)
189                 return -EFBIG;
190
191         if (!valid_phys_addr_range(p, count))
192                 return -EFAULT;
193
194         written = 0;
195
196 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
197         /* we don't have page 0 mapped on sparc and m68k.. */
198         if (p < PAGE_SIZE) {
199                 sz = size_inside_page(p, count);
200                 /* Page 0 isn't mapped: skip the write but count it as written. */
201                 buf += sz;
202                 p += sz;
203                 count -= sz;
204                 written += sz;
205         }
206 #endif
207
208         while (count > 0) {
209                 int allowed;
210
211                 sz = size_inside_page(p, count);
212
213                 allowed = page_is_allowed(p >> PAGE_SHIFT);
214                 if (!allowed)
215                         return -EPERM;
216
217                 /* Skip actual writing when a page is marked as restricted. */
218                 if (allowed == 1) {
219                         /*
220                          * On ia64 if a page has been mapped somewhere as
221                          * uncached, then it must also be accessed uncached
222                          * by the kernel or data corruption may occur.
223                          */
224                         ptr = xlate_dev_mem_ptr(p);
225                         if (!ptr) {
226                                 if (written)
227                                         break;
228                                 return -EFAULT;
229                         }
230
231                         copied = copy_from_user(ptr, buf, sz);
232                         unxlate_dev_mem_ptr(p, ptr);
233                         if (copied) {
234                                 written += sz - copied;
235                                 if (written)
236                                         break;
237                                 return -EFAULT;
238                         }
239                 }
240
241                 buf += sz;
242                 p += sz;
243                 count -= sz;
244                 written += sz;
245                 if (should_stop_iteration())
246                         break;
247         }
248
249         *ppos += written;
250         return written;
251 }
252
253 int __weak phys_mem_access_prot_allowed(struct file *file,
254         unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
255 {
256         return 1;
257 }
258
259 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
260
261 /*
262  * Architectures vary in how they handle caching for addresses
263  * outside of main memory.
264  *
265  */
266 #ifdef pgprot_noncached
267 static int uncached_access(struct file *file, phys_addr_t addr)
268 {
269 #if defined(CONFIG_IA64)
270         /*
271          * On ia64, we ignore O_DSYNC because we cannot tolerate memory
272          * attribute aliases.
273          */
274         return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
275 #elif defined(CONFIG_MIPS)
276         {
277                 extern int __uncached_access(struct file *file,
278                                              unsigned long addr);
279
280                 return __uncached_access(file, addr);
281         }
282 #else
283         /*
284          * Accessing memory above the top of memory the kernel knows about,
285          * or through a file pointer that was marked O_DSYNC, will be done
286          * non-cached.
287          */
288         if (file->f_flags & O_DSYNC)
289                 return 1;
290         return addr >= __pa(high_memory);
291 #endif
292 }
293 #endif
294
295 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
296                                      unsigned long size, pgprot_t vma_prot)
297 {
298 #ifdef pgprot_noncached
299         phys_addr_t offset = pfn << PAGE_SHIFT;
300
301         if (uncached_access(file, offset))
302                 return pgprot_noncached(vma_prot);
303 #endif
304         return vma_prot;
305 }
306 #endif
307
308 #ifndef CONFIG_MMU
309 static unsigned long get_unmapped_area_mem(struct file *file,
310                                            unsigned long addr,
311                                            unsigned long len,
312                                            unsigned long pgoff,
313                                            unsigned long flags)
314 {
315         if (!valid_mmap_phys_addr_range(pgoff, len))
316                 return (unsigned long) -EINVAL;
317         return pgoff << PAGE_SHIFT;
318 }
319
320 /* permit direct mmap, for read, write or exec */
321 static unsigned memory_mmap_capabilities(struct file *file)
322 {
323         return NOMMU_MAP_DIRECT |
324                 NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
325 }
326
327 static unsigned zero_mmap_capabilities(struct file *file)
328 {
329         return NOMMU_MAP_COPY;
330 }
331
332 /* can't do an in-place private mapping if there's no MMU */
333 static inline int private_mapping_ok(struct vm_area_struct *vma)
334 {
335         return vma->vm_flags & VM_MAYSHARE;
336 }
337 #else
338
339 static inline int private_mapping_ok(struct vm_area_struct *vma)
340 {
341         return 1;
342 }
343 #endif
344
345 static const struct vm_operations_struct mmap_mem_ops = {
346 #ifdef CONFIG_HAVE_IOREMAP_PROT
347         .access = generic_access_phys
348 #endif
349 };
350
351 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
352 {
353         size_t size = vma->vm_end - vma->vm_start;
354         phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
355
356         /* It's illegal to wrap around the end of the physical address space. */
357         if (offset + (phys_addr_t)size - 1 < offset)
358                 return -EINVAL;
359
360         if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
361                 return -EINVAL;
362
363         if (!private_mapping_ok(vma))
364                 return -ENOSYS;
365
366         if (!range_is_allowed(vma->vm_pgoff, size))
367                 return -EPERM;
368
369         if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
370                                                 &vma->vm_page_prot))
371                 return -EINVAL;
372
373         vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
374                                                  size,
375                                                  vma->vm_page_prot);
376
377         vma->vm_ops = &mmap_mem_ops;
378
379         /* Remap-pfn-range will mark the range VM_IO */
380         if (remap_pfn_range(vma,
381                             vma->vm_start,
382                             vma->vm_pgoff,
383                             size,
384                             vma->vm_page_prot)) {
385                 return -EAGAIN;
386         }
387         return 0;
388 }
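
/*
 * Usage sketch (editor's addition): mmap_mem() above backs an mmap() of
 * /dev/mem in which the file offset selects the physical address.  A
 * typical caller maps one page of a device's MMIO region; "phys" must be
 * page aligned, and 4096 is only an assumed page size.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void *map_phys_page(off_t phys)
 *	{
 *		int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *		void *p;
 *
 *		if (fd < 0)
 *			return NULL;
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, phys);
 *		close(fd);
 *		return p == MAP_FAILED ? NULL : p;
 *	}
 *
 * The mapping stays valid after the close(); O_SYNC is the conventional way
 * to ask for a non-cached mapping via the O_DSYNC handling above.
 */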
389
390 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
391 {
392         unsigned long pfn;
393
394         /* Turn a kernel-virtual address into a physical page frame */
395         pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
396
397         /*
398          * RED-PEN: on some architectures there is more mapped memory than
399          * available in mem_map which pfn_valid checks for. Perhaps should add a
400          * new macro here.
401          *
402          * RED-PEN: vmalloc is not supported right now.
403          */
404         if (!pfn_valid(pfn))
405                 return -EIO;
406
407         vma->vm_pgoff = pfn;
408         return mmap_mem(file, vma);
409 }
410
411 /*
412  * This function reads the *virtual* memory as seen by the kernel.
413  */
414 static ssize_t read_kmem(struct file *file, char __user *buf,
415                          size_t count, loff_t *ppos)
416 {
417         unsigned long p = *ppos;
418         ssize_t low_count, read, sz;
419         char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
420         int err = 0;
421
422         read = 0;
423         if (p < (unsigned long) high_memory) {
424                 low_count = count;
425                 if (count > (unsigned long)high_memory - p)
426                         low_count = (unsigned long)high_memory - p;
427
428 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
429                 /* we don't have page 0 mapped on sparc and m68k.. */
430                 if (p < PAGE_SIZE && low_count > 0) {
431                         sz = size_inside_page(p, low_count);
432                         if (clear_user(buf, sz))
433                                 return -EFAULT;
434                         buf += sz;
435                         p += sz;
436                         read += sz;
437                         low_count -= sz;
438                         count -= sz;
439                 }
440 #endif
441                 while (low_count > 0) {
442                         sz = size_inside_page(p, low_count);
443
444                         /*
445                          * On ia64 if a page has been mapped somewhere as
446                          * uncached, then it must also be accessed uncached
447                          * by the kernel or data corruption may occur
448                          */
449                         kbuf = xlate_dev_kmem_ptr((void *)p);
450                         if (!virt_addr_valid(kbuf))
451                                 return -ENXIO;
452
453                         if (copy_to_user(buf, kbuf, sz))
454                                 return -EFAULT;
455                         buf += sz;
456                         p += sz;
457                         read += sz;
458                         low_count -= sz;
459                         count -= sz;
460                         if (should_stop_iteration()) {
461                                 count = 0;
462                                 break;
463                         }
464                 }
465         }
466
467         if (count > 0) {
468                 kbuf = (char *)__get_free_page(GFP_KERNEL);
469                 if (!kbuf)
470                         return -ENOMEM;
471                 while (count > 0) {
472                         sz = size_inside_page(p, count);
473                         if (!is_vmalloc_or_module_addr((void *)p)) {
474                                 err = -ENXIO;
475                                 break;
476                         }
477                         sz = vread(kbuf, (char *)p, sz);
478                         if (!sz)
479                                 break;
480                         if (copy_to_user(buf, kbuf, sz)) {
481                                 err = -EFAULT;
482                                 break;
483                         }
484                         count -= sz;
485                         buf += sz;
486                         read += sz;
487                         p += sz;
488                         if (should_stop_iteration())
489                                 break;
490                 }
491                 free_page((unsigned long)kbuf);
492         }
493         *ppos = p;
494         return read ? read : err;
495 }
496
497
498 static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
499                                 size_t count, loff_t *ppos)
500 {
501         ssize_t written, sz;
502         unsigned long copied;
503
504         written = 0;
505 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
506         /* we don't have page 0 mapped on sparc and m68k.. */
507         if (p < PAGE_SIZE) {
508                 sz = size_inside_page(p, count);
509                 /* Page 0 isn't mapped: skip the write but count it as written. */
510                 buf += sz;
511                 p += sz;
512                 count -= sz;
513                 written += sz;
514         }
515 #endif
516
517         while (count > 0) {
518                 void *ptr;
519
520                 sz = size_inside_page(p, count);
521
522                 /*
523                  * On ia64 if a page has been mapped somewhere as uncached, then
524                  * it must also be accessed uncached by the kernel or data
525                  * corruption may occur.
526                  */
527                 ptr = xlate_dev_kmem_ptr((void *)p);
528                 if (!virt_addr_valid(ptr))
529                         return -ENXIO;
530
531                 copied = copy_from_user(ptr, buf, sz);
532                 if (copied) {
533                         written += sz - copied;
534                         if (written)
535                                 break;
536                         return -EFAULT;
537                 }
538                 buf += sz;
539                 p += sz;
540                 count -= sz;
541                 written += sz;
542                 if (should_stop_iteration())
543                         break;
544         }
545
546         *ppos += written;
547         return written;
548 }
549
550 /*
551  * This function writes to the *virtual* memory as seen by the kernel.
552  */
553 static ssize_t write_kmem(struct file *file, const char __user *buf,
554                           size_t count, loff_t *ppos)
555 {
556         unsigned long p = *ppos;
557         ssize_t wrote = 0;
558         ssize_t virtr = 0;
559         char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
560         int err = 0;
561
562         if (p < (unsigned long) high_memory) {
563                 unsigned long to_write = min_t(unsigned long, count,
564                                                (unsigned long)high_memory - p);
565                 wrote = do_write_kmem(p, buf, to_write, ppos);
566                 if (wrote != to_write)
567                         return wrote;
568                 p += wrote;
569                 buf += wrote;
570                 count -= wrote;
571         }
572
573         if (count > 0) {
574                 kbuf = (char *)__get_free_page(GFP_KERNEL);
575                 if (!kbuf)
576                         return wrote ? wrote : -ENOMEM;
577                 while (count > 0) {
578                         unsigned long sz = size_inside_page(p, count);
579                         unsigned long n;
580
581                         if (!is_vmalloc_or_module_addr((void *)p)) {
582                                 err = -ENXIO;
583                                 break;
584                         }
585                         n = copy_from_user(kbuf, buf, sz);
586                         if (n) {
587                                 err = -EFAULT;
588                                 break;
589                         }
590                         vwrite(kbuf, (char *)p, sz);
591                         count -= sz;
592                         buf += sz;
593                         virtr += sz;
594                         p += sz;
595                         if (should_stop_iteration())
596                                 break;
597                 }
598                 free_page((unsigned long)kbuf);
599         }
600
601         *ppos = p;
602         return virtr + wrote ? : err;
603 }
604
605 static ssize_t read_port(struct file *file, char __user *buf,
606                          size_t count, loff_t *ppos)
607 {
608         unsigned long i = *ppos;
609         char __user *tmp = buf;
610
611         if (!access_ok(VERIFY_WRITE, buf, count))
612                 return -EFAULT;
613         while (count-- > 0 && i < 65536) {
614                 if (__put_user(inb(i), tmp) < 0)
615                         return -EFAULT;
616                 i++;
617                 tmp++;
618         }
619         *ppos = i;
620         return tmp-buf;
621 }
622
623 static ssize_t write_port(struct file *file, const char __user *buf,
624                           size_t count, loff_t *ppos)
625 {
626         unsigned long i = *ppos;
627         const char __user *tmp = buf;
628
629         if (!access_ok(VERIFY_READ, buf, count))
630                 return -EFAULT;
631         while (count-- > 0 && i < 65536) {
632                 char c;
633
634                 if (__get_user(c, tmp)) {
635                         if (tmp > buf)
636                                 break;
637                         return -EFAULT;
638                 }
639                 outb(c, i);
640                 i++;
641                 tmp++;
642         }
643         *ppos = i;
644         return tmp-buf;
645 }
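
/*
 * Usage sketch (editor's addition): /dev/port exposes the I/O port space
 * byte by byte, with the file offset acting as the port number, so reading
 * one byte at offset 0x61 behaves like inb(0x61).  Port 0x61 is only an
 * illustrative choice, and the open() requires CAP_SYS_RAWIO (see
 * open_port() below).
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int read_port_byte(unsigned short port, unsigned char *val)
 *	{
 *		int fd = open("/dev/port", O_RDONLY);
 *		int ret = -1;
 *
 *		if (fd < 0)
 *			return -1;
 *		if (lseek(fd, port, SEEK_SET) == port && read(fd, val, 1) == 1)
 *			ret = 0;
 *		close(fd);
 *		return ret;
 *	}
 */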
646
647 static ssize_t read_null(struct file *file, char __user *buf,
648                          size_t count, loff_t *ppos)
649 {
650         return 0;
651 }
652
653 static ssize_t write_null(struct file *file, const char __user *buf,
654                           size_t count, loff_t *ppos)
655 {
656         return count;
657 }
658
659 static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
660 {
661         return 0;
662 }
663
664 static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
665 {
666         size_t count = iov_iter_count(from);
667         iov_iter_advance(from, count);
668         return count;
669 }
670
671 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
672                         struct splice_desc *sd)
673 {
674         return sd->len;
675 }
676
677 static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
678                                  loff_t *ppos, size_t len, unsigned int flags)
679 {
680         return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
681 }
682
683 static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
684 {
685         size_t written = 0;
686
687         while (iov_iter_count(iter)) {
688                 size_t chunk = iov_iter_count(iter), n;
689
690                 if (chunk > PAGE_SIZE)
691                         chunk = PAGE_SIZE;      /* Just for latency reasons */
692                 n = iov_iter_zero(chunk, iter);
693                 if (!n && iov_iter_count(iter))
694                         return written ? written : -EFAULT;
695                 written += n;
696                 if (signal_pending(current))
697                         return written ? written : -ERESTARTSYS;
698                 cond_resched();
699         }
700         return written;
701 }
702
703 static int mmap_zero(struct file *file, struct vm_area_struct *vma)
704 {
705 #ifndef CONFIG_MMU
706         return -ENOSYS;
707 #endif
708         if (vma->vm_flags & VM_SHARED)
709                 return shmem_zero_setup(vma);
710         return 0;
711 }
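
/*
 * Usage sketch (editor's addition): mapping /dev/zero is a long-standing
 * way to obtain zero-filled, demand-paged memory.  MAP_SHARED goes through
 * shmem_zero_setup() above, while MAP_PRIVATE behaves like an anonymous
 * mapping.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void *zero_map(size_t len)
 *	{
 *		int fd = open("/dev/zero", O_RDWR);
 *		void *p;
 *
 *		if (fd < 0)
 *			return NULL;
 *		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 *		close(fd);
 *		return p == MAP_FAILED ? NULL : p;
 *	}
 */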
712
713 static unsigned long get_unmapped_area_zero(struct file *file,
714                                 unsigned long addr, unsigned long len,
715                                 unsigned long pgoff, unsigned long flags)
716 {
717 #ifdef CONFIG_MMU
718         if (flags & MAP_SHARED) {
719                 /*
720                  * mmap_zero() will call shmem_zero_setup() to create a file,
721                  * so use shmem's get_unmapped_area in case it can be huge;
722                  * and pass NULL for file as in mmap.c's get_unmapped_area(),
723                  * so as not to confuse shmem with our handle on "/dev/zero".
724                  */
725                 return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
726         }
727
728         /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
729         return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
730 #else
731         return -ENOSYS;
732 #endif
733 }
734
735 static ssize_t write_full(struct file *file, const char __user *buf,
736                           size_t count, loff_t *ppos)
737 {
738         return -ENOSPC;
739 }
740
741 /*
742  * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
743  * can fopen() both devices with "a" now.  This was previously impossible.
744  * -- SRB.
745  */
746 static loff_t null_lseek(struct file *file, loff_t offset, int orig)
747 {
748         return file->f_pos = 0;
749 }
750
751 /*
752  * The memory devices use the full 32/64 bits of the offset, and so we cannot
753  * check against negative addresses: they are ok. The return value is weird,
754  * though, in that case (0).
755  *
756  * also note that seeking relative to the "end of file" isn't supported:
757  * it has no meaning, so it returns -EINVAL.
758  */
759 static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
760 {
761         loff_t ret;
762
763         inode_lock(file_inode(file));
764         switch (orig) {
765         case SEEK_CUR:
766                 offset += file->f_pos;
767         case SEEK_SET:
768                 /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
769                 if ((unsigned long long)offset >= -MAX_ERRNO) {
770                         ret = -EOVERFLOW;
771                         break;
772                 }
773                 file->f_pos = offset;
774                 ret = file->f_pos;
775                 force_successful_syscall_return();
776                 break;
777         default:
778                 ret = -EINVAL;
779         }
780         inode_unlock(file_inode(file));
781         return ret;
782 }
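
/*
 * Editor's note: MAX_ERRNO is 4095, so the check above rejects only the
 * 4095 highest possible byte offsets; any of those, if stored in f_pos and
 * returned, could be mistaken by user space for an errno value (the
 * "f_pos=-9 as -EBADF" case mentioned in the comment).
 */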
783
784 static int open_port(struct inode *inode, struct file *filp)
785 {
786         return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
787 }
788
789 #define zero_lseek      null_lseek
790 #define full_lseek      null_lseek
791 #define write_zero      write_null
792 #define write_iter_zero write_iter_null
793 #define open_mem        open_port
794 #define open_kmem       open_mem
795
796 static const struct file_operations __maybe_unused mem_fops = {
797         .llseek         = memory_lseek,
798         .read           = read_mem,
799         .write          = write_mem,
800         .mmap           = mmap_mem,
801         .open           = open_mem,
802 #ifndef CONFIG_MMU
803         .get_unmapped_area = get_unmapped_area_mem,
804         .mmap_capabilities = memory_mmap_capabilities,
805 #endif
806 };
807
808 static const struct file_operations __maybe_unused kmem_fops = {
809         .llseek         = memory_lseek,
810         .read           = read_kmem,
811         .write          = write_kmem,
812         .mmap           = mmap_kmem,
813         .open           = open_kmem,
814 #ifndef CONFIG_MMU
815         .get_unmapped_area = get_unmapped_area_mem,
816         .mmap_capabilities = memory_mmap_capabilities,
817 #endif
818 };
819
820 static const struct file_operations null_fops = {
821         .llseek         = null_lseek,
822         .read           = read_null,
823         .write          = write_null,
824         .read_iter      = read_iter_null,
825         .write_iter     = write_iter_null,
826         .splice_write   = splice_write_null,
827 };
828
829 static const struct file_operations __maybe_unused port_fops = {
830         .llseek         = memory_lseek,
831         .read           = read_port,
832         .write          = write_port,
833         .open           = open_port,
834 };
835
836 static const struct file_operations zero_fops = {
837         .llseek         = zero_lseek,
838         .write          = write_zero,
839         .read_iter      = read_iter_zero,
840         .write_iter     = write_iter_zero,
841         .mmap           = mmap_zero,
842         .get_unmapped_area = get_unmapped_area_zero,
843 #ifndef CONFIG_MMU
844         .mmap_capabilities = zero_mmap_capabilities,
845 #endif
846 };
847
848 static const struct file_operations full_fops = {
849         .llseek         = full_lseek,
850         .read_iter      = read_iter_zero,
851         .write          = write_full,
852 };
853
854 static const struct memdev {
855         const char *name;
856         umode_t mode;
857         const struct file_operations *fops;
858         fmode_t fmode;
859 } devlist[] = {
860 #ifdef CONFIG_DEVMEM
861          [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
862 #endif
863 #ifdef CONFIG_DEVKMEM
864          [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
865 #endif
866          [3] = { "null", 0666, &null_fops, 0 },
867 #ifdef CONFIG_DEVPORT
868          [4] = { "port", 0, &port_fops, 0 },
869 #endif
870          [5] = { "zero", 0666, &zero_fops, 0 },
871          [7] = { "full", 0666, &full_fops, 0 },
872          [8] = { "random", 0666, &random_fops, 0 },
873          [9] = { "urandom", 0666, &urandom_fops, 0 },
874 #ifdef CONFIG_PRINTK
875         [11] = { "kmsg", 0644, &kmsg_fops, 0 },
876 #endif
877 };
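
/*
 * Editor's note: the array index is the minor number under MEM_MAJOR (1),
 * so the familiar character devices map as null = 1:3, zero = 1:5,
 * full = 1:7, random = 1:8, urandom = 1:9.  The nodes are normally created
 * by devtmpfs/udev, but an equivalent sketch using mknod(2) would be:
 *
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *
 *	mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 *	mknod("/dev/zero", S_IFCHR | 0666, makedev(1, 5));
 *	mknod("/dev/urandom", S_IFCHR | 0666, makedev(1, 9));
 */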
878
879 static int memory_open(struct inode *inode, struct file *filp)
880 {
881         int minor;
882         const struct memdev *dev;
883
884         minor = iminor(inode);
885         if (minor >= ARRAY_SIZE(devlist))
886                 return -ENXIO;
887
888         dev = &devlist[minor];
889         if (!dev->fops)
890                 return -ENXIO;
891
892         filp->f_op = dev->fops;
893         filp->f_mode |= dev->fmode;
894
895         if (dev->fops->open)
896                 return dev->fops->open(inode, filp);
897
898         return 0;
899 }
900
901 static const struct file_operations memory_fops = {
902         .open = memory_open,
903         .llseek = noop_llseek,
904 };
905
906 static char *mem_devnode(struct device *dev, umode_t *mode)
907 {
908         if (mode && devlist[MINOR(dev->devt)].mode)
909                 *mode = devlist[MINOR(dev->devt)].mode;
910         return NULL;
911 }
912
913 static struct class *mem_class;
914
915 static int __init chr_dev_init(void)
916 {
917         int minor;
918
919         if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
920                 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
921
922         mem_class = class_create(THIS_MODULE, "mem");
923         if (IS_ERR(mem_class))
924                 return PTR_ERR(mem_class);
925
926         mem_class->devnode = mem_devnode;
927         for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
928                 if (!devlist[minor].name)
929                         continue;
930
931                 /*
932                  * Create /dev/port?
933                  */
934                 if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
935                         continue;
936
937                 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
938                               NULL, devlist[minor].name);
939         }
940
941         return tty_init();
942 }
943
944 fs_initcall(chr_dev_init);