treewide: Use fallthrough pseudo-keyword
[linux-block.git] / drivers / char / mem.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/pseudo_fs.h>
#include <uapi/linux/magic.h>
#include <linux/mount.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR	1
#define DEVPORT_MINOR	4

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

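/*
 * Illustrative example (assuming 4K pages): size_inside_page(0x1ff0, 0x100)
 * returns 0x10, so each pass of the copy loops below stays within a single
 * page and never straddles a page boundary.
 */
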
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

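/*
 * Note: devmem_is_allowed() is effectively tri-state here: 0 denies the
 * access, 1 permits it, and 2 permits it while making read_mem() below
 * show zeros instead of the real contents of the restricted page.
 */
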
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

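/*
 * Called once per page by the copy loops below: give up the CPU if a
 * reschedule is due, and abort the transfer early if the caller has
 * received a fatal signal, so that huge transfers cannot wedge the task.
 */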
static inline bool should_stop_iteration(void)
{
	if (need_resched())
		cond_resched();
	return fatal_signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;
	char *bounce;
	int err;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	while (count > 0) {
		unsigned long remaining;
		int allowed, probe;

		sz = size_inside_page(p, count);

		err = -EPERM;
		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			goto failed;

		err = -EFAULT;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				goto failed;

			probe = copy_from_kernel_nofault(bounce, ptr, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (probe)
				goto failed;

			remaining = copy_to_user(buf, bounce, sz);
		}

		if (remaining)
			goto failed;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
		if (should_stop_iteration())
			break;
	}
	kfree(bounce);

	*ppos += read;
	return read;

failed:
	kfree(bounce);
	return err;
}

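/*
 * Illustrative userspace counterpart: the file offset is the physical
 * address, so a read looks like
 *
 *	fd = open("/dev/mem", O_RDONLY);
 *	lseek(fd, phys_addr, SEEK_SET);
 *	read(fd, buf, len);
 *
 * With CONFIG_STRICT_DEVMEM enabled this only succeeds for allowed pfns.
 */
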
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
		if (should_stop_iteration())
			break;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about, or through
	 * a file pointer that was marked O_DSYNC, will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* Does it even fit in phys_addr_t? */
	if (offset >> PAGE_SHIFT != vma->vm_pgoff)
		return -EINVAL;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

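/*
 * Illustrative userspace counterpart: mapping one page of a device at
 * page-aligned physical address "phys":
 *
 *	fd = open("/dev/mem", O_RDWR | O_DSYNC);
 *	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
 *
 * The O_DSYNC flag makes uncached_access() above request a non-cached
 * mapping on most architectures.
 */
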
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);
			if (!virt_addr_valid(kbuf))
				return -ENXIO;

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
			if (should_stop_iteration()) {
				count = 0;
				break;
			}
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
			if (should_stop_iteration())
				break;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}


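/*
 * Note the two-phase structure above: addresses below high_memory are read
 * through the kernel's linear mapping, while anything above it must be a
 * vmalloc or module address and is fetched with vread() via a bounce page.
 */
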
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);
		if (!virt_addr_valid(ptr))
			return -ENXIO;

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
		if (should_stop_iteration())
			break;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
			if (should_stop_iteration())
				break;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}

static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

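/*
 * For /dev/port the file offset is the I/O port number: a read at offset
 * 0x60 performs inb(0x60), and writes go out byte by byte through outb().
 * Offsets are limited to 0..65535, the size of the x86 port address space.
 */
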
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}

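/*
 * Zero-filling above is done at most one page per iteration so that a
 * large read from /dev/zero stays responsive: pending signals are honoured
 * and cond_resched() runs between chunks.
 */
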
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	vma_set_anonymous(vma);
	return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		fallthrough;
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}

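/*
 * The -MAX_ERRNO check above rejects the topmost 4095 offsets: once such a
 * value travelled back through the syscall return path, userspace could not
 * tell it apart from a negative errno.
 */
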
static struct inode *devmem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res)
{
	/* pairs with smp_store_release() in devmem_init_inode() */
	struct inode *inode = smp_load_acquire(&devmem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init and prevent /dev/mem from
	 * establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#endif

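/*
 * revoke_devmem() above lets a driver that claims a resource tear down any
 * existing /dev/mem userspace mappings of that range: every /dev/mem open
 * shares the devmem_inode address space (see open_port() below), so one
 * call to unmap_mapping_range() reaches all of them.
 */
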
static int open_port(struct inode *inode, struct file *filp)
{
	int rc;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	rc = security_locked_down(LOCKDOWN_DEV_MEM);
	if (rc)
		return rc;

	if (iminor(inode) != DEVMEM_MINOR)
		return 0;

	/*
	 * Use a unified address space to have a single point to manage
	 * revocations when drivers want to take over a /dev/mem mapped
	 * range.
	 */
	inode->i_mapping = devmem_inode->i_mapping;
	filp->f_mapping = inode->i_mapping;

	return 0;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem

static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.write_iter	= write_iter_zero,
	.mmap		= mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};

static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	[DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	[2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	[3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	[4] = { "port", 0, &port_fops, 0 },
#endif
	[5] = { "zero", 0666, &zero_fops, 0 },
	[7] = { "full", 0666, &full_fops, 0 },
	[8] = { "random", 0666, &random_fops, 0 },
	[9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

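/*
 * The array index doubles as the minor number under char major 1 (see
 * memory_open() below), which is why minors 6 and 10 are simply left as
 * holes in the table.
 */
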
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int devmem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type devmem_fs_type = {
	.name		= "devmem",
	.owner		= THIS_MODULE,
	.init_fs_context = devmem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int devmem_init_inode(void)
{
	static struct vfsmount *devmem_vfs_mount;
	static int devmem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
		simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
		return rc;
	}

	/*
	 * Publish /dev/mem initialized.
	 * Pairs with smp_load_acquire() in revoke_devmem().
	 */
	smp_store_release(&devmem_inode, inode);

	return 0;
}

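/*
 * The "devmem" pseudo filesystem exists only to supply a superblock for the
 * shared anonymous inode above; its i_mapping is what open_port() installs
 * on every /dev/mem file, giving revoke_devmem() a single mapping to revoke.
 */
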
1da177e4
LT
1041static int __init chr_dev_init(void)
1042{
389e0cb9 1043 int minor;
1da177e4 1044
d7d4d849 1045 if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
1da177e4
LT
1046 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
1047
ca8eca68 1048 mem_class = class_create(THIS_MODULE, "mem");
6e191f7b
AB
1049 if (IS_ERR(mem_class))
1050 return PTR_ERR(mem_class);
1051
e454cea2 1052 mem_class->devnode = mem_devnode;
389e0cb9
KS
1053 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
1054 if (!devlist[minor].name)
1055 continue;
e1612de9
HM
1056
1057 /*
890537b3 1058 * Create /dev/port?
e1612de9
HM
1059 */
1060 if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
1061 continue;
3234ac66
DW
1062 if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
1063 continue;
e1612de9 1064
389e0cb9
KS
1065 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
1066 NULL, devlist[minor].name);
1067 }
ebf644c4 1068
31d1d48e 1069 return tty_init();
1da177e4
LT
1070}
1071
1072fs_initcall(chr_dev_init);