// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>

#define DEVMEM_MINOR	1
#define DEVPORT_MINOR	4

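/*
 * Return the number of bytes from @start to the end of its page, capped
 * at @size; e.g. with 4 KiB pages, start = 0x1ff0 and size = 0x100 give
 * 0x10, so callers never cross a page boundary in a single step.
 */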
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

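/*
 * With CONFIG_STRICT_DEVMEM, devmem_is_allowed() is effectively
 * tri-state as used below: 0 denies the access (-EPERM), 1 permits it,
 * and 2 marks the page restricted, in which case read_mem() substitutes
 * zeros and write_mem() silently discards the data.
 */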
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
#endif

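/*
 * Long copies through /dev/mem can hog the CPU; give the scheduler a
 * chance to run and stop early if the caller has a signal pending.
 */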
static inline bool should_stop_iteration(void)
{
	if (need_resched())
		cond_resched();
	return signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
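/*
 * Userspace sketch (hypothetical offset): dump one page of physical
 * memory with dd, assuming the kernel's STRICT_DEVMEM policy permits
 * access to that range:
 *
 *	dd if=/dev/mem of=page.bin bs=4096 skip=$((0x000F0000 / 4096)) count=1
 */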
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;
	char *bounce;
	int err;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

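	/*
	 * Read through an intermediate bounce buffer: this lets
	 * copy_from_kernel_nofault() probe each chunk first, so a bad or
	 * unmapped physical address fails with -EFAULT instead of
	 * faulting during a plain copy to userspace.
	 */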
	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	while (count > 0) {
		unsigned long remaining;
		int allowed, probe;

		sz = size_inside_page(p, count);

		err = -EPERM;
		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			goto failed;

		err = -EFAULT;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				goto failed;

			probe = copy_from_kernel_nofault(bounce, ptr, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (probe)
				goto failed;

			remaining = copy_to_user(buf, bounce, sz);
		}

		if (remaining)
			goto failed;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
		if (should_stop_iteration())
			break;
	}
	kfree(bounce);

	*ppos += read;
	return read;

failed:
	kfree(bounce);
	return err;
}

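/*
 * The write side mirrors read_mem(): writes to restricted pages are
 * accepted but dropped, and a fault mid-copy reports the bytes already
 * written rather than failing the whole request.
 */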
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
		if (should_stop_iteration())
			break;
	}

	*ppos += written;
	return written;
}

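/*
 * Weak default hook: an architecture may override this to reject or
 * adjust the page protection for a proposed /dev/mem mapping; returning
 * 1 here permits the mapping unchanged.
 */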
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return is_nommu_shared_mapping(vma->vm_flags);
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

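/*
 * ptrace()/gdb access to a VM_IO mapping goes through vm_ops->access;
 * generic_access_phys() provides that for the mappings created below,
 * on architectures that select CONFIG_HAVE_IOREMAP_PROT.
 */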
static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* Does it even fit in phys_addr_t? */
	if (offset >> PAGE_SHIFT != vma->vm_pgoff)
		return -EINVAL;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

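/*
 * Userspace sketch (hypothetical, page-aligned PHYS_ADDR): map a page
 * of MMIO or RAM read-only, assuming access to that range is permitted:
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, PHYS_ADDR);
 */
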
#ifdef CONFIG_DEVPORT
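/*
 * /dev/port gives byte-wise access to x86-style I/O ports; the file
 * offset selects the port number, so a read at offset 0x60, for
 * instance, issues inb(0x60).
 */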
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	return 0;
}

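/*
 * Fill the caller's buffer with zeros, at most a page per iteration so
 * that signals and rescheduling are still honoured on huge reads.
 */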
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		if (!need_resched())
			continue;
		if (iocb->ki_flags & IOCB_NOWAIT)
			return written ? written : -EAGAIN;
		cond_resched();
	}
	return written;
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t cleared = 0;

	while (count) {
		size_t chunk = min_t(size_t, count, PAGE_SIZE);
		size_t left;

		left = clear_user(buf + cleared, chunk);
		if (unlikely(left)) {
			cleared += (chunk - left);
			if (!cleared)
				return -EFAULT;
			break;
		}
		cleared += chunk;
		count -= chunk;

		if (signal_pending(current))
			break;
		cond_resched();
	}

	return cleared;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	vma_set_anonymous(vma);
	return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		fallthrough;
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}

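/*
 * Opening /dev/mem or /dev/port requires CAP_SYS_RAWIO and must also
 * pass the kernel lockdown check; for /dev/mem the struct file is then
 * switched to the shared iomem address space so that driver-initiated
 * revocations can unmap it later.
 */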
static int open_port(struct inode *inode, struct file *filp)
{
	int rc;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	rc = security_locked_down(LOCKDOWN_DEV_MEM);
	if (rc)
		return rc;

	if (iminor(inode) != DEVMEM_MINOR)
		return 0;

	/*
	 * Use a unified address space to have a single point to manage
	 * revocations when drivers want to take over a /dev/mem mapped
	 * range.
	 */
	filp->f_mapping = iomem_get_mapping();

	return 0;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define splice_write_zero	splice_write_null
#define open_mem	open_port

static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
	.fop_flags	= FOP_UNSIGNED_OFFSET,
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
	.uring_cmd	= uring_cmd_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.read		= read_zero,
	.write_iter	= write_iter_zero,
	.splice_read	= copy_splice_read,
	.splice_write	= splice_write_zero,
	.mmap		= mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
	.splice_read	= copy_splice_read,
};

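/*
 * Minor numbers under MEM_MAJOR index this table directly; slots with
 * no fops (such as minor 2, which once held the removed /dev/kmem) make
 * memory_open() return -ENXIO.
 */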
static const struct memdev {
	const char *name;
	const struct file_operations *fops;
	fmode_t fmode;
	umode_t mode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	[DEVMEM_MINOR] = { "mem", &mem_fops, 0, 0 },
#endif
	[3] = { "null", &null_fops, FMODE_NOWAIT, 0666 },
#ifdef CONFIG_DEVPORT
	[4] = { "port", &port_fops, 0, 0 },
#endif
	[5] = { "zero", &zero_fops, FMODE_NOWAIT, 0666 },
	[7] = { "full", &full_fops, 0, 0666 },
	[8] = { "random", &random_fops, FMODE_NOWAIT, 0666 },
	[9] = { "urandom", &urandom_fops, FMODE_NOWAIT, 0666 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", &kmsg_fops, 0, 0644 },
#endif
};

static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(const struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static const struct class mem_class = {
	.name		= "mem",
	.devnode	= mem_devnode,
};

static int __init chr_dev_init(void)
{
	int retval;
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	retval = class_register(&mem_class);
	if (retval)
		return retval;

	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(&mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);