/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/pipe_fs_i.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability))
	  && addr >= __pa(high_memory);
#elif defined(__x86_64__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruptions.
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/*
	 * Same behaviour as i386: PAT is always set to cached and the MTRRs
	 * control the caching behaviour.
	 * Hopefully a full PAT implementation will fix that soon.
	 */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about, or
	 * through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;
		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

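/*
 * This function writes to the *physical* memory; as with read_mem(),
 * f_pos is the physical address being accessed.
 */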
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				      unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

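/*
 * mmap of /dev/mem: vm_pgoff is the physical page frame number being
 * mapped, so the range can be handed straight to remap_pfn_range().
 */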
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}

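/*
 * mmap of /dev/kmem: the offset is relative to the kernel's direct mapping
 * at PAGE_OFFSET; convert it to an absolute pfn and hand off to mmap_mem().
 */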
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a pfn offset into an absolute pfn */
	pfn = PFN_DOWN(virt_to_phys((void *)PAGE_OFFSET)) + vma->vm_pgoff;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}


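/*
 * Helper for write_kmem(): copy user data into the directly mapped kernel
 * range. 'p' is the kernel address written through (after translation by
 * xlate_dev_kmem_ptr()); 'realp' tracks the same address untranslated and
 * is used for the page-zero and alignment handling.
 */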
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void *)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}

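/*
 * /dev/port: byte-wide access to the x86-style I/O port space, with
 * f_pos used as the port number.
 */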
#if (defined(CONFIG_ISA) || defined(CONFIG_PCI)) && !defined(__mc68000__)
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
#endif

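/* /dev/null: reads return EOF, writes succeed but the data is discarded. */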
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

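/*
 * splice() to /dev/null: consume the pipe buffers without copying the data
 * anywhere.
 */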
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user *buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
			break;
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

		zap_page_range(vma, addr, count, NULL);
		if (zeromap_page_range(vma, addr, count, PAGE_COPY))
			break;

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);

	/* The shared case is hard. Let's do the conventional zeroing. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		cond_resched();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE * 4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE - 1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}

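/*
 * mmap of /dev/zero: shared mappings become shmem-backed, private mappings
 * are simply pre-filled with zero pages.
 */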
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
	int err;

	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	err = zeromap_page_range(vma, vma->vm_start,
				 vma->vm_end - vma->vm_start, vma->vm_page_prot);
	BUG_ON(err == -EEXIST);
	return err;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file *file, char *buf,
			 size_t count, loff_t *ppos)
{
	size_t todo = count;

	while (todo) {
		size_t chunk = todo;

		if (chunk > 4096)
			chunk = 4096;	/* Just for latency reasons */
		if (clear_user(buf, chunk))
			return -EFAULT;
		buf += chunk;
		todo -= chunk;
		cond_resched();
	}
	return count;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

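/*
 * /dev/full: reads behave like /dev/zero (see the read_full alias below),
 * writes always fail with ENOSPC.
 */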
static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}

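/*
 * Opening /dev/port (and, via the open_mem/open_kmem/open_oldmem aliases
 * below, /dev/mem, /dev/kmem and /dev/oldmem) requires CAP_SYS_RAWIO.
 */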
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek = memory_lseek,
	.read = read_mem,
	.write = write_mem,
	.mmap = mmap_mem,
	.open = open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations kmem_fops = {
	.llseek = memory_lseek,
	.read = read_kmem,
	.write = write_kmem,
	.mmap = mmap_kmem,
	.open = open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations null_fops = {
	.llseek = null_lseek,
	.read = read_null,
	.write = write_null,
	.splice_write = splice_write_null,
};

#if (defined(CONFIG_ISA) || defined(CONFIG_PCI)) && !defined(__mc68000__)
static const struct file_operations port_fops = {
	.llseek = memory_lseek,
	.read = read_port,
	.write = write_port,
	.open = open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek = zero_lseek,
	.read = read_zero,
	.write = write_zero,
	.mmap = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek = full_lseek,
	.read = read_full,
	.write = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read = read_oldmem,
	.open = open_oldmem,
};
#endif

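/*
 * /dev/kmsg: writes are passed to printk(), so user space can inject
 * messages into the kernel log.
 */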
static ssize_t kmsg_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write = kmsg_write,
};

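/*
 * The one open() implementation for the whole MEM_MAJOR device: select the
 * real file_operations based on the minor number being opened.
 */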
static int memory_open(struct inode *inode, struct file *filp)
{
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			filp->f_mapping->backing_dev_info =
				&directly_mappable_cdev_bdi;
			break;
		case 2:
			filp->f_op = &kmem_fops;
			filp->f_mapping->backing_dev_info =
				&directly_mappable_cdev_bdi;
			break;
		case 3:
			filp->f_op = &null_fops;
			break;
#if (defined(CONFIG_ISA) || defined(CONFIG_PCI)) && !defined(__mc68000__)
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
#ifdef CONFIG_CRASH_DUMP
		case 12:
			filp->f_op = &oldmem_fops;
			break;
#endif
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode, filp);
	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int minor;
	char *name;
	umode_t mode;
	const struct file_operations *fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO, &null_fops},
#if (defined(CONFIG_ISA) || defined(CONFIG_PCI)) && !defined(__mc68000__)
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO, &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO, &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR, &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR, &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

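/*
 * Register the MEM_MAJOR character device and create a device node for each
 * entry in devlist under the "mem" class.
 */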
static int __init chr_dev_init(void)
{
	int i;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor),
			      devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);