/*
 * linux/drivers/char/mem.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Added devfs support.
 *   Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
	  && addr >= __pa(high_memory);
#elif defined(__x86_64__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruption.
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/* Same behaviour as i386: PAT is always set to cached and the MTRRs
	   control the caching behaviour.
	   Hopefully a full PAT implementation will fix that soon. */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
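
/*
 * The result of uncached_access() is consumed by mmap_mem() below: when it
 * returns nonzero (and the architecture provides pgprot_noncached), the
 * mapping is created with caching disabled.
 */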

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
	unsigned long end_mem;

	end_mem = __pa(high_memory);
	if (addr >= end_mem)
		return 0;

	if (*count > end_mem - addr)
		*count = end_mem - addr;

	return 1;
}
#endif
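
/*
 * Note: the generic valid_phys_addr_range() above clamps *count so a read or
 * write never runs past __pa(high_memory); architectures that define
 * ARCH_HAS_VALID_PHYS_ADDR_RANGE supply their own range check instead.
 */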

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;
		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
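
/*
 * For illustration only: a user-space reader treats the file offset as a
 * physical address, roughly
 *
 *	fd = open("/dev/mem", O_RDONLY);
 *	lseek(fd, phys_addr, SEEK_SET);
 *	read(fd, buf, len);
 *
 * with the transfer clamped by valid_phys_addr_range() above.
 */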

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
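
/*
 * Note the partial-write convention above: if copy_from_user() faults part
 * way through, write_mem() returns the number of bytes already written, and
 * only returns -EFAULT when nothing could be written at all.
 */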

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	vma->vm_page_prot = phys_mem_access_prot(file, offset,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);
#elif defined(pgprot_noncached)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int uncached;

	uncached = uncached_access(file, offset);
	if (uncached)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    vma->vm_end-vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
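
/*
 * For illustration only: mmap() of /dev/mem interprets the file offset as a
 * physical address, so something like
 *
 *	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, phys_addr);
 *
 * maps the physical range starting at the page-aligned phys_addr, because
 * vma->vm_pgoff is handed to remap_pfn_range() unchanged.
 */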

static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long long val;
	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(vma->vm_pgoff))
		return -EIO;
	val = (u64)vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
	return mmap_mem(file, vma);
}
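
/*
 * For /dev/kmem the mmap offset is a kernel virtual address: it is converted
 * with __pa() to a physical page frame and then handled exactly like a
 * /dev/mem mapping.
 */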

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}
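
/*
 * read_kmem() above works in two phases: addresses below high_memory are
 * copied straight out of the kernel direct mapping, while higher addresses
 * (the vmalloc area) are fetched page by page through vread() into a bounce
 * page before being copied to user space.
 */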


static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					ssize_t ret;

					free_page((unsigned long)kbuf);
					ret = wrote + virtr + (len - written);
					return ret ? ret : -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i),tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ,buf,count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp))
			return -EFAULT;
		outb(c,i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif
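
/*
 * For /dev/port the file offset is the x86-style I/O port number: reads and
 * writes go one byte at a time through inb()/outb(), and the port space is
 * limited to 0..65535.
 */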

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr=(unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
			break;
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

		zap_page_range(vma, addr, count, NULL);
		zeromap_page_range(vma, addr, count, PAGE_COPY);

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);

	/* The shared case is hard. Let's do the conventional zeroing. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		cond_resched();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}
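
/*
 * The trick above: for a private, writable VMA the existing pages are zapped
 * and the zero page is mapped in copy-on-write, so a large read from
 * /dev/zero does not have to copy zero bytes one page at a time; only shared
 * (or hugetlb) mappings fall back to clear_user().
 */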

static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
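
/*
 * Shared mappings of /dev/zero are turned into shmem-backed anonymous shared
 * memory via shmem_zero_setup(); private mappings simply get the zero page
 * mapped across the range.
 */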
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	size_t todo = count;

	while (todo) {
		size_t chunk = todo;

		if (chunk > 4096)
			chunk = 4096;	/* Just for latency reasons */
		if (clear_user(buf, chunk))
			return -EFAULT;
		buf += chunk;
		todo -= chunk;
		cond_resched();
	}
	return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	down(&file->f_dentry->d_inode->i_sem);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	up(&file->f_dentry->d_inode->i_sem);
	return ret;
}
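
/*
 * force_successful_syscall_return() matters here because a legitimate offset
 * can have its top bits set and would otherwise look like a negative error
 * value on architectures that report syscall errors by value.
 */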

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
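
/*
 * The aliases above fold the simple devices together: writes to /dev/zero
 * are discarded like /dev/null, reads from /dev/full return zeroes like
 * /dev/zero, and opening /dev/mem or /dev/kmem requires CAP_SYS_RAWIO just
 * like /dev/port.
 */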

static struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
};

static struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
};

static struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
};

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	int ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
	}
	kfree(tmp);
	return ret;
}

static struct file_operations kmsg_fops = {
	.write		= kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			break;
		case 2:
			filp->f_op = &kmem_fops;
			break;
		case 3:
			filp->f_op = &null_fops;
			break;
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode,filp);
	return 0;
}

static struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
};
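
/*
 * These minors live under char major MEM_MAJOR (1), matching the traditional
 * device nodes: /dev/mem is 1:1, /dev/null 1:3, /dev/zero 1:5, /dev/full 1:7,
 * /dev/random 1:8, /dev/urandom 1:9 and /dev/kmsg 1:11.
 */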

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;

	if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++) {
		class_device_create(mem_class, MKDEV(MEM_MAJOR, devlist[i].minor),
					NULL, devlist[i].name);
		devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
				S_IFCHR | devlist[i].mode, devlist[i].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);
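
/*
 * chr_dev_init() registers the shared major-1 chrdev, then creates a sysfs
 * class device and a devfs node for every entry in devlist[]; fs_initcall()
 * schedules it to run during boot initcall processing.
 */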