fb:support 180 degree rotate
[firefly-linux-kernel-4.4.55.git] / drivers / char / mem.c
1 /*
2  *  linux/drivers/char/mem.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  Added devfs support. 
7  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8  *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9  */
10
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/smp_lock.h>
#include <linux/err.h>

#include <asm/uaccess.h>
#include <asm/io.h>
33
34 #ifdef CONFIG_IA64
35 # include <linux/efi.h>
36 #endif
37
38 static inline unsigned long size_inside_page(unsigned long start,
39                                              unsigned long size)
40 {
41         unsigned long sz;
42
43         if (-start & (PAGE_SIZE - 1))
44                 sz = -start & (PAGE_SIZE - 1);
45         else
46                 sz = PAGE_SIZE;
47
48         return min_t(unsigned long, sz, size);
49 }
50
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory: decide whether a /dev/mem access at
 * @addr must bypass the CPU cache.  Non-zero means "uncached".
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		/* MIPS delegates the decision to a platform helper. */
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about or through a file pointer
	 * that was marked O_SYNC will be done non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
80
81 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
82 static inline int valid_phys_addr_range(unsigned long addr, size_t count)
83 {
84         if (addr + count > __pa(high_memory))
85                 return 0;
86
87         return 1;
88 }
89
/* Default: any physical range may be mmap()ed; arches may override. */
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
94 #endif
95
96 #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
97 #ifdef CONFIG_STRICT_DEVMEM
98 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
99 {
100         u64 from = ((u64)pfn) << PAGE_SHIFT;
101         u64 to = from + size;
102         u64 cursor = from;
103
104         while (cursor < to) {
105                 if (!devmem_is_allowed(pfn)) {
106                         printk(KERN_INFO
107                 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
108                                 current->comm, from, to);
109                         return 0;
110                 }
111                 cursor += PAGE_SIZE;
112                 pfn++;
113         }
114         return 1;
115 }
116 #else
/* Without CONFIG_STRICT_DEVMEM every physical range is accessible. */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
121 #endif
122 #endif
123
124 #ifdef CONFIG_DEVMEM
/*
 * Undo xlate_dev_mem_ptr().  The default (weak) implementation is a
 * no-op; architectures that need to unmap the translation override it.
 */
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}
128
129 /*
130  * This funcion reads the *physical* memory. The f_pos points directly to the 
131  * memory location. 
132  */
133 static ssize_t read_mem(struct file * file, char __user * buf,
134                         size_t count, loff_t *ppos)
135 {
136         unsigned long p = *ppos;
137         ssize_t read, sz;
138         char *ptr;
139
140         if (!valid_phys_addr_range(p, count))
141                 return -EFAULT;
142         read = 0;
143 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
144         /* we don't have page 0 mapped on sparc and m68k.. */
145         if (p < PAGE_SIZE) {
146                 sz = PAGE_SIZE - p;
147                 if (sz > count) 
148                         sz = count; 
149                 if (sz > 0) {
150                         if (clear_user(buf, sz))
151                                 return -EFAULT;
152                         buf += sz; 
153                         p += sz; 
154                         count -= sz; 
155                         read += sz; 
156                 }
157         }
158 #endif
159
160         while (count > 0) {
161                 /*
162                  * Handle first page in case it's not aligned
163                  */
164                 if (-p & (PAGE_SIZE - 1))
165                         sz = -p & (PAGE_SIZE - 1);
166                 else
167                         sz = PAGE_SIZE;
168
169                 sz = min_t(unsigned long, sz, count);
170
171                 if (!range_is_allowed(p >> PAGE_SHIFT, count))
172                         return -EPERM;
173
174                 /*
175                  * On ia64 if a page has been mapped somewhere as
176                  * uncached, then it must also be accessed uncached
177                  * by the kernel or data corruption may occur
178                  */
179                 ptr = xlate_dev_mem_ptr(p);
180                 if (!ptr)
181                         return -EFAULT;
182
183                 if (copy_to_user(buf, ptr, sz)) {
184                         unxlate_dev_mem_ptr(p, ptr);
185                         return -EFAULT;
186                 }
187
188                 unxlate_dev_mem_ptr(p, ptr);
189
190                 buf += sz;
191                 p += sz;
192                 count -= sz;
193                 read += sz;
194         }
195
196         *ppos += read;
197         return read;
198 }
199
200 static ssize_t write_mem(struct file * file, const char __user * buf, 
201                          size_t count, loff_t *ppos)
202 {
203         unsigned long p = *ppos;
204         ssize_t written, sz;
205         unsigned long copied;
206         void *ptr;
207
208         if (!valid_phys_addr_range(p, count))
209                 return -EFAULT;
210
211         written = 0;
212
213 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
214         /* we don't have page 0 mapped on sparc and m68k.. */
215         if (p < PAGE_SIZE) {
216                 unsigned long sz = PAGE_SIZE - p;
217                 if (sz > count)
218                         sz = count;
219                 /* Hmm. Do something? */
220                 buf += sz;
221                 p += sz;
222                 count -= sz;
223                 written += sz;
224         }
225 #endif
226
227         while (count > 0) {
228                 /*
229                  * Handle first page in case it's not aligned
230                  */
231                 if (-p & (PAGE_SIZE - 1))
232                         sz = -p & (PAGE_SIZE - 1);
233                 else
234                         sz = PAGE_SIZE;
235
236                 sz = min_t(unsigned long, sz, count);
237
238                 if (!range_is_allowed(p >> PAGE_SHIFT, sz))
239                         return -EPERM;
240
241                 /*
242                  * On ia64 if a page has been mapped somewhere as
243                  * uncached, then it must also be accessed uncached
244                  * by the kernel or data corruption may occur
245                  */
246                 ptr = xlate_dev_mem_ptr(p);
247                 if (!ptr) {
248                         if (written)
249                                 break;
250                         return -EFAULT;
251                 }
252
253                 copied = copy_from_user(ptr, buf, sz);
254                 if (copied) {
255                         written += sz - copied;
256                         unxlate_dev_mem_ptr(p, ptr);
257                         if (written)
258                                 break;
259                         return -EFAULT;
260                 }
261
262                 unxlate_dev_mem_ptr(p, ptr);
263
264                 buf += sz;
265                 p += sz;
266                 count -= sz;
267                 written += sz;
268         }
269
270         *ppos += written;
271         return written;
272 }
273 #endif  /* CONFIG_DEVMEM */
274
275 #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
276
/*
 * Hook for architectures to veto or adjust the protection used when
 * mmap()ing physical memory; the default (weak) version allows all.
 */
int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}
282
283 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
/*
 * Default page protection for mmap of physical memory: make the
 * mapping non-cacheable when uncached_access() says it must be
 * (only on arches that provide pgprot_noncached).
 */
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
295 #endif
296
297 #ifndef CONFIG_MMU
298 static unsigned long get_unmapped_area_mem(struct file *file,
299                                            unsigned long addr,
300                                            unsigned long len,
301                                            unsigned long pgoff,
302                                            unsigned long flags)
303 {
304         if (!valid_mmap_phys_addr_range(pgoff, len))
305                 return (unsigned long) -EINVAL;
306         return pgoff << PAGE_SHIFT;
307 }
308
/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	/* Only (at least potentially) shared mappings work without an MMU. */
	return vma->vm_flags & VM_MAYSHARE;
}
314 #else
315 #define get_unmapped_area_mem   NULL
316
/* With an MMU, private mappings of /dev/mem are always acceptable. */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
321 #endif
322
/*
 * VM operations for /dev/mem mappings.  generic_access_phys (when the
 * arch has ioremap_prot) allows access_process_vm()-style access to the
 * mapped range -- presumably for ptrace; see its definition to confirm.
 */
static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};
328
329 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
330 {
331         size_t size = vma->vm_end - vma->vm_start;
332
333         if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
334                 return -EINVAL;
335
336         if (!private_mapping_ok(vma))
337                 return -ENOSYS;
338
339         if (!range_is_allowed(vma->vm_pgoff, size))
340                 return -EPERM;
341
342         if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
343                                                 &vma->vm_page_prot))
344                 return -EINVAL;
345
346         vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
347                                                  size,
348                                                  vma->vm_page_prot);
349
350         vma->vm_ops = &mmap_mem_ops;
351
352         /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
353         if (remap_pfn_range(vma,
354                             vma->vm_start,
355                             vma->vm_pgoff,
356                             size,
357                             vma->vm_page_prot)) {
358                 return -EAGAIN;
359         }
360         return 0;
361 }
362 #endif  /* CONFIG_DEVMEM */
363
364 #ifdef CONFIG_DEVKMEM
/*
 * mmap of /dev/kmem: the file offset is a kernel-virtual address;
 * convert it to a physical pfn and reuse mmap_mem().
 */
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
385 #endif
386
387 #ifdef CONFIG_CRASH_DUMP
388 /*
389  * Read memory corresponding to the old kernel.
390  */
391 static ssize_t read_oldmem(struct file *file, char __user *buf,
392                                 size_t count, loff_t *ppos)
393 {
394         unsigned long pfn, offset;
395         size_t read = 0, csize;
396         int rc = 0;
397
398         while (count) {
399                 pfn = *ppos / PAGE_SIZE;
400                 if (pfn > saved_max_pfn)
401                         return read;
402
403                 offset = (unsigned long)(*ppos % PAGE_SIZE);
404                 if (count > PAGE_SIZE - offset)
405                         csize = PAGE_SIZE - offset;
406                 else
407                         csize = count;
408
409                 rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
410                 if (rc < 0)
411                         return rc;
412                 buf += csize;
413                 *ppos += csize;
414                 read += csize;
415                 count -= csize;
416         }
417         return read;
418 }
419 #endif
420
421 #ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 *
 * Two phases: addresses below high_memory are copied directly from the
 * kernel direct mapping; anything above is treated as vmalloc/module
 * space and read through vread() via a bounce page.
 */
static ssize_t read_kmem(struct file *file, char __user *buf, 
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		/* Phase 1: low memory, bounded by high_memory. */
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			/* Page 0 is unreadable: return zeroes instead. */
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		/* Phase 2: vmalloc/module space via a bounce page. */
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = size_inside_page(p, count);

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			/* vread() may return less than asked (holes). */
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				err = -EFAULT;
				break;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	/* Report progress if any; otherwise surface the error (if any). */
	*ppos = p;
	return read ? read : err;
}
501
502
/*
 * Helper for write_kmem(): copy @count bytes of user data to the
 * directly-mapped kernel address @p.  @realp mirrors @p as an integer
 * and is used for page-boundary arithmetic.  Advances *ppos by the
 * number of bytes written and returns it, or -EFAULT if nothing could
 * be copied at all.
 */
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? (writes to page 0 are dropped) */
		buf += sz;
		p += sz;	/* void* arithmetic: GCC extension */
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		/* One page (or less) at a time. */
		sz = size_inside_page(realp, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			/* Partial copy: report progress, or fault if none. */
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
555
556
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 *
 * Mirror of read_kmem(): low memory goes through do_write_kmem();
 * vmalloc/module space goes through vwrite() via a bounce page.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf, 
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		/* Phase 1: low memory, bounded by high_memory. */
		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		/* A short write here means a fault: stop and report it. */
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		/* Phase 2: vmalloc/module space via a bounce page. */
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = size_inside_page(p, count);

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					err = -EFAULT;
					break;
				}
			}
			vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	/* GNU "?:": total progress when non-zero, otherwise the error. */
	return virtr + wrote ? : err;
}
615 #endif
616
617 #ifdef CONFIG_DEVPORT
618 static ssize_t read_port(struct file * file, char __user * buf,
619                          size_t count, loff_t *ppos)
620 {
621         unsigned long i = *ppos;
622         char __user *tmp = buf;
623
624         if (!access_ok(VERIFY_WRITE, buf, count))
625                 return -EFAULT; 
626         while (count-- > 0 && i < 65536) {
627                 if (__put_user(inb(i),tmp) < 0) 
628                         return -EFAULT;  
629                 i++;
630                 tmp++;
631         }
632         *ppos = i;
633         return tmp-buf;
634 }
635
636 static ssize_t write_port(struct file * file, const char __user * buf,
637                           size_t count, loff_t *ppos)
638 {
639         unsigned long i = *ppos;
640         const char __user * tmp = buf;
641
642         if (!access_ok(VERIFY_READ,buf,count))
643                 return -EFAULT;
644         while (count-- > 0 && i < 65536) {
645                 char c;
646                 if (__get_user(c, tmp)) {
647                         if (tmp > buf)
648                                 break;
649                         return -EFAULT; 
650                 }
651                 outb(c,i);
652                 i++;
653                 tmp++;
654         }
655         *ppos = i;
656         return tmp-buf;
657 }
658 #endif
659
/* /dev/null read: always EOF. */
static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}
665
/* /dev/null write: swallow everything, claim full success. */
static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}
671
/* splice actor for /dev/null: discard the buffer, report it consumed. */
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}
677
/* splice-to-/dev/null: drain the pipe through the discarding actor. */
static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
683
684 static ssize_t read_zero(struct file * file, char __user * buf, 
685                          size_t count, loff_t *ppos)
686 {
687         size_t written;
688
689         if (!count)
690                 return 0;
691
692         if (!access_ok(VERIFY_WRITE, buf, count))
693                 return -EFAULT;
694
695         written = 0;
696         while (count) {
697                 unsigned long unwritten;
698                 size_t chunk = count;
699
700                 if (chunk > PAGE_SIZE)
701                         chunk = PAGE_SIZE;      /* Just for latency reasons */
702                 unwritten = __clear_user(buf, chunk);
703                 written += chunk - unwritten;
704                 if (unwritten)
705                         break;
706                 if (signal_pending(current))
707                         return written ? written : -ERESTARTSYS;
708                 buf += chunk;
709                 count -= chunk;
710                 cond_resched();
711         }
712         return written ? written : -EFAULT;
713 }
714
/*
 * mmap of /dev/zero: shared mappings are backed by a shmem object;
 * private mappings need no setup here.
 */
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	/* NOTE(review): on !MMU builds the code below is unreachable. */
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
724
/* /dev/full write: always fails with "no space left". */
static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}
730
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

/* Any seek resets the position to 0 and reports success. */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
741
742 #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
743
744 /*
745  * The memory devices use the full 32/64 bits of the offset, and so we cannot
746  * check against negative addresses: they are ok. The return value is weird,
747  * though, in that case (0).
748  *
749  * also note that seeking relative to the "end of file" isn't supported:
750  * it has no meaning, so it returns -EINVAL.
751  */
752 static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
753 {
754         loff_t ret;
755
756         mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
757         switch (orig) {
758                 case 0:
759                         file->f_pos = offset;
760                         ret = file->f_pos;
761                         force_successful_syscall_return();
762                         break;
763                 case 1:
764                         file->f_pos += offset;
765                         ret = file->f_pos;
766                         force_successful_syscall_return();
767                         break;
768                 default:
769                         ret = -EINVAL;
770         }
771         mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
772         return ret;
773 }
774
775 #endif
776
777 #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
/* Opening raw memory/port devices requires CAP_SYS_RAWIO. */
static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
782 #endif
783
/* Several devices share implementations; alias them here. */
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

#ifdef CONFIG_DEVMEM
/* /dev/mem: raw physical memory. */
static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

#ifdef CONFIG_DEVKMEM
/* /dev/kmem: kernel virtual memory. */
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

/* /dev/null: data sink. */
static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
/* /dev/port: raw I/O ports. */
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

/* /dev/zero: endless zeroes; writes are discarded. */
static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

/* /dev/full: reads like /dev/zero, writes always fail with ENOSPC. */
static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
/* /dev/oldmem: read-only view of the crashed kernel's memory. */
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif
859
860 static ssize_t kmsg_write(struct file * file, const char __user * buf,
861                           size_t count, loff_t *ppos)
862 {
863         char *tmp;
864         ssize_t ret;
865
866         tmp = kmalloc(count + 1, GFP_KERNEL);
867         if (tmp == NULL)
868                 return -ENOMEM;
869         ret = -EFAULT;
870         if (!copy_from_user(tmp, buf, count)) {
871                 tmp[count] = 0;
872                 ret = printk("%s", tmp);
873                 if (ret > count)
874                         /* printk can add a prefix */
875                         ret = count;
876         }
877         kfree(tmp);
878         return ret;
879 }
880
static const struct file_operations kmsg_fops = {
	.write =        kmsg_write,
};

/*
 * Table of minor numbers under MEM_MAJOR.  The array index IS the
 * minor number; holes (6, 10) are minors not handled here --
 * presumably reserved historically (e.g. /dev/core, /dev/aio); verify
 * against Documentation/devices.txt.  mode 0 means "no mode override"
 * in mem_devnode().
 */
static const struct memdev {
	const char *name;
	mode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#endif
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
	[11] = { "kmsg", 0, &kmsg_fops, NULL },
#ifdef CONFIG_CRASH_DUMP
	[12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};
910
911 static int memory_open(struct inode *inode, struct file *filp)
912 {
913         int minor;
914         const struct memdev *dev;
915         int ret = -ENXIO;
916
917         lock_kernel();
918
919         minor = iminor(inode);
920         if (minor >= ARRAY_SIZE(devlist))
921                 goto out;
922
923         dev = &devlist[minor];
924         if (!dev->fops)
925                 goto out;
926
927         filp->f_op = dev->fops;
928         if (dev->dev_info)
929                 filp->f_mapping->backing_dev_info = dev->dev_info;
930
931         if (dev->fops->open)
932                 ret = dev->fops->open(inode, filp);
933         else
934                 ret = 0;
935 out:
936         unlock_kernel();
937         return ret;
938 }
939
/*
 * Initial fops for MEM_MAJOR; memory_open() swaps in the per-minor
 * fops from devlist[].
 */
static const struct file_operations memory_fops = {
	.open           = memory_open,
};
943
944 static char *mem_devnode(struct device *dev, mode_t *mode)
945 {
946         if (mode && devlist[MINOR(dev->devt)].mode)
947                 *mode = devlist[MINOR(dev->devt)].mode;
948         return NULL;
949 }
950
951 static struct class *mem_class;
952
953 static int __init chr_dev_init(void)
954 {
955         int minor;
956         int err;
957
958         err = bdi_init(&zero_bdi);
959         if (err)
960                 return err;
961
962         if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
963                 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
964
965         mem_class = class_create(THIS_MODULE, "mem");
966         mem_class->devnode = mem_devnode;
967         for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
968                 if (!devlist[minor].name)
969                         continue;
970                 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
971                               NULL, devlist[minor].name);
972         }
973
974         return 0;
975 }
976
977 fs_initcall(chr_dev_init);