Remove (at least temporarily) the "incomplete PFN mapping" support
author     Linus Torvalds <torvalds@g5.osdl.org>     Mon, 12 Dec 2005 03:57:52 +0000 (19:57 -0800)
committer  Linus Torvalds <torvalds@g5.osdl.org>     Mon, 12 Dec 2005 03:57:52 +0000 (19:57 -0800)
With the previous commit, we can handle arbitrary shared re-mappings
even without this complexity, and since the only known private mappings
are for strange users of /dev/mem (which never create an incomplete one),
there seems to be no reason to support it.
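As a reminder of what remains supported: the normal pattern is a driver
mmap handler that covers the whole VMA with a single remap_pfn_range()
call.  A minimal sketch (the mydrv_mmap name and its wiring are
hypothetical, not part of this commit):

    #include <linux/fs.h>
    #include <linux/mm.h>

    /*
     * Hypothetical example only: map the device's memory over the
     * entire VMA in one go.  Full-range remappings like this are
     * unaffected by the removal of incomplete_pfn_remap().
     */
    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long size = vma->vm_end - vma->vm_start;

            return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                   size, vma->vm_page_prot);
    }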

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/linux/mm.h
mm/memory.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 29f02d8513f649fc74c3fcd3be1ade57fd4cf466..e5677f45674273b853be00e951f0045405fd1fb9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -163,7 +163,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB     0x00400000      /* Huge TLB Page VM */
 #define VM_NONLINEAR   0x00800000      /* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY 0x01000000      /* T if mapped copy of data (nommu mmap) */
-#define VM_INCOMPLETE  0x02000000      /* Strange partial PFN mapping marker */
 
 #ifndef VM_STACK_DEFAULT_FLAGS         /* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
diff --git a/mm/memory.c b/mm/memory.c
index e65f8fc8ea672a26f4a054d474326e14f06822b8..430a72ed08d5aeb77c9b32148b7ba3aee84fd5bf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1227,50 +1227,6 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 }
 EXPORT_SYMBOL(vm_insert_page);
 
-/*
- * Somebody does a pfn remapping that doesn't actually work as a vma.
- *
- * Do it as individual pages instead, and warn about it. It's bad form,
- * and very inefficient.
- */
-static int incomplete_pfn_remap(struct vm_area_struct *vma,
-               unsigned long start, unsigned long end,
-               unsigned long pfn, pgprot_t prot)
-{
-       static int warn = 10;
-       struct page *page;
-       int retval;
-
-       if (!(vma->vm_flags & VM_INCOMPLETE)) {
-               if (warn) {
-                       warn--;
-                       printk("%s does an incomplete pfn remapping", current->comm);
-                       dump_stack();
-               }
-       }
-       vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
-
-       if (start < vma->vm_start || end > vma->vm_end)
-               return -EINVAL;
-
-       if (!pfn_valid(pfn))
-               return -EINVAL;
-
-       page = pfn_to_page(pfn);
-       if (!PageReserved(page))
-               return -EINVAL;
-
-       retval = 0;
-       while (start < end) {
-               retval = insert_page(vma->vm_mm, start, page, prot);
-               if (retval < 0)
-                       break;
-               start += PAGE_SIZE;
-               page++;
-       }
-       return retval;
-}
-
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
@@ -1365,7 +1321,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         */
        if (!(vma->vm_flags & VM_SHARED)) {
                if (addr != vma->vm_start || end != vma->vm_end)
-                       return incomplete_pfn_remap(vma, addr, end, pfn, prot);
+                       return -EINVAL;
                vma->vm_pgoff = pfn;
        }