mm: introduce follow_pfn()

Factor the page-table walk out of follow_phys() into a new helper,
follow_pte(), and use it to implement follow_pfn(), which looks up the
page frame number backing a user virtual address. Only VM_IO and
VM_PFNMAP mappings are accepted.

diff --git a/mm/memory.c b/mm/memory.c
index 891bad0613f484f132b14ce6410f9c2dbebdc1eb..d5d1653d60a6be51a4ba74de53fd997d8b0bc94c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3103,22 +3103,13 @@ int in_gate_area_no_task(unsigned long addr)
 
 #endif /* __HAVE_ARCH_GATE_AREA */
 
-#ifdef CONFIG_HAVE_IOREMAP_PROT
-int follow_phys(struct vm_area_struct *vma,
-               unsigned long address, unsigned int flags,
-               unsigned long *prot, resource_size_t *phys)
+static int follow_pte(struct mm_struct *mm, unsigned long address,
+               pte_t **ptepp, spinlock_t **ptlp)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
-       pte_t *ptep, pte;
-       spinlock_t *ptl;
-       resource_size_t phys_addr = 0;
-       struct mm_struct *mm = vma->vm_mm;
-       int ret = -EINVAL;
-
-       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-               goto out;
+       pte_t *ptep;
 
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
@@ -3136,22 +3127,72 @@ int follow_phys(struct vm_area_struct *vma,
        if (pmd_huge(*pmd))
                goto out;
 
-       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+       ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
        if (!ptep)
                goto out;
+       if (!pte_present(*ptep))
+               goto unlock;
+       *ptepp = ptep;
+       return 0;
+unlock:
+       pte_unmap_unlock(ptep, *ptlp);
+out:
+       return -EINVAL;
+}
+
+/**
+ * follow_pfn - look up PFN at a user virtual address
+ * @vma: memory mapping
+ * @address: user virtual address
+ * @pfn: location to store found PFN
+ *
+ * Only IO mappings and raw PFN mappings are allowed.
+ *
+ * Returns zero and the pfn at @pfn on success, -EINVAL otherwise.
+ */
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn)
+{
+       int ret = -EINVAL;
+       spinlock_t *ptl;
+       pte_t *ptep;
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return ret;
+
+       ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+       if (ret)
+               return ret;
+       *pfn = pte_pfn(*ptep);
+       pte_unmap_unlock(ptep, ptl);
+       return 0;
+}
+EXPORT_SYMBOL(follow_pfn);
+
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+int follow_phys(struct vm_area_struct *vma,
+               unsigned long address, unsigned int flags,
+               unsigned long *prot, resource_size_t *phys)
+{
+       int ret = -EINVAL;
+       pte_t *ptep, pte;
+       spinlock_t *ptl;
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
 
+       if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+               goto out;
        pte = *ptep;
-       if (!pte_present(pte))
-               goto unlock;
+
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;
-       phys_addr = pte_pfn(pte);
-       phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
        *prot = pgprot_val(pte_pgprot(pte));
-       *phys = phys_addr;
-       ret = 0;
+       /* shift as 64-bit to avoid overflow on 32-bit PAE */
+       *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
 
+       ret = 0;
 unlock:
        pte_unmap_unlock(ptep, ptl);
 out:
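
For illustration, a minimal sketch of a caller of the new helper (not
part of this patch), assuming a kernel of this era where the VMA tree
is still protected by mm->mmap_sem; the function name
example_user_addr_to_pfn() is invented for this example:

	#include <linux/mm.h>
	#include <linux/sched.h>

	/*
	 * Resolve the PFN backing @uaddr in the current process, e.g. an
	 * address the process mapped from a device with remap_pfn_range()
	 * (a VM_PFNMAP mapping).
	 */
	static int example_user_addr_to_pfn(unsigned long uaddr,
					    unsigned long *pfn)
	{
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;
		int ret = -EINVAL;

		down_read(&mm->mmap_sem);	/* keep the VMA layout stable */
		vma = find_vma(mm, uaddr);
		if (vma && vma->vm_start <= uaddr)
			/* fails with -EINVAL unless VM_IO or VM_PFNMAP */
			ret = follow_pfn(vma, uaddr, pfn);
		up_read(&mm->mmap_sem);
		return ret;
	}

Note that follow_pfn() drops the PTE lock before returning, so the PFN
is a snapshot: the caller must hold whatever lock keeps the mapping
from changing (here mmap_sem) for as long as it relies on the value.
Factoring the walk into follow_pte() also lets follow_phys() share it;
follow_phys() only adds the FOLL_WRITE check and the prot/phys
extraction on top of the common lookup.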