intel-iommu: Fix dma vs. mm page confusion with aligned_nrpages()
Author:     David Woodhouse <David.Woodhouse@intel.com>
AuthorDate: Sat, 4 Jul 2009 08:35:44 +0000 (09:35 +0100)
Commit:     David Woodhouse <David.Woodhouse@intel.com>
CommitDate: Sat, 4 Jul 2009 08:35:52 +0000 (09:35 +0100)
The aligned_nrpages() function rounds up to the next VM (MM) page, but
returns its result as a number of DMA (VT-d) pages. Callers which feed
that count to intel_alloc_iova() need to convert it with dma_to_mm_pfn()
first, since intel_alloc_iova() expects MM pages.

Purely theoretical except on IA64, where the MM page size can be larger
than the 4KiB VT-d page size; IA64 doesn't boot with VT-d right now
anyway.
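
To make the mismatch concrete, here is a minimal user-space sketch of the
arithmetic. It is not kernel code: PAGE_SHIFT of 14 is an assumption
standing in for an IA64-style 16KB MM page, and the helper names merely
mirror the ones touched by the patch below.

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT      14                  /* assumed IA64-style 16KB MM pages */
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)
    #define VTD_PAGE_SHIFT  12                  /* VT-d always maps 4KB pages */

    /* Round up to an MM page boundary, but count in VT-d (4KB) pages */
    static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
    {
            host_addr &= ~PAGE_MASK;
            return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
    }

    /* Convert a VT-d page count into an MM page count */
    static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
    {
            return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
    }

    int main(void)
    {
            size_t size = 8192;                 /* an 8KB mapping at offset 0 */
            unsigned long vtd_pages = aligned_nrpages(0, size);

            /* Prints "4 VT-d pages == 1 MM page(s)": passing the raw VT-d
             * count to an allocator that expects MM pages asks for 4x too much. */
            printf("%lu VT-d pages == %lu MM page(s)\n",
                   vtd_pages, dma_to_mm_pfn(vtd_pages));
            return 0;
    }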

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
drivers/pci/intel-iommu.c

index 53075424a4349c0efdbbb24a22ba0f7226aa21d3..ad85e95d2dcc2f3e894dc6b5b6cdb717b2dc1385 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2368,15 +2368,15 @@ error:
        return ret;
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
 static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                            size_t size)
 {
        host_addr &= ~PAGE_MASK;
-       host_addr += size + PAGE_SIZE - 1;
-
-       return host_addr >> VTD_PAGE_SHIFT;
+       return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
                                     struct dmar_domain *domain,
                                     unsigned long nrpages, uint64_t dma_mask)
@@ -2506,7 +2506,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
        iommu = domain_get_iommu(domain);
        size = aligned_nrpages(paddr, size);
 
-       iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+       iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+                               pdev->dma_mask);
        if (!iova)
                goto error;
 
@@ -2797,7 +2798,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
        for_each_sg(sglist, sg, nelems, i)
                size += aligned_nrpages(sg->offset, sg->length);
 
-       iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+       iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+                               pdev->dma_mask);
        if (!iova) {
                sglist->dma_length = 0;
                return 0;