Merge remote-tracking branch 'lsk/v3.10/topic/arm64-cma' into lsk-v3.10-arm64-dma
author Mark Brown <broonie@linaro.org>
Mon, 12 May 2014 17:10:17 +0000 (18:10 +0100)
committer Mark Brown <broonie@linaro.org>
Mon, 12 May 2014 17:10:17 +0000 (18:10 +0100)
arch/arm64/mm/dma-mapping.c

index ba0ff75666ee08c7060c34026bd8a6a1a9fd596b,0150f5d14edaa9f9ef3dba4d21c56e48770eb0d4..550873ace5977eff3092ee53f2f0b160174b2fb7
@@@ -36,178 -37,45 +37,204 @@@ static void *__dma_alloc_coherent(struc
        if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA32;
-       return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
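+       /* with CONFIG_DMA_CMA, allocate from the CMA area; otherwise use swiotlb */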
+       if (IS_ENABLED(CONFIG_DMA_CMA)) {
+               struct page *page;
+               size = PAGE_ALIGN(size);
+               page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+                                                       get_order(size));
+               if (!page)
+                       return NULL;
+               *dma_handle = phys_to_dma(dev, page_to_phys(page));
+               return page_address(page);
+       } else {
+               return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+       }
  }
  
 -static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
 -                                      void *vaddr, dma_addr_t dma_handle,
 -                                      struct dma_attrs *attrs)
 +static void __dma_free_coherent(struct device *dev, size_t size,
 +                              void *vaddr, dma_addr_t dma_handle,
 +                              struct dma_attrs *attrs)
  {
-       swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+       if (dev == NULL) {
+               WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+               return;
+       }
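+       /* when CONFIG_DMA_CMA is set, the buffer was allocated from the CMA area */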
+       if (IS_ENABLED(CONFIG_DMA_CMA)) {
+               phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+               dma_release_from_contiguous(dev,
+                                       phys_to_page(paddr),
+                                       size >> PAGE_SHIFT);
+       } else {
+               swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+       }
  }
  
 -static struct dma_map_ops arm64_swiotlb_dma_ops = {
 -      .alloc = arm64_swiotlb_alloc_coherent,
 -      .free = arm64_swiotlb_free_coherent,
 +static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 +                                   dma_addr_t *dma_handle, gfp_t flags,
 +                                   struct dma_attrs *attrs)
 +{
 +      struct page *page, **map;
 +      void *ptr, *coherent_ptr;
 +      int order, i;
 +
 +      size = PAGE_ALIGN(size);
 +      order = get_order(size);
 +
 +      ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
 +      if (!ptr)
 +              goto no_mem;
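 +      /* array of page pointers for the vmap() call below */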
 +      map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
 +      if (!map)
 +              goto no_map;
 +
 +      /* remove any dirty cache lines on the kernel alias */
 +      __dma_flush_range(ptr, ptr + size);
 +
 +      /* create a coherent mapping */
 +      page = virt_to_page(ptr);
 +      for (i = 0; i < (size >> PAGE_SHIFT); i++)
 +              map[i] = page + i;
 +      coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
 +                          pgprot_dmacoherent(pgprot_default));
 +      kfree(map);
 +      if (!coherent_ptr)
 +              goto no_map;
 +
 +      return coherent_ptr;
 +
 +no_map:
 +      __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
 +no_mem:
 +      *dma_handle = ~0;
 +      return NULL;
 +}
 +
 +static void __dma_free_noncoherent(struct device *dev, size_t size,
 +                                 void *vaddr, dma_addr_t dma_handle,
 +                                 struct dma_attrs *attrs)
 +{
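 +      /* recover the linear-map alias returned by __dma_alloc_coherent() */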
 +      void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 +
 +      vunmap(vaddr);
 +      __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
 +}
 +
 +static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
 +                                   unsigned long offset, size_t size,
 +                                   enum dma_data_direction dir,
 +                                   struct dma_attrs *attrs)
 +{
 +      dma_addr_t dev_addr;
 +
 +      dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
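 +      /* clean/invalidate the CPU caches for the buffer, as 'dir' requires */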
 +      __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 +
 +      return dev_addr;
 +}
 +
 +static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
 +                               size_t size, enum dma_data_direction dir,
 +                               struct dma_attrs *attrs)
 +{
 +      __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 +      swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
 +}
 +
 +static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 +                                int nelems, enum dma_data_direction dir,
 +                                struct dma_attrs *attrs)
 +{
 +      struct scatterlist *sg;
 +      int i, ret;
 +
 +      ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
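 +      /* perform cache maintenance on each successfully mapped segment */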
 +      for_each_sg(sgl, sg, ret, i)
 +              __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 +                             sg->length, dir);
 +
 +      return ret;
 +}
 +
 +static void __swiotlb_unmap_sg_attrs(struct device *dev,
 +                                   struct scatterlist *sgl, int nelems,
 +                                   enum dma_data_direction dir,
 +                                   struct dma_attrs *attrs)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      for_each_sg(sgl, sg, nelems, i)
 +              __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 +                               sg->length, dir);
 +      swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
 +}
 +
 +static void __swiotlb_sync_single_for_cpu(struct device *dev,
 +                                        dma_addr_t dev_addr, size_t size,
 +                                        enum dma_data_direction dir)
 +{
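 +      /* invalidate stale CPU cache lines before the CPU reads the buffer */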
 +      __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 +      swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
 +}
 +
 +static void __swiotlb_sync_single_for_device(struct device *dev,
 +                                           dma_addr_t dev_addr, size_t size,
 +                                           enum dma_data_direction dir)
 +{
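 +      /* sync the bounce buffer first, then clean the caches for the device */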
 +      swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
 +      __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 +}
 +
 +static void __swiotlb_sync_sg_for_cpu(struct device *dev,
 +                                    struct scatterlist *sgl, int nelems,
 +                                    enum dma_data_direction dir)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      for_each_sg(sgl, sg, nelems, i)
 +              __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 +                               sg->length, dir);
 +      swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
 +}
 +
 +static void __swiotlb_sync_sg_for_device(struct device *dev,
 +                                       struct scatterlist *sgl, int nelems,
 +                                       enum dma_data_direction dir)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
 +      for_each_sg(sgl, sg, nelems, i)
 +              __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 +                             sg->length, dir);
 +}
 +
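 +/* DMA ops for devices that are not coherent with the CPU caches */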
 +struct dma_map_ops noncoherent_swiotlb_dma_ops = {
 +      .alloc = __dma_alloc_noncoherent,
 +      .free = __dma_free_noncoherent,
 +      .map_page = __swiotlb_map_page,
 +      .unmap_page = __swiotlb_unmap_page,
 +      .map_sg = __swiotlb_map_sg_attrs,
 +      .unmap_sg = __swiotlb_unmap_sg_attrs,
 +      .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
 +      .sync_single_for_device = __swiotlb_sync_single_for_device,
 +      .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
 +      .sync_sg_for_device = __swiotlb_sync_sg_for_device,
 +      .dma_supported = swiotlb_dma_supported,
 +      .mapping_error = swiotlb_dma_mapping_error,
 +};
 +EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
 +
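 +/* DMA ops for cache-coherent devices; no explicit cache maintenance needed */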
 +struct dma_map_ops coherent_swiotlb_dma_ops = {
 +      .alloc = __dma_alloc_coherent,
 +      .free = __dma_free_coherent,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .map_sg = swiotlb_map_sg_attrs,