arm64: Align CMA sizes to PAGE_SIZE
[firefly-linux-kernel-4.4.55.git] arch/arm64/mm/dma-mapping.c
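The change adds a PAGE_ALIGN() in the CMA allocation path below: dma_alloc_from_contiguous() takes a page count, so an unaligned sub-page size would truncate to zero pages when shifted right by PAGE_SHIFT. A minimal sketch of that arithmetic (illustration only, assuming 4 KiB pages and simplified macro definitions, not the kernel's own):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long size = 100;       /* a sub-page coherent allocation */

        /* Without alignment the page count truncates to zero. */
        printf("unaligned: %lu pages\n", size >> PAGE_SHIFT);             /* 0 */

        /* Rounded up, at least one full page is requested. */
        printf("aligned:   %lu pages\n", PAGE_ALIGN(size) >> PAGE_SHIFT); /* 1 */

        return 0;
}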
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

/* Global DMA operations for arm64, installed by arm64_swiotlb_init(). */
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t flags,
                                          struct dma_attrs *attrs)
{
        /* Devices limited to 32-bit DMA must be served from ZONE_DMA32. */
        if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA32;
        if (IS_ENABLED(CONFIG_DMA_CMA)) {
                struct page *page;

                /*
                 * CMA works in whole pages: round the size up so the
                 * page count below cannot truncate to zero.
                 */
                size = PAGE_ALIGN(size);
                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                        get_order(size));
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                return page_address(page);
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}

static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_handle,
                                        struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        if (IS_ENABLED(CONFIG_DMA_CMA)) {
                /* CMA allocations go back to the contiguous area. */
                phys_addr_t paddr = dma_to_phys(dev, dma_handle);

                dma_release_from_contiguous(dev,
                                        phys_to_page(paddr),
                                        size >> PAGE_SHIFT);
        } else {
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
        }
}

static struct dma_map_ops arm64_swiotlb_dma_ops = {
        .alloc = arm64_swiotlb_alloc_coherent,
        .free = arm64_swiotlb_free_coherent,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .map_sg = swiotlb_map_sg_attrs,
        .unmap_sg = swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};

void __init arm64_swiotlb_init(void)
{
        dma_ops = &arm64_swiotlb_dma_ops;
        swiotlb_init(1);
}

#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);
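
For context, a minimal caller-side sketch (not part of the file above; the example_* names are hypothetical, only dma_alloc_coherent()/dma_free_coherent() are real kernel APIs). On this kernel a coherent allocation is routed through the global dma_ops installed above, i.e. arm64_swiotlb_alloc_coherent() and arm64_swiotlb_free_coherent(), unless a per-device override is set; a real struct device must be passed so the coherent_dma_mask check and the CMA/swiotlb bookkeeping have something to work with.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static void *example_buf;
static dma_addr_t example_handle;

static int example_setup_dma(struct device *dev)
{
        /* 100 bytes: the CMA path above rounds this up to one full page. */
        example_buf = dma_alloc_coherent(dev, 100, &example_handle, GFP_KERNEL);
        if (!example_buf)
                return -ENOMEM;
        return 0;
}

static void example_teardown_dma(struct device *dev)
{
        dma_free_coherent(dev, 100, example_buf, example_handle);
}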