/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
/* vi: set ts=8 sw=8 sts=8: */
/*************************************************************************/ /*!
@File           ion_lma_heap.c
@Codingstyle    LinuxKernel
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.

If you wish to allow use of your version of this file only under the terms of
GPL, and not to allow others to use your version of this file under the terms
of the MIT license, indicate your decision by deleting the provisions above
and replace them with the notice and other provisions required by GPL as set
out in the file called "GPL-COPYING" included in this distribution. If you do
not delete the provisions above, a recipient may use your version of this file
under the terms of either the MIT license or GPL.

This License is also included in this distribution in the file called
"MIT-COPYING".

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/

#include "ion_lma_heap.h"

#include <linux/err.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/scatterlist.h>

/* Ion heap for LMA allocations. This heap is identical to CARVEOUT except
 * that it does not do any CPU cache maintenance nor does it zero the memory
 * using the CPU (this is handled with PVR_ANDROID_DEFER_CLEAR in userspace).
 */

struct ion_lma_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	bool allow_cpu_map;
};
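
/* Carve a physically contiguous range out of the gen_pool. gen_pool_alloc()
 * returns 0 on failure, which is translated to ION_CARVEOUT_ALLOCATE_FAIL.
 * This file does not define ION_CARVEOUT_ALLOCATE_FAIL; it is assumed to be
 * provided by ion's carveout support pulled in via ion_lma_heap.h.
 */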
static ion_phys_addr_t ion_lma_allocate(struct ion_heap *heap,
					unsigned long size,
					unsigned long align)
{
	struct ion_lma_heap *lma_heap =
		container_of(heap, struct ion_lma_heap, heap);
	unsigned long offset = gen_pool_alloc(lma_heap->pool, size);

	if (!offset)
		return ION_CARVEOUT_ALLOCATE_FAIL;

	return offset;
}

static void ion_lma_free(struct ion_heap *heap, ion_phys_addr_t addr,
			 unsigned long size)
{
	struct ion_lma_heap *lma_heap =
		container_of(heap, struct ion_lma_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;

	gen_pool_free(lma_heap->pool, addr, size);
}
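
/* Report the physical address and length of a buffer. Buffers from this heap
 * are a single physically contiguous range, so the first (and only) sg entry
 * fully describes the allocation.
 */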
static int ion_lma_heap_phys(struct ion_heap *heap,
			     struct ion_buffer *buffer,
			     ion_phys_addr_t *addr, size_t *len)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	*addr = paddr;
	*len = buffer->size;
	return 0;
}
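
/* Allocate a buffer: reserve a contiguous range from the pool and describe it
 * with a one-entry sg_table. Alignment stricter than PAGE_SIZE is rejected,
 * since the pool only guarantees page granularity.
 */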
static int ion_lma_heap_allocate(struct ion_heap *heap,
				 struct ion_buffer *buffer,
				 unsigned long size, unsigned long align,
				 unsigned long flags)
{
	struct sg_table *table;
	ion_phys_addr_t paddr;
	int ret;

	if (align > PAGE_SIZE)
		return -EINVAL;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err_free;

	paddr = ion_lma_allocate(heap, size, align);
	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
		ret = -ENOMEM;
		goto err_free_table;
	}

	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
	buffer->priv_virt = table;
	return 0;

err_free_table:
	sg_free_table(table);
err_free:
	kfree(table);
	return ret;
}

static void ion_lma_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	/* Do not zero the LMA heap from the CPU. This is very slow with
	 * the current TCF (w/ no DMA engine). We will use the TLA to clear
	 * the memory with Rogue in another place.
	 *
	 * We also skip the CPU cache maintenance for the heap space, as we
	 * statically know that the TCF PCI memory bar has UC/WC set by the
	 * MTRR/PAT subsystem.
	 */

	ion_lma_free(heap, paddr, buffer->size);
	sg_free_table(table);
	kfree(table);
}
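
/* The one-entry sg_table built at allocation time doubles as the dma
 * "mapping", so map_dma simply hands it back and unmap_dma has nothing
 * to undo.
 */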
static struct sg_table *ion_lma_heap_map_dma(struct ion_heap *heap,
					     struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_lma_heap_unmap_dma(struct ion_heap *heap,
				   struct ion_buffer *buffer)
{
}
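
/* Map a buffer into userspace as write-combined. When the heap backs "fake
 * secure" memory (allow_cpu_map == false), any CPU mapping attempt is
 * refused.
 */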
static int ion_lma_heap_map_user(struct ion_heap *mapper,
				 struct ion_buffer *buffer,
				 struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
	struct ion_lma_heap *lma_heap =
		container_of(mapper, struct ion_lma_heap, heap);

	if (!lma_heap->allow_cpu_map) {
		pr_err("Trying to map_user fake secure ION handle\n");
		return -EPERM;
	}

	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(paddr) + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       pgprot_writecombine(vma->vm_page_prot));
}
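
/* Kernel mappings use ioremap_wc() for the same write-combined semantics as
 * the userspace path, and obey the same allow_cpu_map policy.
 */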
static void *ion_lma_heap_map_kernel(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
	struct ion_lma_heap *lma_heap =
		container_of(heap, struct ion_lma_heap, heap);

	if (!lma_heap->allow_cpu_map) {
		pr_err("Trying to map_kernel fake secure ION handle\n");
		return ERR_PTR(-EPERM);
	}

	return ioremap_wc(paddr, buffer->size);
}

static void ion_lma_heap_unmap_kernel(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	iounmap(buffer->vaddr);
}

static struct ion_heap_ops lma_heap_ops = {
	.allocate = ion_lma_heap_allocate,
	.free = ion_lma_heap_free,
	.phys = ion_lma_heap_phys,
	.map_dma = ion_lma_heap_map_dma,
	.unmap_dma = ion_lma_heap_unmap_dma,
	.map_user = ion_lma_heap_map_user,
	.map_kernel = ion_lma_heap_map_kernel,
	.unmap_kernel = ion_lma_heap_unmap_kernel,
};
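
/* Create an LMA heap managing [base, base + size) from the platform heap
 * data. The heap is exposed as ION_HEAP_TYPE_CUSTOM and uses ion's deferred
 * free machinery (ION_HEAP_FLAG_DEFER_FREE).
 */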
struct ion_heap *ion_lma_heap_create(struct ion_platform_heap *heap_data,
				     bool allow_cpu_map)
{
	struct ion_lma_heap *lma_heap;
	size_t size = heap_data->size;
	struct page *page;

	page = pfn_to_page(PFN_DOWN(heap_data->base));

	/* Do not zero the LMA heap from the CPU. This is very slow with
	 * the current TCF (w/ no DMA engine). We will use the TLA to clear
	 * the memory with Rogue in another place.
	 *
	 * We also skip the CPU cache maintenance for the heap space, as we
	 * statically know that the TCF PCI memory bar has UC/WC set by the
	 * MTRR/PAT subsystem.
	 */

	lma_heap = kzalloc(sizeof(*lma_heap), GFP_KERNEL);
	if (!lma_heap)
		return ERR_PTR(-ENOMEM);

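	/* min_alloc_order of 12 gives a 4 KiB (page sized) allocation
	 * granularity; nid of -1 means the pool metadata may come from
	 * any NUMA node.
	 */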
	lma_heap->pool = gen_pool_create(12, -1);
	if (!lma_heap->pool) {
		kfree(lma_heap);
		return ERR_PTR(-ENOMEM);
	}

	lma_heap->base = heap_data->base;
	gen_pool_add(lma_heap->pool, lma_heap->base, size, -1);

	lma_heap->heap.id = heap_data->id;
	lma_heap->heap.ops = &lma_heap_ops;
	lma_heap->heap.name = heap_data->name;
	lma_heap->heap.type = ION_HEAP_TYPE_CUSTOM;
	lma_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	lma_heap->allow_cpu_map = allow_cpu_map;

	return &lma_heap->heap;
}
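
/* Usage sketch (hypothetical caller; the heap_data values and the
 * lma_base_paddr/lma_size names are examples only, not taken from this
 * file):
 *
 *	struct ion_platform_heap data = {
 *		.id   = 2,
 *		.name = "lma",
 *		.base = lma_base_paddr,
 *		.size = lma_size,
 *	};
 *	struct ion_heap *heap = ion_lma_heap_create(&data, true);
 *
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 */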
void ion_lma_heap_destroy(struct ion_heap *heap)
{
	struct ion_lma_heap *lma_heap =
		container_of(heap, struct ion_lma_heap, heap);

	gen_pool_destroy(lma_heap->pool);
	kfree(lma_heap);
}