/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
/* vi: set ts=8 sw=8 sts=8: */
/*************************************************************************/ /*!
@File           ion_lma_heap.c
@Codingstyle    LinuxKernel
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.

If you wish to allow use of your version of this file only under the terms of
GPL, and not to allow others to use your version of this file under the terms
of the MIT license, indicate your decision by deleting the provisions above
and replace them with the notice and other provisions required by GPL as set
out in the file called "GPL-COPYING" included in this distribution. If you do
not delete the provisions above, a recipient may use your version of this file
under the terms of either the MIT license or GPL.

This License is also included in this distribution in the file called
"MIT-COPYING".

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/

#include "ion_lma_heap.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/scatterlist.h>

/* Ion heap for LMA allocations. This heap is identical to CARVEOUT except
 * that it does not do any CPU cache maintenance nor does it zero the memory
 * using the CPU (this is handled with PVR_ANDROID_DEFER_CLEAR in userspace).
 */

struct ion_lma_heap {
        struct ion_heap heap;
        struct gen_pool *pool;
        ion_phys_addr_t base;
        bool allow_cpu_map;
};

static ion_phys_addr_t ion_lma_allocate(struct ion_heap *heap,
                                        unsigned long size,
                                        unsigned long align)
{
        struct ion_lma_heap *lma_heap =
                container_of(heap, struct ion_lma_heap, heap);
        /* gen_pool_alloc() offers no per-call alignment control; page
         * alignment follows from the pool's 4K minimum allocation order
         * (see ion_lma_heap_create), and the caller rejects any stricter
         * "align" request.
         */
        unsigned long offset = gen_pool_alloc(lma_heap->pool, size);

        if (!offset)
                return ION_CARVEOUT_ALLOCATE_FAIL;

        return offset;
}

static void ion_lma_free(struct ion_heap *heap, ion_phys_addr_t addr,
                         unsigned long size)
{
        struct ion_lma_heap *lma_heap =
                container_of(heap, struct ion_lma_heap, heap);

        if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
                return;

        gen_pool_free(lma_heap->pool, addr, size);
}

static int ion_lma_heap_phys(struct ion_heap *heap,
                             struct ion_buffer *buffer,
                             ion_phys_addr_t *addr, size_t *len)
{
        struct sg_table *table = buffer->priv_virt;
        struct page *page = sg_page(table->sgl);
        ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

        *addr = paddr;
        *len = buffer->size;
        return 0;
}

static int ion_lma_heap_allocate(struct ion_heap *heap,
                                 struct ion_buffer *buffer,
                                 unsigned long size, unsigned long align,
                                 unsigned long flags)
{
        struct sg_table *table;
        ion_phys_addr_t paddr;
        int ret;

        if (align > PAGE_SIZE)
                return -EINVAL;

        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto err_free;

        paddr = ion_lma_allocate(heap, size, align);
        if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
                ret = -ENOMEM;
                goto err_free_table;
        }

        sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
        buffer->priv_virt = table;
        return 0;

err_free_table:
        sg_free_table(table);
err_free:
        kfree(table);
        return ret;
}

static void ion_lma_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct sg_table *table = buffer->priv_virt;
        struct page *page = sg_page(table->sgl);
        ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

        /* Do not zero the LMA heap from the CPU. This is very slow with
         * the current TCF (w/ no DMA engine). We will use the TLA to clear
         * the memory with Rogue in another place.
         *
         * We also skip the CPU cache maintenance for the heap space, as we
         * statically know that the TCF PCI memory bar has UC/WC set by the
         * MTRR/PAT subsystem.
         */

        ion_lma_free(heap, paddr, buffer->size);
        sg_free_table(table);
        kfree(table);
}

static struct sg_table *ion_lma_heap_map_dma(struct ion_heap *heap,
                                             struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

static void ion_lma_heap_unmap_dma(struct ion_heap *heap,
                                   struct ion_buffer *buffer)
{
        /* No-op */
}

static int ion_lma_heap_map_user(struct ion_heap *mapper,
                                 struct ion_buffer *buffer,
                                 struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->priv_virt;
        struct page *page = sg_page(table->sgl);
        ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
        struct ion_lma_heap *lma_heap =
                container_of(mapper, struct ion_lma_heap, heap);

        if (!lma_heap->allow_cpu_map) {
                pr_err("Trying to map_user fake secure ION handle\n");
                return -EPERM;
        }

        return remap_pfn_range(vma, vma->vm_start,
                               PFN_DOWN(paddr) + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               pgprot_writecombine(vma->vm_page_prot));
}
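
/* For reference, a rough sketch of the userspace path that reaches this
 * handler with the staging ION UAPI of this kernel series; the "ion_fd",
 * "length" and "lma_heap_id" names are illustrative, not part of this
 * driver:
 *
 *      struct ion_allocation_data alloc = {
 *              .len = length,
 *              .align = 0,
 *              .heap_id_mask = 1 << lma_heap_id,
 *      };
 *      ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *
 *      struct ion_fd_data map = { .handle = alloc.handle };
 *      ioctl(ion_fd, ION_IOC_MAP, &map);
 *      mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, map.fd, 0);
 */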

static void *ion_lma_heap_map_kernel(struct ion_heap *heap,
                                     struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->priv_virt;
        struct page *page = sg_page(table->sgl);
        ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
        struct ion_lma_heap *lma_heap =
                container_of(heap, struct ion_lma_heap, heap);

        if (!lma_heap->allow_cpu_map) {
                pr_err("Trying to map_kernel fake secure ION handle\n");
                return ERR_PTR(-EPERM);
        }

        return ioremap_wc(paddr, buffer->size);
}

static void ion_lma_heap_unmap_kernel(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        iounmap(buffer->vaddr);
}

static struct ion_heap_ops lma_heap_ops = {
        .allocate = ion_lma_heap_allocate,
        .free = ion_lma_heap_free,
        .phys = ion_lma_heap_phys,
        .map_dma = ion_lma_heap_map_dma,
        .unmap_dma = ion_lma_heap_unmap_dma,
        .map_user = ion_lma_heap_map_user,
        .map_kernel = ion_lma_heap_map_kernel,
        .unmap_kernel = ion_lma_heap_unmap_kernel,
};

struct ion_heap *ion_lma_heap_create(struct ion_platform_heap *heap_data,
        bool allow_cpu_map)
{
        struct ion_lma_heap *lma_heap;
        size_t size = heap_data->size;

        /* The heap memory is deliberately not zeroed or cache-maintained
         * from the CPU here; see the comment in ion_lma_heap_free().
         */

        lma_heap = kzalloc(sizeof(*lma_heap), GFP_KERNEL);
        if (!lma_heap)
                return ERR_PTR(-ENOMEM);

        /* 2^12 = 4K minimum allocation order, no NUMA node restriction */
        lma_heap->pool = gen_pool_create(12, -1);
        if (!lma_heap->pool) {
                kfree(lma_heap);
                return ERR_PTR(-ENOMEM);
        }

        lma_heap->base = heap_data->base;
        gen_pool_add(lma_heap->pool, lma_heap->base, size, -1);

        lma_heap->heap.id = heap_data->id;
        lma_heap->heap.ops = &lma_heap_ops;
        lma_heap->heap.name = heap_data->name;
        lma_heap->heap.type = ION_HEAP_TYPE_CUSTOM;
        lma_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

        lma_heap->allow_cpu_map = allow_cpu_map;

        return &lma_heap->heap;
}
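
/* A minimal sketch of how a platform driver might wire this heap up. The
 * heap_data values below are hypothetical; in the apollo driver the
 * base/size describe a carved-out region of local (PCI BAR) memory, and
 * "idev" is the ion device created with ion_device_create():
 *
 *      struct ion_platform_heap data = {
 *              .type = ION_HEAP_TYPE_CUSTOM,
 *              .id   = lma_heap_id,
 *              .name = "lma",
 *              .base = lma_base,
 *              .size = lma_size,
 *      };
 *      struct ion_heap *heap = ion_lma_heap_create(&data, true);
 *
 *      if (!IS_ERR(heap))
 *              ion_device_add_heap(idev, heap);
 */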

void ion_lma_heap_destroy(struct ion_heap *heap)
{
        struct ion_lma_heap *lma_heap =
                container_of(heap, struct ion_lma_heap, heap);
        gen_pool_destroy(lma_heap->pool);
        kfree(lma_heap);
}