/*
 * drivers/gpu/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include <asm/mach/map.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

#include "ion_priv.h"

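/*
 * RESERVED_SIZE() holds the last tenth of the heap back from allocations
 * that carry the ION_VPU_ID flag: once free space drops below that
 * threshold, VPU-flagged requests are refused while other clients may
 * still allocate.  The bookkeeping fields below feed that check and the
 * debug output in ion_carveout_print_debug().
 */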
#define ION_CACHED
#define RESERVED_SIZE(total)    ((total)/10)

struct ion_carveout_heap {
        struct ion_heap heap;
        struct gen_pool *pool;
        ion_phys_addr_t base;

        unsigned long allocated_bytes;
        unsigned long vpu_allocated_bytes;
        unsigned long max_allocated;
        unsigned long total_size;
        unsigned long bit_nr;
        unsigned long *bits;
};
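
/*
 * Carve a physically contiguous range out of the gen_pool.  On success the
 * usage counters and the per-page bitmap are updated and the physical
 * address is returned; on failure ION_CARVEOUT_ALLOCATE_FAIL is returned.
 * Note that @align is currently not passed on to the gen_pool allocator.
 */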
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
                                      unsigned long size,
                                      unsigned long align,
                                      unsigned long flags)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
        unsigned long offset;
        unsigned long free_size = carveout_heap->total_size - carveout_heap->allocated_bytes;

        if ((flags & (1<<ION_VPU_ID)) &&
                (free_size < RESERVED_SIZE(carveout_heap->total_size))) {
                printk("%s: heap %s does not have enough memory for VPU: VPU allocated (%luM)\n",
                        __func__, heap->name, carveout_heap->vpu_allocated_bytes/SZ_1M);
                return ION_CARVEOUT_ALLOCATE_FAIL;
        }
        offset = gen_pool_alloc(carveout_heap->pool, size);

        if (!offset) {
                if ((carveout_heap->total_size -
                      carveout_heap->allocated_bytes) > size)
                        printk("%s: heap %s has enough memory (%luK) but"
                                " the allocation of size %luK still failed."
                                " Memory is probably fragmented.\n",
                                __func__, heap->name,
                                (carveout_heap->total_size - carveout_heap->allocated_bytes)/SZ_1K,
                                size/SZ_1K);
                else
                        printk("%s: heap %s does not have enough memory (%luK)"
                                " for an allocation of size %luK.\n",
                                __func__, heap->name,
                                (carveout_heap->total_size - carveout_heap->allocated_bytes)/SZ_1K,
                                size/SZ_1K);
                return ION_CARVEOUT_ALLOCATE_FAIL;
        }

        if (flags & (1<<ION_VPU_ID))
                carveout_heap->vpu_allocated_bytes += size;
        carveout_heap->allocated_bytes += size;

        if ((offset + size - carveout_heap->base) > carveout_heap->max_allocated)
                carveout_heap->max_allocated = offset + size - carveout_heap->base;

        bitmap_set(carveout_heap->bits,
                (offset - carveout_heap->base)/PAGE_SIZE, size/PAGE_SIZE);
        return offset;
}

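/*
 * Give a range back to the gen_pool and undo the bookkeeping done by
 * ion_carveout_allocate().
 */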
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
                       unsigned long size, unsigned long flags)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
                return;
        gen_pool_free(carveout_heap->pool, addr, size);
        if (flags & (1<<ION_VPU_ID))
                carveout_heap->vpu_allocated_bytes -= size;
        carveout_heap->allocated_bytes -= size;
        bitmap_clear(carveout_heap->bits,
                (addr - carveout_heap->base)/PAGE_SIZE, size/PAGE_SIZE);
}

static int ion_carveout_heap_phys(struct ion_heap *heap,
                                  struct ion_buffer *buffer,
                                  ion_phys_addr_t *addr, size_t *len)
{
        *addr = buffer->priv_phys;
        *len = buffer->size;
        return 0;
}

static int ion_carveout_heap_allocate(struct ion_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long size, unsigned long align,
                                      unsigned long flags)
{
        buffer->priv_phys = ion_carveout_allocate(heap, size, align, flags);
        return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}

static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;

        ion_carveout_free(heap, buffer->priv_phys, buffer->size, buffer->flags);
        buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}

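/*
 * Scatterlist-based DMA mapping is not supported by this heap; clients
 * obtain the physical address of a buffer through the .phys op instead.
 */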
struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap,
                                              struct ion_buffer *buffer)
{
        return ERR_PTR(-EINVAL);
}

void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        return;
}

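/*
 * Kernel mappings of carveout buffers are created on demand with
 * __arch_ioremap() and are non-cached (MT_MEMORY_NONCACHED).
 */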
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
                                   struct ion_buffer *buffer)
{
        return __arch_ioremap(buffer->priv_phys, buffer->size,
                              MT_MEMORY_NONCACHED);
}

void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
                                    struct ion_buffer *buffer)
{
        __arch_iounmap(buffer->vaddr);
        buffer->vaddr = NULL;
        return;
}

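/*
 * Map the buffer's physical range straight into the user VMA.  The mapping
 * is non-cached unless the caller requested a cached mapping via flags.
 */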
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                               struct vm_area_struct *vma, unsigned long flags)
{
        int err = 0;

        if (ION_IS_CACHED(flags))
                err = remap_pfn_range(vma, vma->vm_start,
                               __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
        else
                err = remap_pfn_range(vma, vma->vm_start,
                               __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               pgprot_noncached(vma->vm_page_prot));

        return err;
}
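
/*
 * Cache maintenance for carveout buffers: the dmac_* calls operate on the
 * kernel virtual range, the outer_* calls on the physical range.  Clean
 * inner-to-outer, invalidate outer-to-inner.
 */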
int ion_carveout_cache_op(struct ion_heap *heap, struct ion_buffer *buffer,
                        void *virt, size_t size, unsigned int cmd)
{
        unsigned long start, end;

        start = (unsigned long)virt;
        end = start + size;
        switch (cmd) {
                case ION_CACHE_FLUSH:
                        dmac_flush_range((void *)start, (void *)end);
                        outer_flush_range(buffer->priv_phys, buffer->priv_phys + size);
                        break;
                case ION_CACHE_CLEAN:
                        /* When cleaning, always clean the innermost (L1)
                         * cache first and then clean the outer cache(s).
                         */
                        dmac_clean_range((void *)start, (void *)end);
                        outer_clean_range(buffer->priv_phys, buffer->priv_phys + size);
                        break;
                case ION_CACHE_INVALID:
                        /* When invalidating, always invalidate the outermost
                         * cache first and the L1 cache last.
                         */
                        outer_inv_range(buffer->priv_phys, buffer->priv_phys + size);
                        dmac_inv_range((void *)start, (void *)end);
                        break;
                default:
                        return -EINVAL;
        }
        return 0;
}

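/*
 * Dump the per-page allocation bitmap and the usage counters to a seq_file.
 */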
static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s)
{
        int i;
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        for (i = carveout_heap->bit_nr/8 - 1; i >= 0; i--) {
                seq_printf(s, "%.3uM> Bits[%.3d - %.3d]: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
                                i+1, i*8 + 7, i*8,
                                carveout_heap->bits[i*8 + 7],
                                carveout_heap->bits[i*8 + 6],
                                carveout_heap->bits[i*8 + 5],
                                carveout_heap->bits[i*8 + 4],
                                carveout_heap->bits[i*8 + 3],
                                carveout_heap->bits[i*8 + 2],
                                carveout_heap->bits[i*8 + 1],
                                carveout_heap->bits[i*8]);
        }
        seq_printf(s, "VPU allocated: %luM\n",
                carveout_heap->vpu_allocated_bytes/SZ_1M);
        seq_printf(s, "Total allocated: %luM\n",
                carveout_heap->allocated_bytes/SZ_1M);
        seq_printf(s, "max_allocated: %luM\n",
                carveout_heap->max_allocated/SZ_1M);
        seq_printf(s, "Heap size: %luM, heap base: 0x%lx\n",
                carveout_heap->total_size/SZ_1M, carveout_heap->base);
        return 0;
}

static struct ion_heap_ops carveout_heap_ops = {
        .allocate = ion_carveout_heap_allocate,
        .free = ion_carveout_heap_free,
        .phys = ion_carveout_heap_phys,
        .map_user = ion_carveout_heap_map_user,
        .map_kernel = ion_carveout_heap_map_kernel,
        .unmap_kernel = ion_carveout_heap_unmap_kernel,
        .cache_op = ion_carveout_cache_op,
        .print_debug = ion_carveout_print_debug,
};

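/*
 * Create a carveout heap covering [base, base + size).  The gen_pool uses a
 * minimum allocation order of 12, i.e. 4 KiB (page-sized) granules, and the
 * bitmap holds one bit per page of the heap for the debug output.
 */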
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_carveout_heap *carveout_heap;

        carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
        if (!carveout_heap)
                return ERR_PTR(-ENOMEM);

        carveout_heap->pool = gen_pool_create(12, -1);
        if (!carveout_heap->pool) {
                kfree(carveout_heap);
                return ERR_PTR(-ENOMEM);
        }
        carveout_heap->base = heap_data->base;
        gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
                     -1);
        carveout_heap->heap.ops = &carveout_heap_ops;
        carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
        carveout_heap->vpu_allocated_bytes = 0;
        carveout_heap->allocated_bytes = 0;
        carveout_heap->max_allocated = 0;
        carveout_heap->total_size = heap_data->size;
        carveout_heap->bit_nr = heap_data->size/(PAGE_SIZE * sizeof(unsigned long) * 8);
        carveout_heap->bits =
                kzalloc(carveout_heap->bit_nr * sizeof(unsigned long), GFP_KERNEL);
        if (!carveout_heap->bits) {
                gen_pool_destroy(carveout_heap->pool);
                kfree(carveout_heap);
                return ERR_PTR(-ENOMEM);
        }

        return &carveout_heap->heap;
}

void ion_carveout_heap_destroy(struct ion_heap *heap)
{
        struct ion_carveout_heap *carveout_heap =
             container_of(heap, struct ion_carveout_heap, heap);

        gen_pool_destroy(carveout_heap->pool);
        kfree(carveout_heap->bits);
        kfree(carveout_heap);
        carveout_heap = NULL;
}