/*
 * drivers/gpu/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"

#include <asm/mach/map.h>

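/*
 * A carveout heap hands out physically contiguous buffers from a fixed
 * region of memory reserved at boot.  The embedded struct ion_heap comes
 * first so the ops callbacks can recover this wrapper via container_of();
 * the gen_pool allocator manages the reserved physical range itself.
 */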
struct ion_carveout_heap {
        struct ion_heap heap;
        struct gen_pool *pool;
        ion_phys_addr_t base;
};

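/*
 * Carve size bytes out of the heap's gen_pool.  Note that the align
 * argument is accepted for API symmetry but not honoured here:
 * gen_pool_alloc() only guarantees the pool's minimum-order alignment
 * (4 KiB, from the order-12 pool created below).
 */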
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
                                      unsigned long size,
                                      unsigned long align)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
        unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);

        if (!offset)
                return ION_CARVEOUT_ALLOCATE_FAIL;

        return offset;
}

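/*
 * Return a block to the gen_pool.  The failure sentinel is tolerated so
 * callers can free unconditionally after a failed allocation.
 */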
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
                       unsigned long size)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
                return;
        gen_pool_free(carveout_heap->pool, addr, size);
}

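/*
 * Carveout buffers are physically contiguous, so the physical address
 * recorded at allocation time plus the buffer size describe the whole
 * buffer.
 */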
static int ion_carveout_heap_phys(struct ion_heap *heap,
                                  struct ion_buffer *buffer,
                                  ion_phys_addr_t *addr, size_t *len)
{
        *addr = buffer->priv_phys;
        *len = buffer->size;
        return 0;
}

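/*
 * Heap-ops wrapper: record the physical block in the buffer's private
 * field; -ENOMEM if the pool is exhausted.
 */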
static int ion_carveout_heap_allocate(struct ion_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long size, unsigned long align,
                                      unsigned long flags)
{
        buffer->priv_phys = ion_carveout_allocate(heap, size, align);
        return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}

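/*
 * Heap-ops wrapper: return the block to the pool and poison the stored
 * address so stale buffers are recognisable.
 */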
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;

        ion_carveout_free(heap, buffer->priv_phys, buffer->size);
        buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}

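/*
 * This version of the carveout heap does not provide a scatterlist for
 * DMA, so map_dma reports -EINVAL and unmap_dma has nothing to undo.
 */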
struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap,
                                              struct ion_buffer *buffer)
{
        return ERR_PTR(-EINVAL);
}

void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
}

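/*
 * Kernel mappings of carveout memory are created non-cached; the memory
 * is typically shared with devices that are not coherent with the CPU
 * caches.
 */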
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
                                   struct ion_buffer *buffer)
{
        return __arm_ioremap(buffer->priv_phys, buffer->size,
                             MT_MEMORY_NONCACHED);
}

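/* Tear down the ioremap mapping and clear the cached kernel address. */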
void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
                                    struct ion_buffer *buffer)
{
        __arm_iounmap(buffer->vaddr);
        buffer->vaddr = NULL;
}

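/*
 * Map the buffer into a userspace VMA, again non-cached.  vm_pgoff is
 * honoured so callers may map at an offset within the buffer.
 */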
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                               struct vm_area_struct *vma)
{
        return remap_pfn_range(vma, vma->vm_start,
                               __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               pgprot_noncached(vma->vm_page_prot));
}

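/*
 * Dispatch table handed to the ion core.  map_dma/unmap_dma are wired up
 * even though DMA mapping is unsupported, so the core receives an
 * explicit -EINVAL rather than dereferencing a NULL callback.
 */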
static struct ion_heap_ops carveout_heap_ops = {
        .allocate = ion_carveout_heap_allocate,
        .free = ion_carveout_heap_free,
        .phys = ion_carveout_heap_phys,
        .map_dma = ion_carveout_heap_map_dma,
        .unmap_dma = ion_carveout_heap_unmap_dma,
        .map_user = ion_carveout_heap_map_user,
        .map_kernel = ion_carveout_heap_map_kernel,
        .unmap_kernel = ion_carveout_heap_unmap_kernel,
};

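/*
 * Instantiate a carveout heap from platform data.  The pool is created
 * with a minimum allocation order of 12 (4 KiB granularity) and seeded
 * with the [base, base + size) range reserved by the platform; the -1
 * nid means any NUMA node.  A minimal usage sketch (field values are
 * illustrative only, not taken from this file):
 *
 *      struct ion_platform_heap pdata = {
 *              .type = ION_HEAP_TYPE_CARVEOUT,
 *              .name = "carveout",
 *              .base = 0x80000000,
 *              .size = SZ_16M,
 *      };
 *      struct ion_heap *heap = ion_carveout_heap_create(&pdata);
 */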
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_carveout_heap *carveout_heap;

        carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
        if (!carveout_heap)
                return ERR_PTR(-ENOMEM);

        carveout_heap->pool = gen_pool_create(12, -1);
        if (!carveout_heap->pool) {
                kfree(carveout_heap);
                return ERR_PTR(-ENOMEM);
        }
        carveout_heap->base = heap_data->base;
        gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
                     -1);
        carveout_heap->heap.ops = &carveout_heap_ops;
        carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;

        return &carveout_heap->heap;
}

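/* Tear down everything ion_carveout_heap_create() set up. */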
void ion_carveout_heap_destroy(struct ion_heap *heap)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        gen_pool_destroy(carveout_heap->pool);
        kfree(carveout_heap);
}