/* if the client doesn't support this heap type */
if (!((1 << heap->type) & client->heap_mask))
continue;
--
/* if the caller didn't specify this heap id */
if (!((1 << heap->id) & flags))
continue;
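Both checks are plain bit tests: client->heap_mask is indexed by heap type, while the allocation flags word is indexed by heap id, so a caller selects heaps by setting the corresponding id bits. A minimal caller-side sketch of forming such a bit, using the ION_VPU_ID heap id that appears later in this patch (the helper itself is hypothetical):

/* Sketch only: turn a heap id into the bit tested by the flags check above.
 * ION_VPU_ID comes from this patch; the helper name is illustrative. */
static inline unsigned long ion_heap_id_to_bit(int heap_id)
{
	return 1UL << heap_id;	/* e.g. ion_heap_id_to_bit(ION_VPU_ID) */
}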
static long ion_share_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ion_buffer *buffer = filp->private_data;
--
++
switch (cmd) {
case PMEM_GET_PHYS:
{
if (parent->f_flags & O_DSYNC)
file->f_flags |= O_DSYNC;
--
++
ion_buffer_get(handle->buffer);
fd_install(fd, file);
case ION_IOC_ALLOC:
{
struct ion_allocation_data data;
++
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
--
data.handle = ion_alloc(client, data.len, data.align,
data.flags);
if (IS_ERR_OR_NULL(data.handle)) {
printk("%s: alloc 0x%x bytes failed\n", __func__, data.len);
-- return -ENOMEM;
-- }
++ return -ENOMEM;
++ }
if (copy_to_user((void __user *)arg, &data, sizeof(data)))
return -EFAULT;
break;
return err;
}
-- err = data.handle->buffer->heap->ops->cache_op(buffer->heap, buffer,
++ err = data.handle->buffer->heap->ops->cache_op(buffer->heap, buffer,
data.virt, data.size, cmd);
mutex_unlock(&client->lock);
if (err < 0)
return err;
break;
}
- case ION_GET_CLIENT:
++ case ION_GET_CLIENT:
+ {
+ struct ion_handle *handle;
+ struct ion_client_data data;
+ struct rb_node *n;
+
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_client_data)))
+ return -EFAULT;
+
+ mutex_lock(&client->lock);
+ switch (data.type) {
+ case ION_TYPE_GET_TOTAL_SIZE:
+ data.total_size = 0;
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ handle = rb_entry(n, struct ion_handle, node);
+ data.total_size += handle->buffer->size;
+ }
+ break;
+ case ION_TYPE_SIZE_GET_COUNT:
+ data.count = 0;
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ handle = rb_entry(n, struct ion_handle, node);
+ if (handle->buffer->size == data.size)
+ data.count++;
+ }
+ break;
+ }
+ mutex_unlock(&client->lock);
+ if (copy_to_user((void __user *)arg, &data,
+ sizeof(struct ion_client_data)))
+ return -EFAULT;
+ break;
+ }
default:
return -ENOTTY;
}
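For reference, a userspace sketch of driving the ION_GET_CLIENT command added above. The ioctl request macros and the struct ion_client_data layout are not shown in this excerpt, so this assumes the driver's uapi header provides ION_GET_CLIENT, ION_TYPE_GET_TOTAL_SIZE and a struct with the fields the handler reads and writes (type, size, count, total_size); the function name is illustrative.

#include <stddef.h>
#include <sys/ioctl.h>

/* Query how many bytes of ION memory this client currently holds (sketch). */
int ion_client_total_size(int ion_fd, size_t *total)
{
	struct ion_client_data data = { .type = ION_TYPE_GET_TOTAL_SIZE };

	if (ioctl(ion_fd, ION_GET_CLIENT, &data) < 0)
		return -1;

	*total = data.total_size;
	return 0;
}

ION_IOC_ALLOC follows the same copy_from_user/copy_to_user pattern with struct ion_allocation_data (len, align, flags, handle).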
.llseek = seq_lseek,
.release = single_release,
};
++
struct ion_device *ion_device_create(long (*custom_ioctl)
(struct ion_client *client,
unsigned int cmd,
* GNU General Public License for more details.
*
*/
++#include <linux/spinlock.h>
-- #include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
--#include <asm/mach/map.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
--
#include "ion_priv.h"
--#define ION_CACHED
++#include <asm/mach/map.h>
++
#define RESERVED_SIZE(total) ((total)/10)
++
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
ion_phys_addr_t base;
--
unsigned long allocated_bytes;
unsigned long total_size;
unsigned long vpu_allocated_bytes;
unsigned long max_allocated;
unsigned long bit_nr;
unsigned long *bits;
};
++
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
unsigned long size,
unsigned long align,
unsigned long flags)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
-- unsigned long offset;
++ unsigned long offset;
unsigned long free_size = carveout_heap->total_size - carveout_heap->allocated_bytes;
-- if((flags & (1<<ION_VPU_ID)) &&
-- (free_size < RESERVED_SIZE(carveout_heap->total_size))){
-- printk("%s: heap %s has not enough memory for vpu: vpu allocated(%luM)\n",
++ if ((flags & (1<<ION_VPU_ID)) &&
++ (free_size < RESERVED_SIZE(carveout_heap->total_size))) {
++ printk("%s: heap %s has not enough memory for vpu: vpu allocated(%luM)\n",
__func__, heap->name, carveout_heap->vpu_allocated_bytes/SZ_1M);
return ION_CARVEOUT_ALLOCATE_FAIL;
}
" the allocation of size %lu pages still failed."
" Memory is probably fragmented.\n",
__func__, heap->name,
-- (carveout_heap->total_size - carveout_heap->allocated_bytes)/SZ_1K,
++ (carveout_heap->total_size - carveout_heap->allocated_bytes)/SZ_1K,
size/SZ_1K);
else
printk("%s: heap %s has not enough memory(%luK)"
"the alloction of size is %luK.\n",
__func__, heap->name,
-- (carveout_heap->total_size - carveout_heap->allocated_bytes)/SZ_1K,
++ (carveout_heap->total_size - carveout_heap->allocated_bytes)/SZ_1K,
size/SZ_1K);
return ION_CARVEOUT_ALLOCATE_FAIL;
}
if ((offset + size - carveout_heap->base) > carveout_heap->max_allocated)
carveout_heap->max_allocated = offset + size - carveout_heap->base;
-- bitmap_set(carveout_heap->bits,
++ bitmap_set(carveout_heap->bits,
(offset - carveout_heap->base)/PAGE_SIZE, size/PAGE_SIZE);
return offset;
}
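A quick worked example of the bookkeeping above, assuming a 128 MiB carveout and 4 KiB pages: RESERVED_SIZE(total) is 12.8 MiB, so once free space drops below that, any request carrying the ION_VPU_ID flag is rejected by the check at the top of the function. On a successful 16 KiB allocation placed at base + 0x3000, bitmap_set() marks bits 3 through 6 of carveout_heap->bits, since (offset - base)/PAGE_SIZE = 3 and size/PAGE_SIZE = 4.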
if (flags & (1 << ION_VPU_ID))
carveout_heap->vpu_allocated_bytes -= size;
carveout_heap->allocated_bytes -= size;
-- bitmap_clear(carveout_heap->bits,
++ bitmap_clear(carveout_heap->bits,
(addr - carveout_heap->base)/PAGE_SIZE, size/PAGE_SIZE);
}
vma->vm_end - vma->vm_start,
pgprot_noncached(vma->vm_page_prot));
+ buffer->vm_start = vma->vm_start;
return err;
}
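The mapping above is created with pgprot_noncached, so CPU accesses through it bypass the caches. A userspace sketch of mapping a buffer, assuming the fd comes from the share path shown earlier (ion_share_ioctl()/fd_install()); the function name is illustrative:

#include <stddef.h>
#include <sys/mman.h>

/* Map an ION buffer fd obtained from the driver's share path (sketch). */
static void *map_ion_buffer(int buf_fd, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);

	return p == MAP_FAILED ? NULL : p;
}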
++
int ion_carveout_cache_op(struct ion_heap *heap, struct ion_buffer *buffer,
void *virt, size_t size, unsigned int cmd)
{
/* Kernel virtual range for the dmac_* (L1) maintenance calls below. */
unsigned long start = (unsigned long)virt;
unsigned long end = start + size;

switch (cmd) {
case ION_CACHE_FLUSH:
dmac_flush_range((void *)start, (void *)end);
-- outer_flush_range(buffer->priv_phys,buffer->priv_phys + size);
++ outer_flush_range(buffer->priv_phys, buffer->priv_phys + size);
break;
case ION_CACHE_CLEAN:
-- /* When cleaning, always clean the innermost (L1) cache first
++ /* When cleaning, always clean the innermost (L1) cache first
* and then clean the outer cache(s).
*/
dmac_clean_range((void *)start, (void *)end);
-- outer_clean_range(buffer->priv_phys,buffer->priv_phys + size);
++ outer_clean_range(buffer->priv_phys, buffer->priv_phys + size);
break;
case ION_CACHE_INVALID:
-- /* When invalidating, always invalidate the outermost cache first
++ /* When invalidating, always invalidate the outermost cache first
* and the L1 cache last.
*/
-- outer_inv_range(buffer->priv_phys,buffer->priv_phys + size);
++ outer_inv_range(buffer->priv_phys, buffer->priv_phys + size);
dmac_inv_range((void *)start, (void *)end);
break;
default:
container_of(heap, struct ion_carveout_heap, heap);
for (i = carveout_heap->bit_nr/8 - 1; i >= 0; i--) {
-- seq_printf(s, "%.3uM> Bits[%.3d - %.3d]: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
++ seq_printf(s, "%.3uM> Bits[%.3d - %.3d]: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
i+1, i*8 + 7, i*8,
carveout_heap->bits[i*8 + 7],
carveout_heap->bits[i*8 + 6],
carveout_heap->allocated_bytes/SZ_1M);
seq_printf(s, "max_allocated: %luM\n",
carveout_heap->max_allocated/SZ_1M);
-- seq_printf(s, "Heap size: %luM, heap base: 0x%lx\n",
++ seq_printf(s, "Heap size: %luM, heap base: 0x%lx\n",
carveout_heap->total_size/SZ_1M, carveout_heap->base);
return 0;
}
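Each line of this dump covers eight bitmap words, i.e. 1 MiB of the heap with 4 KiB pages on a 32-bit kernel. For i = 0 the format string above would print something like "001M> Bits[007 - 000]: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 0000000f", where the trailing f indicates that the first four pages of the heap are allocated.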
++
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
carveout_heap->max_allocated = 0;
carveout_heap->total_size = heap_data->size;
carveout_heap->bit_nr = heap_data->size/(PAGE_SIZE * sizeof(unsigned long) * 8);
-- carveout_heap->bits =
++ carveout_heap->bits =
(unsigned long *)kzalloc(carveout_heap->bit_nr * sizeof(unsigned long), GFP_KERNEL);
return &carveout_heap->heap;