return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
+/*
+ * Allocate a non-coherent DMA buffer: DMA_ATTR_NON_CONSISTENT asks the
+ * platform for memory the caller must synchronize explicitly (cache
+ * maintenance ops) around device access.  Thin wrapper over
+ * dma_alloc_attrs().
+ */
+static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
+
+/*
+ * Free a buffer obtained from dma_alloc_nonconsistent().  The same
+ * DMA_ATTR_NON_CONSISTENT attribute must be passed so the backend tears
+ * the mapping down the same way it was created.
+ */
+static inline void dma_free_nonconsistent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ /* dma_free_attrs() is void; "return"ing its result is non-ISO C. */
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+/*
+ * Map a non-coherent DMA buffer into a userspace VMA.  Passing
+ * DMA_ATTR_NON_CONSISTENT lets the arch code choose cacheable page
+ * protections matching how the buffer was allocated.  Returns the
+ * dma_mmap_attrs() result (0 on success, negative errno on failure).
+ */
+static inline int dma_mmap_nonconsistent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
/*
* This can be called during early boot to increase the size of the atomic
* coherent DMA pool above the default value of 256KiB. It must be called
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
+#include <linux/rockchip_ion.h>
#include "ion.h"
#include "ion_priv.h"
.compat_ioctl = compat_ion_ioctl,
};
+/*
+ * ion_do_cache_op - run a cache maintenance operation on an ION buffer.
+ * @client: client that owns @handle
+ * @handle: handle to the target buffer
+ * @uaddr:  user virtual address the buffer is mapped at
+ * @offset: byte offset into the buffer
+ * @len:    number of bytes to operate on
+ * @cmd:    ION_IOC_CLEAN_CACHES / ION_IOC_INV_CACHES /
+ *          ION_IOC_CLEAN_INV_CACHES
+ *
+ * Uncached buffers need no maintenance and succeed immediately.
+ * Returns 0 on success, -EINVAL for an invalid handle, -ENODEV if the
+ * owning heap implements no cache_op, otherwise the heap's result.
+ */
+int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+ void *uaddr, unsigned long offset, unsigned long len,
+ unsigned int cmd)
+{
+ struct ion_buffer *buffer;
+ int ret = -EINVAL;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to do_cache_op.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+
+ /* Buffers allocated uncached have nothing to clean/invalidate. */
+ if (!ION_IS_CACHED(buffer->flags)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (!buffer->heap->ops->cache_op) {
+ pr_err("%s: cache_op is not implemented by this heap.\n",
+ __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
+ offset, len, cmd);
+
+out:
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ion_do_cache_op);
+
static size_t ion_debug_heap_total(struct ion_client *client,
unsigned int id)
{
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <linux/rockchip_ion.h>
#include "ion.h"
#include "ion_priv.h"
void *cpu_addr;
dma_addr_t handle;
struct sg_table *table;
+ bool is_cached;
};
/*
dev_dbg(dev, "Request buffer allocation len %ld\n", len);
- if (buffer->flags & ION_FLAG_CACHED)
- return -EINVAL;
+// if (buffer->flags & ION_FLAG_CACHED)
+// return -EINVAL;
if (align > PAGE_SIZE)
return -EINVAL;
return ION_CMA_ALLOCATE_FAILED;
}
- info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
- GFP_HIGHUSER | __GFP_ZERO);
+ if (!ION_IS_CACHED(flags)) {
+ info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+ GFP_HIGHUSER | __GFP_ZERO);
+ } else {
+ info->cpu_addr = dma_alloc_nonconsistent(dev, len,
+ &(info->handle), GFP_HIGHUSER | __GFP_ZERO/*0*/);
+ }
if (!info->cpu_addr) {
dev_err(dev, "Fail to allocate buffer\n");
goto free_mem;
}
+ info->is_cached = ION_IS_CACHED(flags);
+
if (ion_cma_get_sgtable
(dev, info->table, info->cpu_addr, info->handle, len))
goto free_table;
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
- buffer->size);
+
+ if (info->is_cached)
+ return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
+ info->handle, buffer->size);
+ else
+ return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+ buffer->size);
}
static void *ion_cma_map_kernel(struct ion_heap *heap,
{
}
+/*
+ * ion_cma_cache_ops - inner (L1) + outer (L2) cache maintenance for a
+ * CMA-backed ION buffer.  @vaddr is the CPU address to operate on and
+ * @length the byte count; clean pushes dirty lines to memory, invalidate
+ * discards stale lines, flush does both.
+ *
+ * NOTE(review): @offset is accepted but never applied — the outer op
+ * always runs from the buffer base (info->handle) for @length bytes.
+ * Callers appear to pass an already-offset @vaddr; confirm whether the
+ * outer range should be info->handle + offset.
+ */
+int ion_cma_cache_ops(struct ion_heap *heap,
+ struct ion_buffer *buffer, void *vaddr,
+ unsigned int offset, unsigned int length,
+ unsigned int cmd)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+ void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+
+ /* Pick the matching inner-cache op now, defer the outer op. */
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_map_area(vaddr, length, DMA_TO_DEVICE);
+ outer_cache_op = outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_unmap_area(vaddr, length, DMA_FROM_DEVICE);
+ outer_cache_op = outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(vaddr, vaddr + length);
+ outer_cache_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Outer cache works on physical addresses: use the DMA handle. */
+ outer_cache_op(info->handle, info->handle + length);
+
+ return 0;
+}
+
+
+/* Heap operations for the CMA heap; .cache_op added for cached buffers. */
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
+ .cache_op = ion_cma_cache_ops,
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma);
int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+ int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
+ void *vaddr, unsigned int offset,
+ unsigned int length, unsigned int cmd);
};
/**
int id);
extern int ion_handle_put(struct ion_handle *handle);
+extern int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+ void *uaddr, unsigned long offset, unsigned long len,
+ unsigned int cmd);
+
static struct ion_heap_desc ion_heap_meta[] = {
{
.id = ION_SYSTEM_HEAP_ID,
}
EXPORT_SYMBOL(rockchip_ion_client_create);
+/*
+ * check_vaddr_bounds - verify [start, end) lies entirely inside a single
+ * VMA of the current task's address space.
+ *
+ * Returns 0 when the range is valid, nonzero otherwise (callers treat any
+ * nonzero result as "out of bounds").
+ *
+ * NOTE(review): uses current->active_mm rather than current->mm; for an
+ * ioctl issued from userspace the two coincide — confirm this helper is
+ * never reached from a kernel-thread context.
+ */
+static int check_vaddr_bounds(unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = current->active_mm;
+ struct vm_area_struct *vma;
+ int ret = 1;
+
+ /* Reject wrapped/inverted ranges outright. */
+ if (end < start)
+ goto out;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, start);
+ /* find_vma() gives the first VMA with vm_end > start; the whole
+ * range must fall inside it. */
+ if (vma && vma->vm_start < end) {
+ if (start < vma->vm_start)
+ goto out_up;
+ if (end > vma->vm_end)
+ goto out_up;
+ ret = 0;
+ }
+
+out_up:
+ up_read(&mm->mmap_sem);
+out:
+ return ret;
+}
+
+
static long rockchip_custom_ioctl (struct ion_client *client, unsigned int cmd,
unsigned long arg)
{
case ION_IOC_CLEAN_CACHES:
case ION_IOC_INV_CACHES:
case ION_IOC_CLEAN_INV_CACHES:
+ {
+ struct ion_flush_data data;
+ unsigned long start, end;
+ struct ion_handle *handle = NULL;
+ int ret;
+ int is_import_fd = 0;
+
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_flush_data)))
+ return -EFAULT;
+
+ start = (unsigned long) data.vaddr;
+ end = (unsigned long) data.vaddr + data.length;
+
+ /* The user-supplied range must sit inside one mapped VMA. */
+ if (check_vaddr_bounds(start, end)) {
+ pr_err("%s: virtual address %p is out of bounds\n",
+ __func__, data.vaddr);
+ return -EINVAL;
+ }
+
+ /* Prefer the handle id; fall back to importing the dma-buf fd. */
+ handle = ion_handle_get_by_id(client, data.handle);
+
+ if (IS_ERR(handle)) {
+ handle = ion_import_dma_buf(client, data.fd);
+ is_import_fd = 1;
+ if (IS_ERR(handle)) {
+ /* Use PTR_ERR(): casting the error pointer to
+ * int truncates on 64-bit and hides the errno. */
+ pr_err("%s: Could not import handle: %ld\n",
+ __func__, PTR_ERR(handle));
+ return -EINVAL;
+ }
+ }
+
+ ret = ion_do_cache_op(client, handle,
+ data.vaddr, data.offset, data.length, cmd);
+
+ /* Drop whichever reference we took above. */
+ if (is_import_fd)
+ ion_free(client, handle);
+ else
+ ion_handle_put(handle);
+
+ if (ret < 0)
+ return ret;
break;
+ }
case ION_IOC_GET_PHYS:
{
struct ion_phys_data data;