}
EXPORT_SYMBOL(ion_unmap_kernel);
+#ifdef CONFIG_ROCKCHIP_IOMMU
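+/*
+ * Each ion_buffer keeps an rbtree of its iommu mappings, keyed by the
+ * mapping device, so one buffer can be mapped into several iommu domains
+ * at once while each per-device mapping is created only once and then
+ * reference counted.
+ */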
+static void ion_iommu_add(struct ion_buffer *buffer,
+ struct ion_iommu_map *iommu)
+{
+ struct rb_node **p = &buffer->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (iommu->key < entry->key) {
+ p = &(*p)->rb_left;
+ } else if (iommu->key > entry->key) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: buffer %p already has mapping for domainid %x\n",
+ __func__,
+ buffer,
+ iommu->key);
+ BUG();
+ }
+ }
+
+ rb_link_node(&iommu->node, parent, p);
+ rb_insert_color(&iommu->node, &buffer->iommu_maps);
+}
+
+static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
+ uint32_t key)
+{
+ struct rb_node **p = &buffer->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (key < entry->key)
+ p = &(*p)->rb_left;
+ else if (key > entry->key)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ return NULL;
+}
+
+static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
+ struct device *iommu_dev, unsigned long *iova)
+{
+ struct ion_iommu_map *data;
+ int ret;
+
+ /* callers hold sleeping locks, so GFP_KERNEL is safe here */
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ data->buffer = buffer;
+ data->key = (uint32_t)iommu_dev;
+
+ ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
+ buffer->size, buffer->flags);
+ if (ret)
+ goto out;
+
+ kref_init(&data->ref);
+ *iova = data->iova_addr;
+
+ ion_iommu_add(buffer, data);
+
+ return data;
+
+out:
+ kfree(data);
+ return ERR_PTR(ret);
+}
+
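+/**
+ * ion_map_iommu() - map a buffer into a device's iommu address space
+ * @iommu_dev:	device whose iommu should map the buffer
+ * @client:	the ion client that owns @handle
+ * @handle:	handle to the buffer to map
+ * @iova:	out parameter for the mapped iommu virtual address
+ * @size:	out parameter for the size of the mapping
+ *
+ * If the buffer is already mapped for @iommu_dev, the existing mapping's
+ * reference count is raised instead of creating a second mapping.
+ * Returns 0 on success or a negative errno on failure.
+ */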
+int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
+ struct ion_handle *handle, unsigned long *iova, unsigned long *size)
+{
+ struct ion_buffer *buffer;
+ struct ion_iommu_map *iommu_map;
+ int ret = 0;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_kernel.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+ pr_debug("%s: map buffer(%x)\n", __func__, buffer);
+
+ mutex_lock(&buffer->lock);
+
+ if (ION_IS_CACHED(buffer->flags)) {
+ pr_err("%s: Cannot map iommu as cached.\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!buffer->heap->ops->map_iommu) {
+ pr_err("%s: map_iommu is not implemented by this heap.\n",
+ __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (buffer->size & ~PAGE_MASK) {
+ pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
+ buffer->size, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
+ if (!iommu_map) {
+ pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
+ iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
+ if (IS_ERR(iommu_map))
+ ret = PTR_ERR(iommu_map);
+ } else {
+ pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
+ if (iommu_map->mapped_size != buffer->size) {
+ pr_err("%s: handle %p is already mapped with length %x, trying to map with length %zx\n",
+ __func__, handle, iommu_map->mapped_size, buffer->size);
+ ret = -EINVAL;
+ } else {
+ kref_get(&iommu_map->ref);
+ *iova = iommu_map->iova_addr;
+ }
+ }
+ if (!ret) {
+ buffer->iommu_map_cnt++;
+ *size = buffer->size;
+ }
+out:
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ion_map_iommu);
+
+static void ion_iommu_release(struct kref *kref)
+{
+ struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
+ ref);
+ struct ion_buffer *buffer = map->buffer;
+
+ rb_erase(&map->node, &buffer->iommu_maps);
+ buffer->heap->ops->unmap_iommu((struct device *)map->key, map);
+ kfree(map);
+}
+
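+/**
+ * ion_unmap_iommu() - drop one reference on a buffer's iommu mapping
+ * @iommu_dev:	device the buffer was mapped for
+ * @client:	the ion client that owns @handle
+ * @handle:	handle to the buffer to unmap
+ *
+ * The mapping is torn down only when the last reference taken by
+ * ion_map_iommu() is dropped.
+ */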
+void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_iommu_map *iommu_map;
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ buffer = handle->buffer;
+ pr_debug("%s: unmap buffer(%x)\n", __func__, buffer);
+
+ mutex_lock(&buffer->lock);
+
+ iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
+
+ if (!iommu_map) {
+ WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
+ iommu_dev, buffer);
+ goto out;
+ }
+
+ kref_put(&iommu_map->ref, ion_iommu_release);
+
+ buffer->iommu_map_cnt--;
+
+out:
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_iommu);
+#endif
+
static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
{
struct ion_client *client = s->private;
*/
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
+int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
+ struct ion_handle *handle, unsigned long *iova, unsigned long *size);
+
+void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
+ struct ion_handle *handle);
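+/*
+ * Minimal usage sketch, assuming @client and @handle come from the usual
+ * ion_client_create()/ion_alloc() calls and @dev sits behind an iommu:
+ *
+ *   unsigned long iova, size;
+ *   int ret;
+ *
+ *   ret = ion_map_iommu(dev, client, handle, &iova, &size);
+ *   if (ret)
+ *           return ret;
+ *   ... hand iova to the device and start DMA ...
+ *   ion_unmap_iommu(dev, client, handle);
+ */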
+
#endif /* _LINUX_ION_H */
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <linux/rockchip_ion.h>
+#include <linux/rockchip/iovmm.h>
#include "ion.h"
#include "ion_priv.h"
return 0;
}
+#ifdef CONFIG_ROCKCHIP_IOMMU
+/* map the buffer into the device's iommu address space */
+static int ion_cma_map_iommu(struct ion_buffer *buffer,
+ struct device *iommu_dev,
+ struct ion_iommu_map *data,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ int ret = 0;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ data->iova_addr = iovmm_map(iommu_dev, info->table->sgl, 0, iova_length);
+ pr_debug("%s: map %lx -> %lx\n", __func__, info->table->sgl->dma_address,
+ data->iova_addr);
+ if (!data->iova_addr || IS_ERR_VALUE(data->iova_addr)) {
+ pr_err("%s: iovmm_map() failed: %lx\n", __func__, data->iova_addr);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ data->mapped_size = iova_length;
+
+out:
+ return ret;
+}
+
+static void ion_cma_unmap_iommu(struct device *iommu_dev,
+ struct ion_iommu_map *data)
+{
+ pr_debug("%s: unmap %x@%lx\n", __func__, data->mapped_size, data->iova_addr);
+ iovmm_unmap(iommu_dev, data->iova_addr);
+}
+#endif
+
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
.cache_op = ion_cma_cache_ops,
+#ifdef CONFIG_ROCKCHIP_IOMMU
+ .map_iommu = ion_cma_map_iommu,
+ .unmap_iommu = ion_cma_unmap_iommu,
+#endif
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+/**
+ * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
+ * @iova_addr: iommu virtual address the buffer is mapped at
+ * @node: rb node in the buffer's tree of iommu mappings
+ * @key: the mapping device pointer cast to an integer, used as the rbtree key
+ * @buffer: the ion buffer this mapping belongs to
+ * @ref: reference count of users of this mapping
+ * @mapped_size: size of the iova space mapped
+ * (may not be the same as the buffer size)
+ *
+ * Represents a mapping of one ion buffer to a particular iommu domain
+ * and address range. There may exist other mappings of this buffer in
+ * different domains or address ranges. All mappings will have the same
+ * cacheability and security.
+ */
+struct ion_iommu_map {
+ unsigned long iova_addr;
+ struct rb_node node;
+ uint32_t key;
+ struct ion_buffer *buffer;
+ struct kref ref;
+ int mapped_size;
+};
+
/**
* struct ion_buffer - metadata for a particular buffer
* @ref: reference count
int handle_count;
char task_comm[TASK_COMM_LEN];
pid_t pid;
+ unsigned int iommu_map_cnt;
+ struct rb_root iommu_maps;
};
void ion_buffer_destroy(struct ion_buffer *buffer);
int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
void *vaddr, unsigned int offset,
unsigned int length, unsigned int cmd);
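+ /* optional iommu hooks; exercised only with CONFIG_ROCKCHIP_IOMMU */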
+ int (*map_iommu)(struct ion_buffer *buffer,
+ struct device *iommu_dev,
+ struct ion_iommu_map *map_data,
+ unsigned long iova_length,
+ unsigned long flags);
+ void (*unmap_iommu)(struct device *iommu_dev, struct ion_iommu_map *data);
};
/**
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/rockchip/iovmm.h>
#include "ion.h"
#include "ion_priv.h"
return nr_total;
}
+#ifdef CONFIG_ROCKCHIP_IOMMU
+/* map the buffer into the device's iommu address space */
+static int ion_system_map_iommu(struct ion_buffer *buffer,
+ struct device *iommu_dev,
+ struct ion_iommu_map *data,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ int ret = 0;
+ struct sg_table *table = buffer->priv_virt;
+
+ data->iova_addr = iovmm_map(iommu_dev, table->sgl, 0, iova_length);
+ pr_debug("%s: map %lx -> %lx\n", __func__, table->sgl->dma_address, data->iova_addr);
+ if (!data->iova_addr || IS_ERR_VALUE(data->iova_addr)) {
+ pr_err("%s: iovmm_map() failed: %lx\n", __func__, data->iova_addr);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ data->mapped_size = iova_length;
+
+out:
+ return ret;
+}
+
+static void ion_system_unmap_iommu(struct device *iommu_dev,
+ struct ion_iommu_map *data)
+{
+ pr_debug("%s: unmap %x@%lx\n", __func__, data->mapped_size, data->iova_addr);
+ iovmm_unmap(iommu_dev, data->iova_addr);
+}
+#endif
+
static struct ion_heap_ops system_heap_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
.unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_heap_map_user,
.shrink = ion_system_heap_shrink,
+#ifdef CONFIG_ROCKCHIP_IOMMU
+ .map_iommu = ion_system_map_iommu,
+ .unmap_iommu = ion_system_unmap_iommu,
+#endif
};
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,