int id;
};
+#ifdef CONFIG_RK_IOMMU
+static void ion_iommu_force_unmap(struct ion_buffer *buffer);
+#endif
+
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
return (buffer->flags & ION_FLAG_CACHED) &&
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+#ifdef CONFIG_RK_IOMMU
+ ion_iommu_force_unmap(buffer);
+#endif
buffer->heap->ops->free(buffer);
vfree(buffer->pages);
kfree(buffer);
}
EXPORT_SYMBOL(ion_unmap_kernel);
+#ifdef CONFIG_RK_IOMMU
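+/*
+ * Insert an ion_iommu_map into the buffer's rb-tree of per-device mappings,
+ * keyed by the IOMMU device pointer. Caller holds buffer->lock.
+ */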
+static void ion_iommu_add(struct ion_buffer *buffer,
+ struct ion_iommu_map *iommu)
+{
+ struct rb_node **p = &buffer->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (iommu->key < entry->key) {
+ p = &(*p)->rb_left;
+ } else if (iommu->key > entry->key) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: buffer %p already has mapping for domainid %lx\n",
+ __func__,
+ buffer,
+ iommu->key);
+ /* bail out rather than looping forever on a duplicate key */
+ return;
+ }
+ }
+
+ rb_link_node(&iommu->node, parent, p);
+ rb_insert_color(&iommu->node, &buffer->iommu_maps);
+}
+
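+/*
+ * Look up the mapping for a given key (the IOMMU device pointer); returns
+ * NULL if the buffer has no mapping for that device.
+ */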
+static struct ion_iommu_map *ion_iommu_lookup(
+ struct ion_buffer *buffer,
+ unsigned long key)
+{
+ struct rb_node **p = &buffer->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (key < entry->key)
+ p = &(*p)->rb_left;
+ else if (key > entry->key)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ return NULL;
+}
+
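+/*
+ * Create a new mapping through the heap's map_iommu op, initialise its
+ * refcount and add it to the buffer's rb-tree. Returns the mapping or an
+ * ERR_PTR on failure.
+ */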
+static struct ion_iommu_map *__ion_iommu_map(
+ struct ion_buffer *buffer,
+ struct device *iommu_dev, unsigned long *iova)
+{
+ struct ion_iommu_map *data;
+ int ret;
+
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ data->buffer = buffer;
+ data->key = (unsigned long)iommu_dev;
+
+ ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
+ buffer->size, buffer->flags);
+ if (ret)
+ goto out;
+
+ kref_init(&data->ref);
+ *iova = data->iova_addr;
+
+ ion_iommu_add(buffer, data);
+
+ return data;
+
+out:
+ kfree(data);
+ return ERR_PTR(ret);
+}
+
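+/*
+ * Map the buffer behind @handle into the IOMMU domain of @iommu_dev and
+ * return its I/O virtual address and size. Mappings are refcounted per
+ * device, so a repeated call for the same device reuses the existing one.
+ */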
+int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
+ struct ion_handle *handle, unsigned long *iova,
+ unsigned long *size)
+{
+ struct ion_buffer *buffer;
+ struct ion_iommu_map *iommu_map;
+ int ret = 0;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_kernel.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+
+ if (!handle->buffer->heap->ops->map_iommu) {
+ pr_err("%s: map_iommu is not implemented by this heap.\n",
+ __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (buffer->size & ~PAGE_MASK) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
+ if (!iommu_map) {
+ iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
+ if (IS_ERR(iommu_map))
+ ret = PTR_ERR(iommu_map);
+ } else {
+ if (iommu_map->mapped_size != buffer->size) {
+ pr_err("%s: handle %p is already mapped with length\n"
+ " %d, trying to map with length %zu\n",
+ __func__, handle, iommu_map->mapped_size,
+ buffer->size);
+ ret = -EINVAL;
+ } else {
+ kref_get(&iommu_map->ref);
+ *iova = iommu_map->iova_addr;
+ }
+ }
+ if (!ret)
+ buffer->iommu_map_cnt++;
+
+ *size = buffer->size;
+out:
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ion_map_iommu);
+
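+/*
+ * kref release callback: drop the mapping from the buffer's rb-tree, unmap
+ * it through the heap's unmap_iommu op and free it. Called with
+ * buffer->lock held.
+ */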
+static void ion_iommu_release(struct kref *kref)
+{
+ struct ion_iommu_map *map = container_of(
+ kref,
+ struct ion_iommu_map,
+ ref);
+ struct ion_buffer *buffer = map->buffer;
+
+ rb_erase(&map->node, &buffer->iommu_maps);
+ buffer->heap->ops->unmap_iommu((struct device *)map->key, map);
+ kfree(map);
+}
+
+/**
+ * ion_iommu_force_unmap() - unmap any outstanding IOMMU mappings which
+ * would otherwise be leaked when the buffer is freed.
+ * @buffer: buffer whose mappings are torn down
+ */
+static void ion_iommu_force_unmap(struct ion_buffer *buffer)
+{
+ struct ion_iommu_map *iommu_map;
+ struct rb_node *node;
+ const struct rb_root *rb = &buffer->iommu_maps;
+
+ mutex_lock(&buffer->lock);
+ while ((node = rb_first(rb)) != NULL) {
+ iommu_map = rb_entry(node, struct ion_iommu_map, node);
+ /* set ref count to 1 to force release */
+ kref_init(&iommu_map->ref);
+ kref_put(&iommu_map->ref, ion_iommu_release);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
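+/*
+ * Drop one reference on the mapping of @handle's buffer for @iommu_dev;
+ * the mapping is torn down when the last reference is released.
+ */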
+void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_iommu_map *iommu_map;
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+
+ iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
+ if (!iommu_map) {
+ WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
+ iommu_dev, buffer);
+ goto out;
+ }
+
+ buffer->iommu_map_cnt--;
+ kref_put(&iommu_map->ref, ion_iommu_release);
+out:
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_iommu);
+
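+/* Dump every IOMMU mapping of @buffer to the client's debugfs file. */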
+static int ion_debug_client_show_buffer_map(struct seq_file *s,
+ struct ion_buffer *buffer)
+{
+ struct ion_iommu_map *iommu_map;
+ const struct rb_root *rb;
+ struct rb_node *node;
+
+ mutex_lock(&buffer->lock);
+ rb = &buffer->iommu_maps;
+ node = rb_first(rb);
+ while (node) {
+ iommu_map = rb_entry(node, struct ion_iommu_map, node);
+ seq_printf(s, "%16.16s: 0x%08lx 0x%08x 0x%08x %8zuKB %4d\n",
+ "<iommu>", iommu_map->iova_addr, 0, 0,
+ (size_t)iommu_map->mapped_size >> 10,
+ atomic_read(&iommu_map->ref.refcount));
+ node = rb_next(node);
+ }
+
+ mutex_unlock(&buffer->lock);
+ return 0;
+}
+
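+/*
+ * Dump per-handle buffer details (kernel VA, physical address, size and
+ * refcounts) plus any IOMMU mappings for the client's debugfs file.
+ */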
+static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
+{
+ struct ion_client *client = s->private;
+ struct rb_node *n;
+
+ seq_puts(s, "----------------------------------------------------\n");
+ seq_printf(s, "%16.s: %12.s %12.s %12.s %10.s %4.s %4.s %4.s\n",
+ "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ struct ion_buffer *buffer = handle->buffer;
+ ion_phys_addr_t pa = 0;
+ size_t len = buffer->size;
+
+ mutex_lock(&buffer->lock);
+ if (buffer->heap->ops->phys)
+ buffer->heap->ops->phys(buffer->heap,
+ buffer, &pa, &len);
+
+ seq_printf(s, "%16.16s: 0x%08lx 0x%08lx 0x%08lx %8zuKB %4d %4d %4d\n",
+ buffer->heap->name, (unsigned long)buffer->vaddr, pa,
+ (unsigned long)buffer, len >> 10,
+ buffer->handle_count,
+ atomic_read(&buffer->ref.refcount),
+ atomic_read(&handle->ref.refcount));
+
+ mutex_unlock(&buffer->lock);
+ ion_debug_client_show_buffer_map(s, buffer);
+ }
+
+ mutex_unlock(&client->lock);
+ return 0;
+}
+#endif
+
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
struct ion_client *client = s->private;
continue;
seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
}
+#ifdef CONFIG_RK_IOMMU
+ ion_debug_client_show_buffer(s, unused);
+#endif
return 0;
}