return err;
break;
}
+	case ION_GET_CLIENT:
+	{
+		struct ion_handle *handle;
+		struct ion_client_data data;
+		struct rb_node *n;
+
+		if (copy_from_user(&data, (void __user *)arg,
+				   sizeof(struct ion_client_data)))
+			return -EFAULT;
+
+		mutex_lock(&client->lock);
+		switch (data.type) {
+		case ION_TYPE_GET_TOTAL_SIZE:
+			/* Sum the sizes of all buffers held by this client. */
+			data.total_size = 0;
+			for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+				handle = rb_entry(n, struct ion_handle, node);
+				data.total_size += handle->buffer->size;
+			}
+			break;
+		case ION_TYPE_SIZE_GET_COUNT:
+			/* Count this client's buffers that match the given size. */
+			data.count = 0;
+			for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+				handle = rb_entry(n, struct ion_handle, node);
+				if (handle->buffer->size == data.size)
+					data.count++;
+			}
+			break;
+		default:
+			mutex_unlock(&client->lock);
+			return -EINVAL;
+		}
+		mutex_unlock(&client->lock);
+		if (copy_to_user((void __user *)arg, &data,
+				 sizeof(struct ion_client_data)))
+			return -EFAULT;
+		break;
+	}
default:
return -ENOTTY;
}
void *virt;
size_t size;
};
+/**
+ * struct ion_client_data - per-client buffer statistics for ION_GET_CLIENT
+ * @type:	ION_TYPE_GET_TOTAL_SIZE or ION_TYPE_SIZE_GET_COUNT
+ * @size:	in: buffer size to match when @type is ION_TYPE_SIZE_GET_COUNT
+ * @total_size:	out: sum of all buffer sizes when @type is ION_TYPE_GET_TOTAL_SIZE
+ * @count:	out: number of buffers whose size equals @size
+ */
+struct ion_client_data {
+#define ION_TYPE_GET_TOTAL_SIZE	0
+#define ION_TYPE_SIZE_GET_COUNT	1
+	unsigned int type;
+	union {
+		size_t size;
+		size_t total_size;
+	};
+	unsigned int count;
+};
#define ION_IOC_MAGIC 'I'
/**
#define ION_CACHE_CLEAN _IOWR(ION_IOC_MAGIC, 8, struct ion_flush_data)
#define ION_CACHE_INVALID _IOWR(ION_IOC_MAGIC, 9, struct ion_flush_data)
#define ION_GET_PHYS _IOWR(ION_IOC_MAGIC, 10, unsigned long)
+#define ION_GET_CLIENT _IOWR(ION_IOC_MAGIC, 11, struct ion_client_data)
#endif /* _LINUX_ION_H */
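
For reference, a minimal userspace sketch of how the new ioctl might be exercised. It assumes the ION client is obtained by opening /dev/ion and that the updated header is available as <linux/ion.h>; both paths are assumptions of this sketch, not part of the patch, and error handling is trimmed.

/*
 * Illustrative only: query a client's total buffer size and the number
 * of 4 KiB buffers it holds via ION_GET_CLIENT.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ion.h>		/* assumed install location of the header above */

int main(void)
{
	struct ion_client_data data;
	int fd = open("/dev/ion", O_RDWR);	/* assumed ION device node */

	if (fd < 0)
		return 1;

	/* Total bytes currently held by this client's handles. */
	memset(&data, 0, sizeof(data));
	data.type = ION_TYPE_GET_TOTAL_SIZE;
	if (ioctl(fd, ION_GET_CLIENT, &data) == 0)
		printf("total size: %zu\n", data.total_size);

	/* Number of this client's buffers that are exactly 4 KiB. */
	memset(&data, 0, sizeof(data));
	data.type = ION_TYPE_SIZE_GET_COUNT;
	data.size = 4096;
	if (ioctl(fd, ION_GET_CLIENT, &data) == 0)
		printf("4K buffers: %u\n", data.count);

	close(fd);
	return 0;
}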