#include <drm/rockchip_drm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-iommu.h>
+#include <linux/genalloc.h>
+#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include <linux/memblock.h>
#include <linux/module.h>
return 0;
}
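+
+/*
+ * rockchip_gem_pool_init - create an allocation pool over the secure
+ * memory carveout referenced by the "secure-memory-region" phandle.
+ *
+ * The phandle is expected to point at a reserved-memory node, for
+ * example (illustrative only, addresses are placeholders):
+ *
+ *	secure_memory: secure-memory@20000000 {
+ *		reg = <0x20000000 0x10000000>;
+ *		no-map;
+ *	};
+ *
+ * The carveout is managed with a genalloc pool at page granularity so
+ * secure buffers can be handed out from it at allocation time.
+ */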
+static int rockchip_gem_pool_init(struct drm_device *drm)
+{
+ struct rockchip_drm_private *private = drm->dev_private;
+ struct device_node *np = drm->dev->of_node;
+ struct device_node *node;
+ phys_addr_t start, size;
+ struct resource res;
+ int ret;
+
+ node = of_parse_phandle(np, "secure-memory-region", 0);
+ if (!node)
+ return -ENXIO;
+
+	ret = of_address_to_resource(node, 0, &res);
+	of_node_put(node);
+	if (ret)
+		return ret;
+ start = res.start;
+ size = resource_size(&res);
+ if (!size)
+ return -ENOMEM;
+
+ private->secure_buffer_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!private->secure_buffer_pool)
+ return -ENOMEM;
+
+	ret = gen_pool_add(private->secure_buffer_pool, start, size, -1);
+	if (ret) {
+		gen_pool_destroy(private->secure_buffer_pool);
+		private->secure_buffer_pool = NULL;
+	}
+
+	return ret;
+}
+
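+/* Tear down the carveout pool created by rockchip_gem_pool_init(). */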
+static void rockchip_gem_pool_destroy(struct drm_device *drm)
+{
+ struct rockchip_drm_private *private = drm->dev_private;
+
+ if (!private->secure_buffer_pool)
+ return;
+
+ gen_pool_destroy(private->secure_buffer_pool);
+}
+
static int rockchip_drm_bind(struct device *dev)
{
struct drm_device *drm_dev;
drm_mode_config_reset(drm_dev);
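+	/*
+	 * The secure pool is optional: without a "secure-memory-region"
+	 * carveout in the DT, this fails and secure buffer allocations
+	 * simply return an error later.
+	 */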
+ rockchip_gem_pool_init(drm_dev);
#ifndef MODULE
show_loader_logo(drm_dev);
#endif
err_fbdev_fini:
rockchip_drm_fbdev_fini(drm_dev);
err_vblank_cleanup:
+ rockchip_gem_pool_destroy(drm_dev);
drm_vblank_cleanup(drm_dev);
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
struct drm_device *drm_dev = dev_get_drvdata(dev);
rockchip_drm_fbdev_fini(drm_dev);
+ rockchip_gem_pool_destroy(drm_dev);
drm_vblank_cleanup(drm_dev);
drm_kms_helper_poll_fini(drm_dev);
component_unbind_all(dev, drm_dev);
#include <linux/completion.h>
#include <linux/dma-attrs.h>
#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
#include <linux/reservation.h>
#include <linux/iommu.h>
#include <linux/pagemap.h>
rk_obj->dma_addr, &rk_obj->dma_attrs);
}
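+
+/*
+ * Allocate the backing store for a secure GEM object from the carveout
+ * pool and build a page array plus sg_table so the buffer can still be
+ * exported through PRIME. Secure buffers are never mapped for CPU access.
+ */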
+static int rockchip_gem_alloc_secure(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_gem_object *obj = &rk_obj->base;
+ struct drm_device *drm = obj->dev;
+ struct rockchip_drm_private *private = drm->dev_private;
+ unsigned long paddr;
+ struct sg_table *sgt;
+ int ret, i;
+
+ if (!private->secure_buffer_pool) {
+ DRM_ERROR("No secure buffer pool found\n");
+ return -ENOMEM;
+ }
+
+ paddr = gen_pool_alloc(private->secure_buffer_pool, rk_obj->base.size);
+ if (!paddr) {
+ DRM_ERROR("failed to allocate secure buffer\n");
+ return -ENOMEM;
+ }
+
+ rk_obj->dma_handle = paddr;
+ rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
+
+ rk_obj->pages = drm_calloc_large(rk_obj->num_pages,
+ sizeof(*rk_obj->pages));
+	if (!rk_obj->pages) {
+		DRM_ERROR("failed to allocate pages.\n");
+		ret = -ENOMEM;
+		goto err_buf_free;
+	}
+
+	for (i = 0; i < rk_obj->num_pages; i++) {
+		rk_obj->pages[i] = phys_to_page(paddr);
+		paddr += PAGE_SIZE;
+	}
+
+ sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_free_pages;
+ }
+
+ rk_obj->sgt = sgt;
+
+ return 0;
+
+err_free_pages:
+ drm_free_large(rk_obj->pages);
+err_buf_free:
+	gen_pool_free(private->secure_buffer_pool, rk_obj->dma_handle,
+		      rk_obj->base.size);
+
+ return ret;
+}
+
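+/*
+ * Undo rockchip_gem_alloc_secure(): drop the page array and sg_table
+ * and return the physical range to the carveout pool.
+ */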
+static void rockchip_gem_free_secure(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_gem_object *obj = &rk_obj->base;
+ struct drm_device *drm = obj->dev;
+ struct rockchip_drm_private *private = drm->dev_private;
+
+ drm_free_large(rk_obj->pages);
+ sg_free_table(rk_obj->sgt);
+ kfree(rk_obj->sgt);
+ gen_pool_free(private->secure_buffer_pool, rk_obj->dma_handle,
+ rk_obj->base.size);
+}
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
bool alloc_kmap)
{
if (!private->domain)
rk_obj->flags |= ROCKCHIP_BO_CONTIG;
- if (rk_obj->flags & ROCKCHIP_BO_CONTIG) {
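+	/* Secure buffers come from the carveout pool and are always contiguous. */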
+ if (rk_obj->flags & ROCKCHIP_BO_SECURE) {
+ rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_SECURE;
+ rk_obj->flags |= ROCKCHIP_BO_CONTIG;
+ if (alloc_kmap) {
+ DRM_ERROR("Not allow alloc secure buffer with kmap\n");
+ return -EINVAL;
+ }
+ ret = rockchip_gem_alloc_secure(rk_obj);
+ if (ret < 0)
+ return ret;
+ } else if (rk_obj->flags & ROCKCHIP_BO_CONTIG) {
rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_CMA;
ret = rockchip_gem_alloc_cma(rk_obj);
if (ret < 0)
if (private->domain)
rockchip_gem_iommu_unmap(rk_obj);
err_free:
- if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_CMA)
+ if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE)
+ rockchip_gem_free_secure(rk_obj);
+ else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_CMA)
rockchip_gem_free_cma(rk_obj);
else
rockchip_gem_put_pages(rk_obj);
vunmap(rk_obj->kvaddr);
- if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SHMEM) {
+ if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SHMEM)
rockchip_gem_put_pages(rk_obj);
- } else {
+ else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE)
+ rockchip_gem_free_secure(rk_obj);
+ else
rockchip_gem_free_cma(rk_obj);
- }
}
static int rockchip_drm_gem_object_mmap_shm(struct drm_gem_object *obj,
*/
vma->vm_flags &= ~VM_PFNMAP;
- if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SHMEM)
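+	/* Secure buffers must stay inaccessible to userspace mappings. */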
+ if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE) {
+ DRM_ERROR("Disallow mmap for secure buffer\n");
+ ret = -EINVAL;
+ } else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SHMEM) {
ret = rockchip_drm_gem_object_mmap_shm(obj, vma);
- else
+ } else {
ret = rockchip_drm_gem_object_mmap_cma(obj, vma);
+ }
if (ret)
drm_gem_vm_close(vma);
struct drm_rockchip_gem_phys *args = data;
struct rockchip_gem_object *rk_obj;
struct drm_gem_object *obj;
+ int ret = 0;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
if (!(rk_obj->flags & ROCKCHIP_BO_CONTIG)) {
DRM_ERROR("Can't get phys address from non-continus buf.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
args->phy_addr = page_to_phys(rk_obj->pages[0]);
- return 0;
+out:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
}
int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,