#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <drm/drm_sync_helper.h>
#include <drm/drm_vma_manager.h>
+#include <drm/rockchip_drm.h>
+#include <linux/completion.h>
#include <linux/dma-attrs.h>
+#include <linux/dma-buf.h>
+#include <linux/iommu.h>
+#include <linux/reservation.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
-static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
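+/*
+ * rockchip_gem_iommu_map - map the buffer's pages into the IOMMU domain
+ *
+ * Reserves a block of I/O virtual address space from the driver's drm_mm
+ * allocator and maps the buffer's scatterlist into it with iommu_map_sg().
+ * On success, rk_obj->dma_addr holds the IOVA and rk_obj->size the mapped
+ * size.
+ */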
+static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_device *drm = rk_obj->base.dev;
+ struct rockchip_drm_private *private = drm->dev_private;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ ssize_t ret;
+
+ ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
+ rk_obj->base.size, PAGE_SIZE,
+ 0, 0, 0);
+ if (ret < 0) {
+ DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
+ return ret;
+ }
+
+ rk_obj->dma_addr = rk_obj->mm.start;
+
+ ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
+ rk_obj->sgt->nents, prot);
+ if (ret < rk_obj->base.size) {
+ DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
+ ret, rk_obj->base.size);
+ ret = -ENOMEM;
+ goto err_remove_node;
+ }
+
+ rk_obj->size = ret;
+
+ return 0;
+
+err_remove_node:
+ drm_mm_remove_node(&rk_obj->mm);
+
+ return ret;
+}
+
+static void rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_device *drm = rk_obj->base.dev;
+ struct rockchip_drm_private *private = drm->dev_private;
+
+ iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);
+ drm_mm_remove_node(&rk_obj->mm);
+}
+
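+/*
+ * rockchip_gem_get_pages - allocate shmem-backed pages for this object
+ *
+ * Allocates pages with drm_gem_get_pages(), builds a scatterlist covering
+ * them, and flushes the CPU caches so that the device sees coherent data.
+ */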
+static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_device *drm = rk_obj->base.dev;
+ int ret, i;
+ struct scatterlist *s;
+
+ rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
+ if (IS_ERR(rk_obj->pages))
+ return PTR_ERR(rk_obj->pages);
+
+ rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
+
+ rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+ if (IS_ERR(rk_obj->sgt)) {
+ ret = PTR_ERR(rk_obj->sgt);
+ goto err_put_pages;
+ }
+
+ /*
+ * Fake up the SG table so that dma_sync_sg_for_device() can be used
+ * to flush the pages associated with it.
+ *
+ * TODO: Replace this by drm_clflush_sg() once it can be implemented
+ * without relying on symbols that are not exported.
+ */
+ for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
+ sg_dma_address(s) = sg_phys(s);
+
+ dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
+ DMA_TO_DEVICE);
+
+ return 0;
+
+err_put_pages:
+ drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
+ return ret;
+}
+
+static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
+{
+ sg_free_table(rk_obj->sgt);
+ kfree(rk_obj->sgt);
+ drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
+}
+
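+/*
+ * IOMMU-backed allocation: the pages need not be physically contiguous,
+ * since rockchip_gem_iommu_map() makes them contiguous in IOVA space. An
+ * optional kernel mapping is created with vmap().
+ */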
+static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
+ bool alloc_kmap)
+{
+ int ret;
+
+ ret = rockchip_gem_get_pages(rk_obj);
+ if (ret < 0)
+ return ret;
+
+ ret = rockchip_gem_iommu_map(rk_obj);
+ if (ret < 0)
+ goto err_free;
+
+ if (alloc_kmap) {
+ rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!rk_obj->kvaddr) {
+ DRM_ERROR("failed to vmap() buffer\n");
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+ }
+
+ return 0;
+
+err_unmap:
+ rockchip_gem_iommu_unmap(rk_obj);
+err_free:
+ rockchip_gem_put_pages(rk_obj);
+
+ return ret;
+}
+
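+/*
+ * Fallback for devices without an IOMMU: allocate a physically contiguous
+ * buffer through the DMA mapping API.
+ */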
+static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
bool alloc_kmap)
{
struct drm_gem_object *obj = &rk_obj->base;
struct drm_device *drm = obj->dev;

rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
&rk_obj->dma_addr, GFP_KERNEL,
&rk_obj->dma_attrs);
if (!rk_obj->kvaddr) {
- DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size);
+ DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
return -ENOMEM;
}
return 0;
}
-static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
+static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
+ bool alloc_kmap)
{
struct drm_gem_object *obj = &rk_obj->base;
struct drm_device *drm = obj->dev;
+ struct rockchip_drm_private *private = drm->dev_private;
- dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
- &rk_obj->dma_attrs);
+ if (private->domain)
+ return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
+ else
+ return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}
-static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
+{
+ vunmap(rk_obj->kvaddr);
+ rockchip_gem_iommu_unmap(rk_obj);
+ rockchip_gem_put_pages(rk_obj);
+}
+
+static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_gem_object *obj = &rk_obj->base;
+ struct drm_device *drm = obj->dev;
+ dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr,
+ rk_obj->dma_addr, &rk_obj->dma_attrs);
+}
+
+static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
+ if (rk_obj->pages)
+ rockchip_gem_free_iommu(rk_obj);
+ else
+ rockchip_gem_free_dma(rk_obj);
+}
+
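+/*
+ * An IOMMU-backed buffer is not physically contiguous, so it cannot be
+ * mapped with dma_mmap_attrs(); insert its pages into the VMA one by one
+ * instead.
+ */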
+static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ unsigned int i, count = obj->size >> PAGE_SHIFT;
+ unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long uaddr = vma->vm_start;
int ret;
+
+ if (user_count == 0 || user_count > count)
+ return -ENXIO;
+
+ for (i = 0; i < user_count; i++) {
+ ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
+ if (ret)
+ return ret;
+ uaddr += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
struct drm_device *drm = obj->dev;
+ return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+ obj->size, &rk_obj->dma_attrs);
+}
+
+static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+
/*
- * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
+ * We allocated a struct page table for rk_obj, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_pgoff = 0;
+
+ if (rk_obj->pages)
+ ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
+ else
+ ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
- ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
- obj->size, &rk_obj->dma_attrs);
if (ret)
drm_gem_vm_close(vma);
return ret;
}
-struct rockchip_gem_object *
- rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
- bool alloc_kmap)
+static struct rockchip_gem_object *
+rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
struct rockchip_gem_object *rk_obj;
struct drm_gem_object *obj;
- int ret;
size = round_up(size, PAGE_SIZE);

rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
if (!rk_obj)
return ERR_PTR(-ENOMEM);

obj = &rk_obj->base;
- drm_gem_private_object_init(drm, obj, size);
+ drm_gem_object_init(drm, obj, size);
+
+ return rk_obj;
+}
+
+struct rockchip_gem_object *
+rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
+ bool alloc_kmap)
+{
+ struct rockchip_gem_object *rk_obj;
+ int ret;
+
+ rk_obj = rockchip_gem_alloc_object(drm, size);
+ if (IS_ERR(rk_obj))
+ return rk_obj;
ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
if (ret)
goto err_free_rk_obj;

return rk_obj;

err_free_rk_obj:
kfree(rk_obj);
return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
 * function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
- struct rockchip_gem_object *rk_obj;
+ struct drm_device *drm = obj->dev;
+ struct rockchip_drm_private *private = drm->dev_private;
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
drm_gem_free_mmap_offset(obj);
- rk_obj = to_rockchip_obj(obj);
+ if (obj->import_attach) {
+ if (private->domain) {
+ rockchip_gem_iommu_unmap(rk_obj);
+ } else {
+ dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
+ rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+ drm_prime_gem_destroy(obj, rk_obj->sgt);
+ } else {
+ rockchip_gem_free_buf(rk_obj);
+ }
- rockchip_gem_free_buf(rk_obj);
+#ifdef CONFIG_DRM_DMA_SYNC
+ drm_fence_signal_and_put(&rk_obj->acquire_fence);
+#endif
kfree(rk_obj);
}
/*
* align to 64 bytes since Mali requires it.
*/
- min_pitch = ALIGN(min_pitch, 64);
+ args->pitch = ALIGN(min_pitch, 64);
+ args->size = args->pitch * args->height;
+
+ rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
+ &args->handle);
+
+ return PTR_ERR_OR_ZERO(rk_obj);
+}
+
+int rockchip_gem_map_offset_ioctl(struct drm_device *drm, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_rockchip_gem_map_off *args = data;
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
+ return rockchip_gem_dumb_map_offset(file_priv, drm, args->handle,
+ &args->offset);
+}
- if (args->size < args->pitch * args->height)
- args->size = args->pitch * args->height;
+int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_rockchip_gem_create *args = data;
+ struct rockchip_gem_object *rk_obj;
rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
&args->handle);
-
return PTR_ERR_OR_ZERO(rk_obj);
}
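+
+/*
+ * The reservation object lives on the dma-buf, so it is only present for
+ * objects that have been exported or imported through PRIME.
+ */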
+static struct reservation_object *drm_gem_get_resv(struct drm_gem_object *gem)
+{
+ struct dma_buf *dma_buf = gem->dma_buf;
+
+ return dma_buf ? dma_buf->resv : NULL;
+}
+
+#ifdef CONFIG_DRM_DMA_SYNC
+static void rockchip_gem_acquire_complete(struct drm_reservation_cb *rcb,
+ void *context)
+{
+ struct completion *compl = context;
+ complete(compl);
+}
+
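+/*
+ * Acquire CPU access to a buffer: create a software fence, wait (with
+ * struct_mutex dropped) until the fences already attached to the buffer's
+ * reservation object signal, then attach the new fence so devices in turn
+ * wait for the CPU to release the buffer. A non-exclusive acquire of an
+ * already shared-acquired object only bumps a reference count.
+ */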
+static int rockchip_gem_acquire(struct drm_device *dev,
+ struct rockchip_gem_object *rockchip_gem_obj,
+ bool exclusive)
+{
+ struct fence *fence;
+ struct rockchip_drm_private *dev_priv = dev->dev_private;
+ struct reservation_object *resv =
+ drm_gem_get_resv(&rockchip_gem_obj->base);
+ int ret = 0;
+ struct drm_reservation_cb rcb;
+ DECLARE_COMPLETION_ONSTACK(compl);
+
+ if (!resv)
+ return ret;
+
+ if (!exclusive &&
+ !rockchip_gem_obj->acquire_exclusive &&
+ rockchip_gem_obj->acquire_fence) {
+ atomic_inc(&rockchip_gem_obj->acquire_shared_count);
+ return ret;
+ }
+
+ fence = drm_sw_fence_new(dev_priv->cpu_fence_context,
+ atomic_add_return(1, &dev_priv->cpu_fence_seqno));
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ DRM_ERROR("Failed to create acquire fence %d.\n", ret);
+ return ret;
+ }
+ ww_mutex_lock(&resv->lock, NULL);
+ if (!exclusive) {
+ ret = reservation_object_reserve_shared(resv);
+ if (ret < 0) {
+ DRM_ERROR("Failed to reserve space for shared fence %d.\n",
+ ret);
+ goto resv_unlock;
+ }
+ }
+ drm_reservation_cb_init(&rcb, rockchip_gem_acquire_complete, &compl);
+ ret = drm_reservation_cb_add(&rcb, resv, exclusive);
+ if (ret < 0) {
+ DRM_ERROR("Failed to add reservation to callback %d.\n", ret);
+ goto resv_unlock;
+ }
+ drm_reservation_cb_done(&rcb);
+ if (exclusive)
+ reservation_object_add_excl_fence(resv, fence);
+ else
+ reservation_object_add_shared_fence(resv, fence);
+
+ ww_mutex_unlock(&resv->lock);
+ mutex_unlock(&dev->struct_mutex);
+ ret = wait_for_completion_interruptible(&compl);
+ mutex_lock(&dev->struct_mutex);
+ if (ret < 0) {
+ DRM_ERROR("Failed wait for reservation callback %d.\n", ret);
+ drm_reservation_cb_fini(&rcb);
+ /* somebody else may be already waiting on it */
+ drm_fence_signal_and_put(&fence);
+ return ret;
+ }
+ rockchip_gem_obj->acquire_fence = fence;
+ rockchip_gem_obj->acquire_exclusive = exclusive;
+ atomic_set(&rockchip_gem_obj->acquire_shared_count, 1);
+ return ret;
+
+resv_unlock:
+ ww_mutex_unlock(&resv->lock);
+ fence_put(fence);
+ return ret;
+}
+
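+/*
+ * Drop one CPU-access reference; once the last shared acquirer is gone,
+ * signal and release the acquire fence so blocked devices can proceed.
+ */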
+static void rockchip_gem_release(struct rockchip_gem_object *rockchip_gem_obj)
+{
+ BUG_ON(!rockchip_gem_obj->acquire_fence);
+ if (atomic_dec_and_test(&rockchip_gem_obj->acquire_shared_count))
+ drm_fence_signal_and_put(&rockchip_gem_obj->acquire_fence);
+}
+#endif
+
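+/*
+ * cpu_acquire ioctl: synchronize CPU access to a buffer against pending
+ * device access. The GEM reference taken by the lookup is deliberately kept
+ * until the matching cpu_release ioctl, and the acquisition is tracked on
+ * the file's gem_cpu_acquire_list.
+ */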
+int rockchip_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_rockchip_gem_cpu_acquire *args = data;
+ struct rockchip_drm_file_private *file_priv = file->driver_priv;
+ struct drm_gem_object *obj;
+ struct rockchip_gem_object *rockchip_gem_obj;
+ struct rockchip_gem_object_node *gem_node;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("[BO:%u] flags: 0x%x\n", args->handle, args->flags);
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ rockchip_gem_obj = to_rockchip_obj(obj);
+
+ if (!drm_gem_get_resv(&rockchip_gem_obj->base)) {
+ /* If there is no reservation object present, there is no
+ * cross-process/cross-device sharing and sync is unnecessary.
+ */
+ ret = 0;
+ goto unref_obj;
+ }
+
+#ifdef CONFIG_DRM_DMA_SYNC
+ ret = rockchip_gem_acquire(dev, rockchip_gem_obj,
+ args->flags & DRM_ROCKCHIP_GEM_CPU_ACQUIRE_EXCLUSIVE);
+ if (ret < 0)
+ goto unref_obj;
+#endif
+
+ gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
+ if (!gem_node) {
+ DRM_ERROR("Failed to allocate rockchip_drm_gem_obj_node.\n");
+ ret = -ENOMEM;
+ goto release_sync;
+ }
+
+ gem_node->rockchip_gem_obj = rockchip_gem_obj;
+ list_add(&gem_node->list, &file_priv->gem_cpu_acquire_list);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+
+release_sync:
+#ifdef CONFIG_DRM_DMA_SYNC
+ rockchip_gem_release(rockchip_gem_obj);
+#endif
+unref_obj:
+ drm_gem_object_unreference(obj);
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
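+/*
+ * cpu_release ioctl: end a CPU access window started by the cpu_acquire
+ * ioctl, dropping both the list entry and the extra reference held since
+ * the acquire.
+ */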
+int rockchip_gem_cpu_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_rockchip_gem_cpu_release *args = data;
+ struct rockchip_drm_file_private *file_priv = file->driver_priv;
+ struct drm_gem_object *obj;
+ struct rockchip_gem_object *rockchip_gem_obj;
+ struct list_head *cur;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("[BO:%u]\n", args->handle);
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ rockchip_gem_obj = to_rockchip_obj(obj);
+
+ if (!drm_gem_get_resv(&rockchip_gem_obj->base)) {
+ /* If there is no reservation object present, there is no
+ * cross-process/cross-device sharing and sync is unnecessary.
+ */
+ ret = 0;
+ goto unref_obj;
+ }
+
+ list_for_each(cur, &file_priv->gem_cpu_acquire_list) {
+ struct rockchip_gem_object_node *node = list_entry(
+ cur, struct rockchip_gem_object_node, list);
+ if (node->rockchip_gem_obj == rockchip_gem_obj)
+ break;
+ }
+ if (cur == &file_priv->gem_cpu_acquire_list) {
+ DRM_ERROR("gem object not acquired for current process.\n");
+ ret = -EINVAL;
+ goto unref_obj;
+ }
+
+#ifdef CONFIG_DRM_DMA_SYNC
+ rockchip_gem_release(rockchip_gem_obj);
+#endif
+
+ list_del(cur);
+ kfree(list_entry(cur, struct rockchip_gem_object_node, list));
+ /* unreference for the reference held since cpu_acquire_ioctl */
+ drm_gem_object_unreference(obj);
+ ret = 0;
+
+unref_obj:
+ /* unreference for the reference from drm_gem_object_lookup() */
+ drm_gem_object_unreference(obj);
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
/*
* Allocate a sg_table for this GEM object.
* Note: Both the table's contents, and the sg_table itself must be freed by
*       the caller.
*/
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
struct drm_device *drm = obj->dev;
struct sg_table *sgt;
int ret;
+ if (rk_obj->pages)
+ return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return ERR_PTR(-ENOMEM);

ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
rk_obj->dma_addr, obj->size,
&rk_obj->dma_attrs);
if (ret) {
DRM_ERROR("failed to allocate sgt, %d\n", ret);
kfree(sgt);
return ERR_PTR(ret);
}

return sgt;
}
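+
+/*
+ * Returns the length of the initial DMA-contiguous run of the mapped
+ * scatterlist; used to check that an imported buffer is contiguous when no
+ * IOMMU is available.
+ */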
+static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
+ int count)
+{
+ struct scatterlist *s;
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
+ unsigned int i;
+ unsigned long size = 0;
+
+ for_each_sg(sgt->sgl, s, count, i) {
+ if (sg_dma_address(s) != expected)
+ break;
+ expected = sg_dma_address(s) + sg_dma_len(s);
+ size += sg_dma_len(s);
+ }
+ return size;
+}
+
+static int
+rockchip_gem_iommu_map_sg(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg,
+ struct rockchip_gem_object *rk_obj)
+{
+ rk_obj->sgt = sg;
+ return rockchip_gem_iommu_map(rk_obj);
+}
+
+static int
+rockchip_gem_dma_map_sg(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg,
+ struct rockchip_gem_object *rk_obj)
+{
+ int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
+ DMA_BIDIRECTIONAL);
+ if (!count)
+ return -EINVAL;
+
+ if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
+ DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
+ dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
+ DMA_BIDIRECTIONAL);
+ return -EINVAL;
+ }
+
+ rk_obj->dma_addr = sg_dma_address(sg->sgl);
+ rk_obj->sgt = sg;
+ return 0;
+}
+
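+/*
+ * Import a PRIME buffer: with an IOMMU, the scatterlist is simply mapped
+ * into IOVA space; without one, the buffer must already be physically
+ * contiguous, which rockchip_gem_dma_map_sg() verifies.
+ */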
+struct drm_gem_object *
+rockchip_gem_prime_import_sg_table(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg)
+{
+ struct rockchip_drm_private *private = drm->dev_private;
+ struct rockchip_gem_object *rk_obj;
+ int ret;
+
+ rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
+ if (IS_ERR(rk_obj))
+ return ERR_CAST(rk_obj);
+
+ if (private->domain)
+ ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
+ else
+ ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);
+
+ if (ret < 0) {
+ DRM_ERROR("failed to import sg table: %d\n", ret);
+ goto err_free_rk_obj;
+ }
+
+ return &rk_obj->base;
+
+err_free_rk_obj:
+ kfree(rk_obj);
+ return ERR_PTR(ret);
+}
+
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ if (rk_obj->pages)
+ return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs))
return NULL;

return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- /* Nothing to do */
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+
+ if (rk_obj->pages) {
+ vunmap(vaddr);
+ return;
+ }
+
+ /* Nothing to do if allocated by DMA mapping API. */
}