drm/vmwgfx: Hook up guest-backed surfaces
author     Thomas Hellstrom <thellstrom@vmware.com>
           Wed, 21 Nov 2012 10:45:13 +0000 (11:45 +0100)
committer  Thomas Hellstrom <thellstrom@vmware.com>
           Fri, 17 Jan 2014 06:52:26 +0000 (07:52 +0100)
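
Add the kernel infrastructure needed for guest-backed surfaces: the
DRM_VMW_GB_SURFACE_CREATE and DRM_VMW_GB_SURFACE_REF ioctls, command-stream
validation for the guest-backed surface and image commands, and a resource
backend that creates, binds, unbinds and destroys guest-backed surfaces
backed by mob-placed buffers.
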
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Zack Rusin <zackr@vmware.com>
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index eaffb052409259cd169455d6801a8ec947faeb01..84f8f4c4ad0c90955e6e305501c8d67a8fb85660 100644
 #define DRM_IOCTL_VMW_UPDATE_LAYOUT                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
                 struct drm_vmw_update_layout_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE                                \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,  \
+                union drm_vmw_gb_surface_create_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF                           \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,     \
+                union drm_vmw_gb_surface_reference_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -177,6 +183,12 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
+       VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
+                     vmw_gb_surface_define_ioctl,
+                     DRM_AUTH | DRM_UNLOCKED),
+       VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
+                     vmw_gb_surface_reference_ioctl,
+                     DRM_AUTH | DRM_UNLOCKED),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3d672adf0ea486422f636b3e52ca1dd2e74659fa..71da388a808150c15321c17131774dfa6cef5078 100644
@@ -528,6 +528,10 @@ extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
 extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv);
+extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+                                      struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+                                         struct drm_file *file_priv);
 extern int vmw_surface_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             uint32_t handle, int *id);
@@ -541,6 +545,15 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
                           void (*bo_free) (struct ttm_buffer_object *bo));
 extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile);
+extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+                                struct ttm_object_file *tfile,
+                                uint32_t size,
+                                bool shareable,
+                                uint32_t *handle,
+                                struct vmw_dma_buffer **p_dma_buf);
+extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+                                    struct vmw_dma_buffer *dma_buf,
+                                    uint32_t *handle);
 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c2a6e4832e744245368bdd2273318a1441290964..4d51ad0a2f51febc9e546b6f7ac1ac761d8cc925 100644
@@ -1185,6 +1185,222 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
        return ret;
 }
 
+/**
+ * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @res_type: The resource type.
+ * @converter: Information about user-space binding for this resource type.
+ * @res_id: Pointer to the user-space resource handle in the command stream.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function records the new backup buffer in the resource's validation
+ * metadata; the actual switch is registered just prior to unreserving.
+ */
+static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
+                                struct vmw_sw_context *sw_context,
+                                enum vmw_res_type res_type,
+                                const struct vmw_user_resource_conv
+                                *converter,
+                                uint32_t *res_id,
+                                uint32_t *buf_id,
+                                unsigned long backup_offset)
+{
+       int ret;
+       struct vmw_dma_buffer *dma_buf;
+       struct vmw_resource_val_node *val_node;
+
+       ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
+                               converter, res_id, &val_node);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (val_node->first_usage)
+               val_node->no_buffer_needed = true;
+
+       vmw_dmabuf_unreference(&val_node->new_backup);
+       val_node->new_backup = dma_buf;
+       val_node->new_backup_offset = backup_offset;
+
+       return 0;
+}
+
+/**
+ * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  SVGA3dCmdHeader *header)
+{
+       struct vmw_bind_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBSurface body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
+
+       return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
+                                    user_surface_converter,
+                                    &cmd->body.sid, &cmd->body.mobid,
+                                    0);
+}
+
+/**
+ * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdUpdateGBImage body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
+                                    struct vmw_sw_context *sw_context,
+                                    SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdUpdateGBSurface body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
+                                    struct vmw_sw_context *sw_context,
+                                    SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdReadbackGBImage body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
+                                      struct vmw_sw_context *sw_context,
+                                      SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdReadbackGBSurface body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
+                                      struct vmw_sw_context *sw_context,
+                                      SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdInvalidateGBImage body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_surface - Validate an
+ * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
+                                        struct vmw_sw_context *sw_context,
+                                        SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdInvalidateGBSurface body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.sid, NULL);
+}
+
 /**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
@@ -1300,6 +1516,21 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
+       VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid),
+       VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid),
+       VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface),
+       VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid),
+       VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image),
+       VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
+                   &vmw_cmd_update_gb_surface),
+       VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
+                   &vmw_cmd_readback_gb_image),
+       VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
+                   &vmw_cmd_readback_gb_surface),
+       VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
+                   &vmw_cmd_invalidate_gb_image),
+       VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
+                   &vmw_cmd_invalidate_gb_surface),
        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6cd1560c15470bdc1f7182a3f64a1169eb7ed85c..b40978f0ca967e13090087cb4b08ccb87d71f5ab 100644
@@ -593,7 +593,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 }
 
 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-                             struct vmw_dma_buffer *dma_buf)
+                             struct vmw_dma_buffer *dma_buf,
+                             uint32_t *handle)
 {
        struct vmw_user_dma_buffer *user_bo;
 
@@ -601,6 +602,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                return -EINVAL;
 
        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+
+       *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 0fc93398bba202b36205203dbe9407efc326aade..c3b53e1bafb8a0b8a8ba1a0070698970dbf15262 100644
@@ -41,7 +41,6 @@ struct vmw_user_surface {
        struct ttm_prime_object prime;
        struct vmw_surface srf;
        uint32_t size;
-       uint32_t backup_handle;
 };
 
 /**
@@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res,
                                 struct ttm_validate_buffer *val_buf);
 static int vmw_legacy_srf_create(struct vmw_resource *res);
 static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+static int vmw_gb_surface_create(struct vmw_resource *res);
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_destroy(struct vmw_resource *res);
+
 
 static const struct vmw_user_resource_conv user_surface_conv = {
        .object_type = VMW_RES_SURFACE,
@@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
        .unbind = &vmw_legacy_srf_unbind
 };
 
+static const struct vmw_res_func vmw_gb_surface_func = {
+       .res_type = vmw_res_surface,
+       .needs_backup = true,
+       .may_evict = true,
+       .type_name = "guest backed surfaces",
+       .backup_placement = &vmw_mob_placement,
+       .create = vmw_gb_surface_create,
+       .destroy = vmw_gb_surface_destroy,
+       .bind = vmw_gb_surface_bind,
+       .unbind = vmw_gb_surface_unbind
+};
+
 /**
  * struct vmw_surface_dma - SVGA3D DMA command
  */
@@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
        struct vmw_surface *srf;
        void *cmd;
 
+       if (res->func->destroy == vmw_gb_surface_destroy) {
+               (void) vmw_gb_surface_destroy(res);
+               return;
+       }
+
        if (res->id != -1) {
 
                cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
@@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
        struct vmw_resource *res = &srf->res;
 
        BUG_ON(res_free == NULL);
-       (void) vmw_3d_resource_inc(dev_priv, false);
+       if (!dev_priv->has_mob)
+               (void) vmw_3d_resource_inc(dev_priv, false);
        ret = vmw_resource_init(dev_priv, res, true, res_free,
+                               (dev_priv->has_mob) ? &vmw_gb_surface_func :
                                &vmw_legacy_surface_func);
 
        if (unlikely(ret != 0)) {
-               vmw_3d_resource_dec(dev_priv, false);
+               if (!dev_priv->has_mob)
+                       vmw_3d_resource_dec(dev_priv, false);
                res_free(res);
                return ret;
        }
@@ -894,3 +921,421 @@ out_no_reference:
 
        return ret;
 }
+
+/**
+ * vmw_gb_surface_create - Issue a SVGA_3D_CMD_DEFINE_GB_SURFACE command to
+ * create the device surface for a guest-backed surface resource.
+ *
+ * @res: Pointer to the struct vmw_resource embedded in a struct vmw_surface.
+ */
+static int vmw_gb_surface_create(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_surface *srf = vmw_res_to_srf(res);
+       uint32_t cmd_len, submit_len;
+       int ret;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDefineGBSurface body;
+       } *cmd;
+
+       if (likely(res->id != -1))
+               return 0;
+
+       (void) vmw_3d_resource_inc(dev_priv, false);
+       ret = vmw_resource_alloc_id(res);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to allocate a surface id.\n");
+               goto out_no_id;
+       }
+
+       if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
+               ret = -EBUSY;
+               goto out_no_fifo;
+       }
+
+       cmd_len = sizeof(cmd->body);
+       submit_len = sizeof(*cmd);
+       cmd = vmw_fifo_reserve(dev_priv, submit_len);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "creation.\n");
+               ret = -ENOMEM;
+               goto out_no_fifo;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+       cmd->header.size = cmd_len;
+       cmd->body.sid = srf->res.id;
+       cmd->body.surfaceFlags = srf->flags;
+       cmd->body.format = cpu_to_le32(srf->format);
+       cmd->body.numMipLevels = srf->mip_levels[0];
+       cmd->body.multisampleCount = srf->multisample_count;
+       cmd->body.autogenFilter = srf->autogen_filter;
+       cmd->body.size.width = srf->base_size.width;
+       cmd->body.size.height = srf->base_size.height;
+       cmd->body.size.depth = srf->base_size.depth;
+       vmw_fifo_commit(dev_priv, submit_len);
+
+       return 0;
+
+out_no_fifo:
+       vmw_resource_release_id(res);
+out_no_id:
+       vmw_3d_resource_dec(dev_priv, false);
+       return ret;
+}
+
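+/**
+ * vmw_gb_surface_bind - Bind a guest-backed surface to its backup mob.
+ *
+ * @res: Pointer to the surface resource.
+ * @val_buf: Validation buffer entry holding the mob backing the surface.
+ *
+ * Issues a SVGA_3D_CMD_BIND_GB_SURFACE command, followed by a
+ * SVGA_3D_CMD_UPDATE_GB_SURFACE command if the backup contents are dirty.
+ */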
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBSurface body;
+       } *cmd1;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdUpdateGBSurface body;
+       } *cmd2;
+       uint32_t submit_size;
+       struct ttm_buffer_object *bo = val_buf->bo;
+
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+       submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+
+       cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd1 == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "binding.\n");
+               return -ENOMEM;
+       }
+
+       cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+       cmd1->header.size = sizeof(cmd1->body);
+       cmd1->body.sid = res->id;
+       cmd1->body.mobid = bo->mem.start;
+       if (res->backup_dirty) {
+               cmd2 = (void *) &cmd1[1];
+               cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
+               cmd2->header.size = sizeof(cmd2->body);
+               cmd2->body.sid = res->id;
+               res->backup_dirty = false;
+       }
+       vmw_fifo_commit(dev_priv, submit_size);
+
+       return 0;
+}
+
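+/**
+ * vmw_gb_surface_unbind - Unbind a guest-backed surface from its backup mob.
+ *
+ * @res: Pointer to the surface resource.
+ * @readback: If true, the surface contents are read back to the backup
+ * buffer before unbinding.
+ * @val_buf: Validation buffer entry holding the mob backing the surface.
+ *
+ * Optionally issues a SVGA_3D_CMD_READBACK_GB_SURFACE command, then binds
+ * the surface to SVGA3D_INVALID_ID and fences the backup buffer.
+ */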
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct ttm_buffer_object *bo = val_buf->bo;
+       struct vmw_fence_obj *fence;
+
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdReadbackGBSurface body;
+       } *cmd1;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBSurface body;
+       } *cmd2;
+       uint32_t submit_size;
+       uint8_t *cmd;
+
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+       submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+       cmd = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+
+       cmd2 = (void *) cmd;
+       if (readback) {
+               cmd1 = (void *) cmd;
+               cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+               cmd1->header.size = sizeof(cmd1->body);
+               cmd1->body.sid = res->id;
+               cmd2 = (void *) &cmd1[1];
+       }
+       cmd2->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+       cmd2->header.size = sizeof(cmd2->body);
+       cmd2->body.sid = res->id;
+       cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+       vmw_fifo_commit(dev_priv, submit_size);
+
+       /*
+        * Create a fence object and fence the backup buffer.
+        */
+
+       (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                         &fence, NULL);
+
+       vmw_fence_single_bo(val_buf->bo, fence);
+
+       if (likely(fence != NULL))
+               vmw_fence_obj_unreference(&fence);
+
+       return 0;
+}
+
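+/**
+ * vmw_gb_surface_destroy - Issue a SVGA_3D_CMD_DESTROY_GB_SURFACE command
+ * and release the surface id.
+ *
+ * @res: Pointer to the surface resource.
+ */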
+static int vmw_gb_surface_destroy(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDestroyGBSurface body;
+       } *cmd;
+
+       if (likely(res->id == -1))
+               return 0;
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "destruction.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.sid = res->id;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       vmw_resource_release_id(res);
+       vmw_3d_resource_dec(dev_priv, false);
+
+       return 0;
+}
+
+/**
+ * vmw_gb_surface_define_ioctl - Ioctl function implementing
+ *                               the user surface define functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_user_surface *user_srf;
+       struct vmw_surface *srf;
+       struct vmw_resource *res;
+       struct vmw_resource *tmp;
+       union drm_vmw_gb_surface_create_arg *arg =
+           (union drm_vmw_gb_surface_create_arg *)data;
+       struct drm_vmw_gb_surface_create_req *req = &arg->req;
+       struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       int ret;
+       uint32_t size;
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       const struct svga3d_surface_desc *desc;
+       uint32_t backup_handle;
+
+       if (unlikely(vmw_user_surface_size == 0))
+               vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+                       128;
+
+       size = vmw_user_surface_size + 128;
+
+       desc = svga3dsurface_get_desc(req->format);
+       if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+               DRM_ERROR("Invalid surface format for surface creation.\n");
+               return -EINVAL;
+       }
+
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+                                  size, false, true);
+       if (unlikely(ret != 0)) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Out of graphics memory for surface"
+                                 " creation.\n");
+               goto out_unlock;
+       }
+
+       user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+       if (unlikely(user_srf == NULL)) {
+               ret = -ENOMEM;
+               goto out_no_user_srf;
+       }
+
+       srf = &user_srf->srf;
+       res = &srf->res;
+
+       srf->flags = req->svga3d_flags;
+       srf->format = req->format;
+       srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
+       srf->mip_levels[0] = req->mip_levels;
+       srf->num_sizes = 1;
+       srf->sizes = NULL;
+       srf->offsets = NULL;
+       user_srf->size = size;
+       srf->base_size = req->base_size;
+       srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+       srf->multisample_count = req->multisample_count;
+       res->backup_size = svga3dsurface_get_serialized_size
+         (srf->format, srf->base_size, srf->mip_levels[0],
+          srf->flags & SVGA3D_SURFACE_CUBEMAP);
+
+       user_srf->prime.base.shareable = false;
+       user_srf->prime.base.tfile = NULL;
+
+       /*
+        * From this point, the generic resource management functions
+        * destroy the object on failure.
+        */
+
+       ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+       if (unlikely(ret != 0))
+               goto out_unlock;
+
+       if (req->buffer_handle != SVGA3D_INVALID_ID) {
+               ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
+                                            &res->backup);
+       } else if (req->drm_surface_flags &
+                  drm_vmw_surface_flag_create_buffer)
+               ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+                                           res->backup_size,
+                                           req->drm_surface_flags &
+                                           drm_vmw_surface_flag_shareable,
+                                           &backup_handle,
+                                           &res->backup);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&res);
+               goto out_unlock;
+       }
+
+       tmp = vmw_resource_reference(&srf->res);
+       ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+                                   req->drm_surface_flags &
+                                   drm_vmw_surface_flag_shareable,
+                                   VMW_RES_SURFACE,
+                                   &vmw_user_surface_base_release, NULL);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&tmp);
+               vmw_resource_unreference(&res);
+               goto out_unlock;
+       }
+
+       rep->handle = user_srf->prime.base.hash.key;
+       rep->backup_size = res->backup_size;
+       if (res->backup) {
+               rep->buffer_map_handle =
+                       drm_vma_node_offset_addr(&res->backup->base.vma_node);
+               rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+               rep->buffer_handle = backup_handle;
+       } else {
+               rep->buffer_map_handle = 0;
+               rep->buffer_size = 0;
+               rep->buffer_handle = SVGA3D_INVALID_ID;
+       }
+
+       vmw_resource_unreference(&res);
+
+       ttm_read_unlock(&vmaster->lock);
+       return 0;
+out_no_user_srf:
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+       ttm_read_unlock(&vmaster->lock);
+       return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_ioctl - Ioctl function implementing
+ *                                  the user surface reference functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       union drm_vmw_gb_surface_reference_arg *arg =
+           (union drm_vmw_gb_surface_reference_arg *)data;
+       struct drm_vmw_surface_arg *req = &arg->req;
+       struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_surface *srf;
+       struct vmw_user_surface *user_srf;
+       struct ttm_base_object *base;
+       uint32_t backup_handle;
+       int ret = -EINVAL;
+
+       base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
+       if (unlikely(base == NULL)) {
+               DRM_ERROR("Could not find surface to reference.\n");
+               return -EINVAL;
+       }
+
+       if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
+               goto out_bad_resource;
+
+       user_srf = container_of(base, struct vmw_user_surface, prime.base);
+       srf = &user_srf->srf;
+       if (srf->res.backup == NULL) {
+               DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+               goto out_bad_resource;
+       }
+
+       ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+                                TTM_REF_USAGE, NULL);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Could not add a reference to a GB surface.\n");
+               goto out_bad_resource;
+       }
+
+       mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+       ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
+                                       &backup_handle);
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Could not add a reference to a GB surface "
+                         "backup buffer.\n");
+               (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+                                                req->sid,
+                                                TTM_REF_USAGE);
+               goto out_bad_resource;
+       }
+
+       rep->creq.svga3d_flags = srf->flags;
+       rep->creq.format = srf->format;
+       rep->creq.mip_levels = srf->mip_levels[0];
+       rep->creq.drm_surface_flags = 0;
+       rep->creq.multisample_count = srf->multisample_count;
+       rep->creq.autogen_filter = srf->autogen_filter;
+       rep->creq.buffer_handle = backup_handle;
+       rep->creq.base_size = srf->base_size;
+       rep->crep.handle = user_srf->prime.base.hash.key;
+       rep->crep.backup_size = srf->res.backup_size;
+       rep->crep.buffer_handle = backup_handle;
+       rep->crep.buffer_map_handle =
+               drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+       rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+out_bad_resource:
+       ttm_base_object_unref(&base);
+
+       return ret;
+}