/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
32 struct vmw_user_context {
33 struct ttm_base_object base;
34 struct vmw_resource res;
37 static void vmw_user_context_free(struct vmw_resource *res);
38 static struct vmw_resource *
39 vmw_user_context_base_to_res(struct ttm_base_object *base);
41 static int vmw_gb_context_create(struct vmw_resource *res);
42 static int vmw_gb_context_bind(struct vmw_resource *res,
43 struct ttm_validate_buffer *val_buf);
44 static int vmw_gb_context_unbind(struct vmw_resource *res,
46 struct ttm_validate_buffer *val_buf);
47 static int vmw_gb_context_destroy(struct vmw_resource *res);
49 static uint64_t vmw_user_context_size;
51 static const struct vmw_user_resource_conv user_context_conv = {
52 .object_type = VMW_RES_CONTEXT,
53 .base_obj_to_res = vmw_user_context_base_to_res,
54 .res_free = vmw_user_context_free
57 const struct vmw_user_resource_conv *user_context_converter =
61 static const struct vmw_res_func vmw_legacy_context_func = {
62 .res_type = vmw_res_context,
63 .needs_backup = false,
65 .type_name = "legacy contexts",
66 .backup_placement = NULL,
73 static const struct vmw_res_func vmw_gb_context_func = {
74 .res_type = vmw_res_context,
77 .type_name = "guest backed contexts",
78 .backup_placement = &vmw_mob_placement,
79 .create = vmw_gb_context_create,
80 .destroy = vmw_gb_context_destroy,
81 .bind = vmw_gb_context_bind,
82 .unbind = vmw_gb_context_unbind
89 static void vmw_hw_context_destroy(struct vmw_resource *res)
92 struct vmw_private *dev_priv = res->dev_priv;
94 SVGA3dCmdHeader header;
95 SVGA3dCmdDestroyContext body;
99 if (res->func->destroy == vmw_gb_context_destroy) {
100 mutex_lock(&dev_priv->cmdbuf_mutex);
101 (void) vmw_gb_context_destroy(res);
102 if (dev_priv->pinned_bo != NULL &&
103 !dev_priv->query_cid_valid)
104 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
105 mutex_unlock(&dev_priv->cmdbuf_mutex);
109 vmw_execbuf_release_pinned_bo(dev_priv);
110 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
111 if (unlikely(cmd == NULL)) {
112 DRM_ERROR("Failed reserving FIFO space for surface "
117 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
118 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
119 cmd->body.cid = cpu_to_le32(res->id);
121 vmw_fifo_commit(dev_priv, sizeof(*cmd));
122 vmw_3d_resource_dec(dev_priv, false);
125 static int vmw_gb_context_init(struct vmw_private *dev_priv,
126 struct vmw_resource *res,
127 void (*res_free) (struct vmw_resource *res))
131 ret = vmw_resource_init(dev_priv, res, true,
132 res_free, &vmw_gb_context_func);
133 res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
135 if (unlikely(ret != 0)) {
143 vmw_resource_activate(res, vmw_hw_context_destroy);
147 static int vmw_context_init(struct vmw_private *dev_priv,
148 struct vmw_resource *res,
149 void (*res_free) (struct vmw_resource *res))
154 SVGA3dCmdHeader header;
155 SVGA3dCmdDefineContext body;
158 if (dev_priv->has_mob)
159 return vmw_gb_context_init(dev_priv, res, res_free);
161 ret = vmw_resource_init(dev_priv, res, false,
162 res_free, &vmw_legacy_context_func);
164 if (unlikely(ret != 0)) {
165 DRM_ERROR("Failed to allocate a resource id.\n");
169 if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
170 DRM_ERROR("Out of hw context ids.\n");
171 vmw_resource_unreference(&res);
175 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
176 if (unlikely(cmd == NULL)) {
177 DRM_ERROR("Fifo reserve failed.\n");
178 vmw_resource_unreference(&res);
182 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
183 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
184 cmd->body.cid = cpu_to_le32(res->id);
186 vmw_fifo_commit(dev_priv, sizeof(*cmd));
187 (void) vmw_3d_resource_inc(dev_priv, false);
188 vmw_resource_activate(res, vmw_hw_context_destroy);
192 if (res_free == NULL)
199 struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
201 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
204 if (unlikely(res == NULL))
207 ret = vmw_context_init(dev_priv, res, NULL);
209 return (ret == 0) ? res : NULL;
213 static int vmw_gb_context_create(struct vmw_resource *res)
215 struct vmw_private *dev_priv = res->dev_priv;
218 SVGA3dCmdHeader header;
219 SVGA3dCmdDefineGBContext body;
222 if (likely(res->id != -1))
225 ret = vmw_resource_alloc_id(res);
226 if (unlikely(ret != 0)) {
227 DRM_ERROR("Failed to allocate a context id.\n");
231 if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
236 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
237 if (unlikely(cmd == NULL)) {
238 DRM_ERROR("Failed reserving FIFO space for context "
244 cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
245 cmd->header.size = sizeof(cmd->body);
246 cmd->body.cid = res->id;
247 vmw_fifo_commit(dev_priv, sizeof(*cmd));
248 (void) vmw_3d_resource_inc(dev_priv, false);
253 vmw_resource_release_id(res);
258 static int vmw_gb_context_bind(struct vmw_resource *res,
259 struct ttm_validate_buffer *val_buf)
261 struct vmw_private *dev_priv = res->dev_priv;
263 SVGA3dCmdHeader header;
264 SVGA3dCmdBindGBContext body;
266 struct ttm_buffer_object *bo = val_buf->bo;
268 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
270 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
271 if (unlikely(cmd == NULL)) {
272 DRM_ERROR("Failed reserving FIFO space for context "
277 cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
278 cmd->header.size = sizeof(cmd->body);
279 cmd->body.cid = res->id;
280 cmd->body.mobid = bo->mem.start;
281 cmd->body.validContents = res->backup_dirty;
282 res->backup_dirty = false;
283 vmw_fifo_commit(dev_priv, sizeof(*cmd));
288 static int vmw_gb_context_unbind(struct vmw_resource *res,
290 struct ttm_validate_buffer *val_buf)
292 struct vmw_private *dev_priv = res->dev_priv;
293 struct ttm_buffer_object *bo = val_buf->bo;
294 struct vmw_fence_obj *fence;
297 SVGA3dCmdHeader header;
298 SVGA3dCmdReadbackGBContext body;
301 SVGA3dCmdHeader header;
302 SVGA3dCmdBindGBContext body;
304 uint32_t submit_size;
308 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
310 submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
312 cmd = vmw_fifo_reserve(dev_priv, submit_size);
313 if (unlikely(cmd == NULL)) {
314 DRM_ERROR("Failed reserving FIFO space for context "
322 cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
323 cmd1->header.size = sizeof(cmd1->body);
324 cmd1->body.cid = res->id;
325 cmd2 = (void *) (&cmd1[1]);
327 cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
328 cmd2->header.size = sizeof(cmd2->body);
329 cmd2->body.cid = res->id;
330 cmd2->body.mobid = SVGA3D_INVALID_ID;
332 vmw_fifo_commit(dev_priv, submit_size);
335 * Create a fence object and fence the backup buffer.
338 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
341 vmw_fence_single_bo(bo, fence);
343 if (likely(fence != NULL))
344 vmw_fence_obj_unreference(&fence);
349 static int vmw_gb_context_destroy(struct vmw_resource *res)
351 struct vmw_private *dev_priv = res->dev_priv;
353 SVGA3dCmdHeader header;
354 SVGA3dCmdDestroyGBContext body;
357 if (likely(res->id == -1))
360 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
361 if (unlikely(cmd == NULL)) {
362 DRM_ERROR("Failed reserving FIFO space for context "
367 cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
368 cmd->header.size = sizeof(cmd->body);
369 cmd->body.cid = res->id;
370 vmw_fifo_commit(dev_priv, sizeof(*cmd));
371 if (dev_priv->query_cid == res->id)
372 dev_priv->query_cid_valid = false;
373 vmw_resource_release_id(res);
374 vmw_3d_resource_dec(dev_priv, false);
380 * User-space context management:
383 static struct vmw_resource *
384 vmw_user_context_base_to_res(struct ttm_base_object *base)
386 return &(container_of(base, struct vmw_user_context, base)->res);
389 static void vmw_user_context_free(struct vmw_resource *res)
391 struct vmw_user_context *ctx =
392 container_of(res, struct vmw_user_context, res);
393 struct vmw_private *dev_priv = res->dev_priv;
395 ttm_base_object_kfree(ctx, base);
396 ttm_mem_global_free(vmw_mem_glob(dev_priv),
397 vmw_user_context_size);
401 * This function is called when user space has no more references on the
402 * base object. It releases the base-object's reference on the resource object.
405 static void vmw_user_context_base_release(struct ttm_base_object **p_base)
407 struct ttm_base_object *base = *p_base;
408 struct vmw_user_context *ctx =
409 container_of(base, struct vmw_user_context, base);
410 struct vmw_resource *res = &ctx->res;
413 vmw_resource_unreference(&res);
416 int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
417 struct drm_file *file_priv)
419 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
420 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
422 return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
425 int vmw_context_define_ioctl(struct drm_device *dev, void *data,
426 struct drm_file *file_priv)
428 struct vmw_private *dev_priv = vmw_priv(dev);
429 struct vmw_user_context *ctx;
430 struct vmw_resource *res;
431 struct vmw_resource *tmp;
432 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
433 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
434 struct vmw_master *vmaster = vmw_master(file_priv->master);
439 * Approximate idr memory usage with 128 bytes. It will be limited
440 * by maximum number_of contexts anyway.
443 if (unlikely(vmw_user_context_size == 0))
444 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
446 ret = ttm_read_lock(&vmaster->lock, true);
447 if (unlikely(ret != 0))
450 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
451 vmw_user_context_size,
453 if (unlikely(ret != 0)) {
454 if (ret != -ERESTARTSYS)
455 DRM_ERROR("Out of graphics memory for context"
460 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
461 if (unlikely(ctx == NULL)) {
462 ttm_mem_global_free(vmw_mem_glob(dev_priv),
463 vmw_user_context_size);
469 ctx->base.shareable = false;
470 ctx->base.tfile = NULL;
473 * From here on, the destructor takes over resource freeing.
476 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
477 if (unlikely(ret != 0))
480 tmp = vmw_resource_reference(&ctx->res);
481 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
482 &vmw_user_context_base_release, NULL);
484 if (unlikely(ret != 0)) {
485 vmw_resource_unreference(&tmp);
489 arg->cid = ctx->base.hash.key;
491 vmw_resource_unreference(&res);
493 ttm_read_unlock(&vmaster->lock);