1 /**************************************************************************
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_drm.h"
30 #include "ttm/ttm_object.h"
31 #include "ttm/ttm_placement.h"
/*
 * User-space visible wrappers: each pairs a TTM base object (the handle
 * user space holds a reference on) with the corresponding driver-side
 * resource or buffer.
 * NOTE(review): closing braces and possibly some members are elided in
 * this extract — verify against the full source.
 */
34 struct vmw_user_context {
35 struct ttm_base_object base;
36 struct vmw_resource res;
/* A surface exposed to user space via a TTM base-object handle. */
39 struct vmw_user_surface {
40 struct ttm_base_object base;
41 struct vmw_surface srf;
/* A DMA buffer exposed to user space via a TTM base-object handle. */
44 struct vmw_user_dma_buffer {
45 struct ttm_base_object base;
46 struct vmw_dma_buffer dma;
/* Reply bookkeeping for buffer-object ioctls (members partly elided here). */
49 struct vmw_bo_user_rep {
55 struct vmw_resource res;
/* An overlay stream exposed to user space via a TTM base-object handle. */
59 struct vmw_user_stream {
60 struct ttm_base_object base;
61 struct vmw_stream stream;
/*
 * Upcast a TTM buffer object to the vmw_dma_buffer that embeds it
 * (the TTM bo is the 'base' member).
 */
64 static inline struct vmw_dma_buffer *
65 vmw_dma_buffer(struct ttm_buffer_object *bo)
67 return container_of(bo, struct vmw_dma_buffer, base);
/*
 * Upcast a TTM buffer object to the vmw_user_dma_buffer that embeds it.
 * Only valid for buffers that were created as user buffers (i.e. whose
 * vmw_dma_buffer really sits inside a vmw_user_dma_buffer).
 */
70 static inline struct vmw_user_dma_buffer *
71 vmw_user_dma_buffer(struct ttm_buffer_object *bo)
73 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
74 return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
/*
 * Take an extra reference on @res and return it.
 * NOTE(review): the body (presumably kref_get(&res->kref); return res;)
 * is elided in this extract — confirm against the full source.
 */
77 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
/*
 * Final-put callback for a resource's kref.
 *
 * Entered with dev_priv->resource_lock write-held (taken by
 * vmw_resource_unreference): removes the id from the idr, drops the lock
 * while the hw_destroy / res_free callbacks run, and re-acquires it before
 * returning so the caller can unlock.
 * NOTE(review): the callback invocations and closing brace are elided in
 * this extract.
 */
83 static void vmw_resource_release(struct kref *kref)
85 struct vmw_resource *res =
86 container_of(kref, struct vmw_resource, kref);
87 struct vmw_private *dev_priv = res->dev_priv;
89 idr_remove(res->idr, res->id);
90 write_unlock(&dev_priv->resource_lock);
92 if (likely(res->hw_destroy != NULL))
95 if (res->res_free != NULL)
100 write_lock(&dev_priv->resource_lock);
/*
 * Drop a reference on *p_res under the resource_lock write lock;
 * vmw_resource_release runs (and temporarily drops/re-takes the lock)
 * if this was the last reference.
 * NOTE(review): the *p_res = NULL assignment expected before the lock is
 * elided in this extract.
 */
103 void vmw_resource_unreference(struct vmw_resource **p_res)
105 struct vmw_resource *res = *p_res;
106 struct vmw_private *dev_priv = res->dev_priv;
109 write_lock(&dev_priv->resource_lock);
110 kref_put(&res->kref, vmw_resource_release);
111 write_unlock(&dev_priv->resource_lock);
/*
 * Initialize the generic part of a resource and allocate it an id (>= 1)
 * in the given idr, using the legacy idr_pre_get/idr_get_new_above retry
 * loop (pre-allocation can race, hence the -EAGAIN do/while).
 * The resource starts with hw_destroy == NULL, i.e. not yet "activated";
 * vmw_resource_activate makes it visible to lookups.
 * NOTE(review): the @idr parameter line, the do { opener, the error return
 * and the closing brace are elided in this extract.
 */
114 static int vmw_resource_init(struct vmw_private *dev_priv,
115 struct vmw_resource *res,
117 enum ttm_object_type obj_type,
118 void (*res_free) (struct vmw_resource *res))
122 kref_init(&res->kref);
123 res->hw_destroy = NULL;
124 res->res_free = res_free;
125 res->res_type = obj_type;
128 res->dev_priv = dev_priv;
129 INIT_LIST_HEAD(&res->query_head);
131 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
134 write_lock(&dev_priv->resource_lock);
135 ret = idr_get_new_above(idr, res, 1, &res->id);
136 write_unlock(&dev_priv->resource_lock);
138 } while (ret == -EAGAIN);
144 * vmw_resource_activate
146 * @res: Pointer to the newly created resource
147 * @hw_destroy: Destroy function. NULL if none.
149 * Activate a resource after the hardware has been made aware of it.
150 * Set the destroy function to @hw_destroy. Typically this frees the
151 * resource and destroys the hardware resources associated with it.
152 * Activate basically means that the function vmw_resource_lookup will
/*
 * Mark @res as available and install its hardware-destroy callback,
 * under the resource_lock write lock. After this, vmw_resource_lookup
 * will hand out references to it.
 * NOTE(review): the res->avail = true assignment appears elided in this
 * extract (lookup checks res->avail).
 */
156 static void vmw_resource_activate(struct vmw_resource *res,
157 void (*hw_destroy) (struct vmw_resource *))
159 struct vmw_private *dev_priv = res->dev_priv;
161 write_lock(&dev_priv->resource_lock);
163 res->hw_destroy = hw_destroy;
164 write_unlock(&dev_priv->resource_lock);
/*
 * Look up a resource by id in @idr and return a referenced pointer,
 * or NULL if the id is unknown or the resource is not yet activated
 * (res->avail false). The kref is taken under the read lock so the
 * resource cannot be released concurrently.
 * NOTE(review): the res = NULL path for unavailable entries and the
 * return statements are elided in this extract.
 */
167 struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
168 struct idr *idr, int id)
170 struct vmw_resource *res;
172 read_lock(&dev_priv->resource_lock);
173 res = idr_find(idr, id);
174 if (res && res->avail)
175 kref_get(&res->kref);
178 read_unlock(&dev_priv->resource_lock);
180 if (unlikely(res == NULL))
187 * Context management:
/*
 * Hardware-destroy callback for contexts: emits an SVGA3D
 * CONTEXT_DESTROY command through the FIFO (after releasing any pinned
 * query bo referencing this context) and drops the 3D resource count.
 * If FIFO reservation fails the command is skipped with an error log.
 * NOTE(review): the DRM_ERROR continuation string and the early-return
 * on reservation failure are elided in this extract.
 */
190 static void vmw_hw_context_destroy(struct vmw_resource *res)
193 struct vmw_private *dev_priv = res->dev_priv;
195 SVGA3dCmdHeader header;
196 SVGA3dCmdDestroyContext body;
200 vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
202 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
203 if (unlikely(cmd == NULL)) {
204 DRM_ERROR("Failed reserving FIFO space for surface "
209 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
210 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
211 cmd->body.cid = cpu_to_le32(res->id);
213 vmw_fifo_commit(dev_priv, sizeof(*cmd));
214 vmw_3d_resource_dec(dev_priv, false);
/*
 * Initialize a context resource: register it in context_idr, emit an
 * SVGA3D CONTEXT_DEFINE command, bump the 3D resource count and activate
 * the resource with vmw_hw_context_destroy as its destructor.
 *
 * On vmw_resource_init failure the resource is freed directly (kfree or
 * @res_free); on FIFO failure it is unreferenced, which tears it down
 * through the normal release path.
 * NOTE(review): several error-path lines and returns are elided in this
 * extract.
 */
217 static int vmw_context_init(struct vmw_private *dev_priv,
218 struct vmw_resource *res,
219 void (*res_free) (struct vmw_resource *res))
224 SVGA3dCmdHeader header;
225 SVGA3dCmdDefineContext body;
228 ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
229 VMW_RES_CONTEXT, res_free);
231 if (unlikely(ret != 0)) {
232 if (res_free == NULL)
239 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
240 if (unlikely(cmd == NULL)) {
241 DRM_ERROR("Fifo reserve failed.\n");
242 vmw_resource_unreference(&res);
246 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
247 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
248 cmd->body.cid = cpu_to_le32(res->id);
250 vmw_fifo_commit(dev_priv, sizeof(*cmd));
251 (void) vmw_3d_resource_inc(dev_priv, false);
252 vmw_resource_activate(res, vmw_hw_context_destroy);
/*
 * Allocate and initialize a kernel-owned context resource (no res_free
 * callback, so the release path kfrees it). Returns NULL on allocation
 * or init failure; vmw_context_init owns cleanup of @res on failure.
 */
256 struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
258 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
261 if (unlikely(res == NULL))
264 ret = vmw_context_init(dev_priv, res, NULL);
265 return (ret == 0) ? res : NULL;
269 * User-space context management:
/*
 * res_free callback for user-space contexts: recovers the enclosing
 * vmw_user_context from the resource.
 * NOTE(review): the freeing call (presumably kfree(ctx)) is elided in
 * this extract.
 */
272 static void vmw_user_context_free(struct vmw_resource *res)
274 struct vmw_user_context *ctx =
275 container_of(res, struct vmw_user_context, res);
281 * This function is called when user space has no more references on the
282 * base object. It releases the base-object's reference on the resource object.
/*
 * TTM base-object release callback: user space holds no more references,
 * so drop the base object's reference on the context resource.
 */
285 static void vmw_user_context_base_release(struct ttm_base_object **p_base)
287 struct ttm_base_object *base = *p_base;
288 struct vmw_user_context *ctx =
289 container_of(base, struct vmw_user_context, base);
290 struct vmw_resource *res = &ctx->res;
293 vmw_resource_unreference(&res);
/*
 * DRM_VMW_UNREF_CONTEXT ioctl: look up the context by cid, verify it is
 * a user-space context owned by (or shared with) the calling file, then
 * drop the caller's TTM usage reference on its handle. The lookup
 * reference is dropped at the end in all cases.
 * NOTE(review): error returns/gotos between the checks are elided in
 * this extract.
 */
296 int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
297 struct drm_file *file_priv)
299 struct vmw_private *dev_priv = vmw_priv(dev);
300 struct vmw_resource *res;
301 struct vmw_user_context *ctx;
302 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
303 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
306 res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
307 if (unlikely(res == NULL))
/* Reject ids that belong to kernel contexts rather than user contexts. */
310 if (res->res_free != &vmw_user_context_free) {
315 ctx = container_of(res, struct vmw_user_context, res);
316 if (ctx->base.tfile != tfile && !ctx->base.shareable) {
321 ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
323 vmw_resource_unreference(&res);
/*
 * DRM_VMW_CREATE_CONTEXT ioctl: allocate a vmw_user_context, initialize
 * its embedded resource (vmw_context_init owns cleanup on failure from
 * that point), then wrap it in a non-shareable TTM base object owned by
 * the calling file. On base-object failure both the extra reference and
 * the original one are dropped.
 * NOTE(review): the res = &ctx->res assignment, arg->cid output and
 * return statements are elided in this extract.
 */
327 int vmw_context_define_ioctl(struct drm_device *dev, void *data,
328 struct drm_file *file_priv)
330 struct vmw_private *dev_priv = vmw_priv(dev);
331 struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
332 struct vmw_resource *res;
333 struct vmw_resource *tmp;
334 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
335 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
338 if (unlikely(ctx == NULL))
342 ctx->base.shareable = false;
343 ctx->base.tfile = NULL;
345 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
346 if (unlikely(ret != 0))
/* Extra reference held by the TTM base object until its release callback. */
349 tmp = vmw_resource_reference(&ctx->res);
350 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
351 &vmw_user_context_base_release, NULL);
353 if (unlikely(ret != 0)) {
354 vmw_resource_unreference(&tmp);
360 vmw_resource_unreference(&res);
/*
 * Validate that context @id exists, is available, and is accessible from
 * @tfile (owner or shareable). Optionally returns a referenced resource
 * through @p_res. Used by the command submission path.
 * NOTE(review): the ret assignments, the p_res NULL-check guard and the
 * return are elided in this extract.
 */
365 int vmw_context_check(struct vmw_private *dev_priv,
366 struct ttm_object_file *tfile,
368 struct vmw_resource **p_res)
370 struct vmw_resource *res;
373 read_lock(&dev_priv->resource_lock);
374 res = idr_find(&dev_priv->context_idr, id);
375 if (res && res->avail) {
376 struct vmw_user_context *ctx =
377 container_of(res, struct vmw_user_context, res);
378 if (ctx->base.tfile != tfile && !ctx->base.shareable)
381 *p_res = vmw_resource_reference(res);
384 read_unlock(&dev_priv->resource_lock);
391 * Surface management.
/*
 * Hardware-destroy callback for surfaces: emits an SVGA3D
 * SURFACE_DESTROY command for the surface id and drops the 3D resource
 * count. Skipped with an error log if FIFO reservation fails.
 * NOTE(review): the DRM_ERROR continuation string and the early return
 * are elided in this extract.
 */
394 static void vmw_hw_surface_destroy(struct vmw_resource *res)
397 struct vmw_private *dev_priv = res->dev_priv;
399 SVGA3dCmdHeader header;
400 SVGA3dCmdDestroySurface body;
401 } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
403 if (unlikely(cmd == NULL)) {
404 DRM_ERROR("Failed reserving FIFO space for surface "
409 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
410 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
411 cmd->body.sid = cpu_to_le32(res->id);
413 vmw_fifo_commit(dev_priv, sizeof(*cmd));
414 vmw_3d_resource_dec(dev_priv, false);
/*
 * res_free callback for kernel-owned surfaces: frees the cursor snooper
 * image buffer.
 * NOTE(review): freeing of srf->sizes and the surface itself appears
 * elided in this extract.
 */
417 void vmw_surface_res_free(struct vmw_resource *res)
419 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
422 kfree(srf->snooper.image);
/*
 * Initialize a surface resource: register it in surface_idr, emit an
 * SVGA3D SURFACE_DEFINE command (header + per-face mip levels + one
 * SVGA3dSize per mip), bump the 3D resource count, and activate the
 * resource with vmw_hw_surface_destroy as its destructor.
 *
 * @res_free must be non-NULL (BUG_ON): surfaces always carry their own
 * free callback. On FIFO failure the resource is unreferenced, which
 * tears everything down through the release path.
 * NOTE(review): local declarations, error returns and the pointer
 * arithmetic placing cmd_size past the fixed-size command are partially
 * elided in this extract.
 */
426 int vmw_surface_init(struct vmw_private *dev_priv,
427 struct vmw_surface *srf,
428 void (*res_free) (struct vmw_resource *res))
432 SVGA3dCmdHeader header;
433 SVGA3dCmdDefineSurface body;
435 SVGA3dSize *cmd_size;
436 struct vmw_resource *res = &srf->res;
437 struct drm_vmw_size *src_size;
442 BUG_ON(res_free == NULL);
443 ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
444 VMW_RES_SURFACE, res_free);
446 if (unlikely(ret != 0)) {
/* Total FIFO space: fixed command plus one SVGA3dSize per mip level. */
451 submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
452 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
454 cmd = vmw_fifo_reserve(dev_priv, submit_size);
455 if (unlikely(cmd == NULL)) {
456 DRM_ERROR("Fifo reserve failed for create surface.\n");
457 vmw_resource_unreference(&res);
461 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
462 cmd->header.size = cpu_to_le32(cmd_len);
463 cmd->body.sid = cpu_to_le32(res->id);
464 cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
465 cmd->body.format = cpu_to_le32(srf->format);
466 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
467 cmd->body.face[i].numMipLevels =
468 cpu_to_le32(srf->mip_levels[i]);
/*
 * NOTE(review): the advance of cmd past the fixed command before this
 * cast appears elided here — as shown, cmd_size would alias the header.
 */
472 cmd_size = (SVGA3dSize *) cmd;
473 src_size = srf->sizes;
475 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
476 cmd_size->width = cpu_to_le32(src_size->width);
477 cmd_size->height = cpu_to_le32(src_size->height);
478 cmd_size->depth = cpu_to_le32(src_size->depth);
481 vmw_fifo_commit(dev_priv, submit_size);
482 (void) vmw_3d_resource_inc(dev_priv, false);
483 vmw_resource_activate(res, vmw_hw_surface_destroy);
/*
 * res_free callback for user-space surfaces: frees the snooper image.
 * NOTE(review): freeing of srf->sizes and kfree(user_srf) appear elided
 * in this extract.
 */
487 static void vmw_user_surface_free(struct vmw_resource *res)
489 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
490 struct vmw_user_surface *user_srf =
491 container_of(srf, struct vmw_user_surface, srf);
494 kfree(srf->snooper.image);
/*
 * Resolve a user handle to a referenced vmw_surface. Looks up the TTM
 * base object, checks its type, and takes a resource reference under the
 * read lock only if the resource is available and really a user surface.
 * The base-object reference from the lookup is dropped before returning.
 * NOTE(review): the *out assignment, ret values and the out_bad_resource
 * label line are elided in this extract.
 */
498 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
499 struct ttm_object_file *tfile,
500 uint32_t handle, struct vmw_surface **out)
502 struct vmw_resource *res;
503 struct vmw_surface *srf;
504 struct vmw_user_surface *user_srf;
505 struct ttm_base_object *base;
508 base = ttm_base_object_lookup(tfile, handle);
509 if (unlikely(base == NULL))
512 if (unlikely(base->object_type != VMW_RES_SURFACE))
513 goto out_bad_resource;
515 user_srf = container_of(base, struct vmw_user_surface, base);
516 srf = &user_srf->srf;
519 read_lock(&dev_priv->resource_lock);
521 if (!res->avail || res->res_free != &vmw_user_surface_free) {
522 read_unlock(&dev_priv->resource_lock);
523 goto out_bad_resource;
526 kref_get(&res->kref);
527 read_unlock(&dev_priv->resource_lock);
533 ttm_base_object_unref(&base);
/*
 * TTM base-object release callback for surfaces: drops the base object's
 * reference on the surface resource.
 */
538 static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
540 struct ttm_base_object *base = *p_base;
541 struct vmw_user_surface *user_srf =
542 container_of(base, struct vmw_user_surface, base);
543 struct vmw_resource *res = &user_srf->srf.res;
546 vmw_resource_unreference(&res);
/*
 * DRM_VMW_UNREF_SURFACE ioctl: drop the caller's TTM usage reference on
 * the surface handle; the surface itself goes away when the last
 * reference does.
 */
549 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
550 struct drm_file *file_priv)
552 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
553 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
555 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
/*
 * DRM_VMW_CREATE_SURFACE ioctl: build a vmw_user_surface from the
 * user-supplied request, copy in the per-mip size array, optionally
 * allocate a 64x64 ARGB cursor-snooper image, initialize the resource
 * (from which point the resource machinery owns cleanup) and publish it
 * as a TTM base object. Returns the surface id in rep->sid.
 * NOTE(review): several declarations (ret, i, num_sizes accumulation
 * start), error-path gotos/labels and the final return are elided in
 * this extract.
 */
558 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
559 struct drm_file *file_priv)
561 struct vmw_private *dev_priv = vmw_priv(dev);
562 struct vmw_user_surface *user_srf =
563 kmalloc(sizeof(*user_srf), GFP_KERNEL);
564 struct vmw_surface *srf;
565 struct vmw_resource *res;
566 struct vmw_resource *tmp;
567 union drm_vmw_surface_create_arg *arg =
568 (union drm_vmw_surface_create_arg *)data;
569 struct drm_vmw_surface_create_req *req = &arg->req;
570 struct drm_vmw_surface_arg *rep = &arg->rep;
571 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
572 struct drm_vmw_size __user *user_sizes;
576 if (unlikely(user_srf == NULL))
579 srf = &user_srf->srf;
582 srf->flags = req->flags;
583 srf->format = req->format;
584 srf->scanout = req->scanout;
585 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
/* Total number of size entries = sum of mip levels over all faces. */
587 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
588 srf->num_sizes += srf->mip_levels[i];
/* Bound user-controlled count before using it as an allocation size. */
590 if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
591 DRM_VMW_MAX_MIP_LEVELS) {
596 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
597 if (unlikely(srf->sizes == NULL)) {
602 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
605 ret = copy_from_user(srf->sizes, user_sizes,
606 srf->num_sizes * sizeof(*srf->sizes));
607 if (unlikely(ret != 0)) {
/*
 * A single 64x64 A8R8G8B8 surface is assumed to be a cursor candidate;
 * allocate a snooper image so cursor updates can be mirrored.
 * NOTE(review): the srf->scanout && condition line appears elided here.
 */
613 srf->num_sizes == 1 &&
614 srf->sizes[0].width == 64 &&
615 srf->sizes[0].height == 64 &&
616 srf->format == SVGA3D_A8R8G8B8) {
618 /* allocate image area and clear it */
619 srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
620 if (!srf->snooper.image) {
621 DRM_ERROR("Failed to allocate cursor_image\n");
626 srf->snooper.image = NULL;
628 srf->snooper.crtc = NULL;
630 user_srf->base.shareable = false;
631 user_srf->base.tfile = NULL;
634 * From this point, the generic resource management functions
635 * destroy the object on failure.
638 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
639 if (unlikely(ret != 0))
/* Extra reference held by the TTM base object until its release callback. */
642 tmp = vmw_resource_reference(&srf->res);
643 ret = ttm_base_object_init(tfile, &user_srf->base,
644 req->shareable, VMW_RES_SURFACE,
645 &vmw_user_surface_base_release, NULL);
647 if (unlikely(ret != 0)) {
648 vmw_resource_unreference(&tmp);
649 vmw_resource_unreference(&res);
653 rep->sid = user_srf->base.hash.key;
654 if (rep->sid == SVGA3D_INVALID_ID)
655 DRM_ERROR("Created bad Surface ID.\n");
657 vmw_resource_unreference(&res);
/*
 * DRM_VMW_REF_SURFACE ioctl: look up a surface by handle, add a TTM
 * usage reference for the calling file, and copy the surface metadata
 * (flags, format, mip levels) plus the size array back to user space.
 * The lookup's base-object reference is dropped on exit.
 * NOTE(review): error returns, the size_addr source of user_sizes and
 * the out_* labels are elided in this extract.
 */
666 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
667 struct drm_file *file_priv)
669 union drm_vmw_surface_reference_arg *arg =
670 (union drm_vmw_surface_reference_arg *)data;
671 struct drm_vmw_surface_arg *req = &arg->req;
672 struct drm_vmw_surface_create_req *rep = &arg->rep;
673 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
674 struct vmw_surface *srf;
675 struct vmw_user_surface *user_srf;
676 struct drm_vmw_size __user *user_sizes;
677 struct ttm_base_object *base;
680 base = ttm_base_object_lookup(tfile, req->sid);
681 if (unlikely(base == NULL)) {
682 DRM_ERROR("Could not find surface to reference.\n");
686 if (unlikely(base->object_type != VMW_RES_SURFACE))
687 goto out_bad_resource;
689 user_srf = container_of(base, struct vmw_user_surface, base);
690 srf = &user_srf->srf;
692 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
693 if (unlikely(ret != 0)) {
694 DRM_ERROR("Could not add a reference to a surface.\n");
695 goto out_no_reference;
698 rep->flags = srf->flags;
699 rep->format = srf->format;
700 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
701 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
705 ret = copy_to_user(user_sizes, srf->sizes,
706 srf->num_sizes * sizeof(*srf->sizes));
707 if (unlikely(ret != 0)) {
708 DRM_ERROR("copy_to_user failed %p %u\n",
709 user_sizes, srf->num_sizes);
714 ttm_base_object_unref(&base);
/*
 * Validate a surface handle for the command parser and return its
 * resource id through @id. The base-object reference taken by the
 * lookup is dropped before returning.
 * NOTE(review): ret handling, the out_bad_surface label and the return
 * are elided in this extract; the FIXME below is original.
 */
719 int vmw_surface_check(struct vmw_private *dev_priv,
720 struct ttm_object_file *tfile,
721 uint32_t handle, int *id)
723 struct ttm_base_object *base;
724 struct vmw_user_surface *user_srf;
728 base = ttm_base_object_lookup(tfile, handle);
729 if (unlikely(base == NULL))
732 if (unlikely(base->object_type != VMW_RES_SURFACE))
733 goto out_bad_surface;
735 user_srf = container_of(base, struct vmw_user_surface, base);
736 *id = user_srf->srf.res.id;
741 * FIXME: May deadlock here when called from the
742 * command parsing code.
745 ttm_base_object_unref(&base);
/*
 * Estimate the TTM memory-accounting size of a dma buffer: the (lazily
 * computed, cached) fixed per-bo overhead plus the page-aligned page
 * pointer array for @num_pages.
 * NOTE(review): the cached value is a function-local static computed
 * without locking — presumably benign (idempotent), but racy in theory.
 */
753 static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
754 unsigned long num_pages)
756 static size_t bo_user_size = ~0;
758 size_t page_array_size =
759 (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
761 if (unlikely(bo_user_size == ~0)) {
762 bo_user_size = glob->ttm_bo_extra_size +
763 ttm_round_pot(sizeof(struct vmw_dma_buffer));
766 return bo_user_size + page_array_size;
/*
 * bo_free callback for kernel-owned dma buffers: return the accounted
 * memory to TTM.
 * NOTE(review): the kfree(vmw_bo) call appears elided in this extract.
 */
769 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
771 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
772 struct ttm_bo_global *glob = bo->glob;
774 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
/*
 * Initialize a vmw_dma_buffer: account the memory with TTM, zero the
 * struct, and hand it to ttm_bo_init. Per TTM convention, on failure
 * the bo_free callback is responsible for freeing @vmw_bo (hence the
 * explicit bo_free call when accounting fails, and no free after
 * ttm_bo_init fails — it calls bo_free itself).
 * NOTE(review): the interruptible parameter line, acc_size declaration
 * and some ttm_bo_init arguments are elided in this extract.
 */
778 int vmw_dmabuf_init(struct vmw_private *dev_priv,
779 struct vmw_dma_buffer *vmw_bo,
780 size_t size, struct ttm_placement *placement,
782 void (*bo_free) (struct ttm_buffer_object *bo))
784 struct ttm_bo_device *bdev = &dev_priv->bdev;
785 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
792 vmw_dmabuf_acc_size(bdev->glob,
793 (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
795 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
796 if (unlikely(ret != 0)) {
797 /* we must free the bo here as
798 * ttm_buffer_object_init does so as well */
799 bo_free(&vmw_bo->base);
803 memset(vmw_bo, 0, sizeof(*vmw_bo));
805 INIT_LIST_HEAD(&vmw_bo->validate_list);
807 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
808 ttm_bo_type_device, placement,
810 NULL, acc_size, bo_free);
/*
 * bo_free callback for user-visible dma buffers: return accounted memory
 * to TTM.
 * NOTE(review): the ttm_base_object_kfree/kfree of vmw_user_bo appears
 * elided in this extract.
 */
814 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
816 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
817 struct ttm_bo_global *glob = bo->glob;
819 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
/*
 * TTM base-object release callback for user dma buffers: drops the TTM
 * bo reference once user space holds no more handle references.
 * NOTE(review): *p_base = NULL and the ttm_bo_unref(&bo) call appear
 * elided in this extract.
 */
823 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
825 struct vmw_user_dma_buffer *vmw_user_bo;
826 struct ttm_base_object *base = *p_base;
827 struct ttm_buffer_object *bo;
831 if (unlikely(base == NULL))
834 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
835 bo = &vmw_user_bo->dma.base;
/*
 * DRM_VMW_ALLOC_DMABUF ioctl: under the master read lock, allocate and
 * initialize a user dma buffer, publish it as a TTM base object, and
 * return the handle, mmap offset and GMR id to user space.
 * After vmw_dmabuf_init, cleanup on failure goes through the bo's
 * destroy callback, not kfree.
 * NOTE(review): error-path labels (kfree on lock failure, bo unref on
 * base-object failure) and some ttm_base_object_init arguments are
 * elided in this extract.
 */
839 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
840 struct drm_file *file_priv)
842 struct vmw_private *dev_priv = vmw_priv(dev);
843 union drm_vmw_alloc_dmabuf_arg *arg =
844 (union drm_vmw_alloc_dmabuf_arg *)data;
845 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
846 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
847 struct vmw_user_dma_buffer *vmw_user_bo;
848 struct ttm_buffer_object *tmp;
849 struct vmw_master *vmaster = vmw_master(file_priv->master);
852 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
853 if (unlikely(vmw_user_bo == NULL))
856 ret = ttm_read_lock(&vmaster->lock, true);
857 if (unlikely(ret != 0)) {
862 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
863 &vmw_vram_sys_placement, true,
864 &vmw_user_dmabuf_destroy);
865 if (unlikely(ret != 0))
/* Extra bo reference held on behalf of the TTM base object. */
868 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
869 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
873 &vmw_user_dmabuf_release, NULL);
874 if (unlikely(ret != 0))
875 goto out_no_base_object;
877 rep->handle = vmw_user_bo->base.hash.key;
878 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
879 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
880 rep->cur_gmr_offset = 0;
886 ttm_read_unlock(&vmaster->lock);
/*
 * DRM_VMW_UNREF_DMABUF ioctl: drop the caller's TTM usage reference on
 * the buffer handle.
 * NOTE(review): the handle/TTM_REF_USAGE argument line is elided in this
 * extract.
 */
891 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
892 struct drm_file *file_priv)
894 struct drm_vmw_unref_dmabuf_arg *arg =
895 (struct drm_vmw_unref_dmabuf_arg *)data;
897 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
/*
 * Assign (or return the already-assigned) validate-list slot for @bo
 * during command-stream validation: idempotent per buffer, so a buffer
 * referenced twice in one submission gets a single node.
 */
902 uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
903 uint32_t cur_validate_node)
905 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
907 if (likely(vmw_bo->on_validate_list))
908 return vmw_bo->cur_validate_node;
910 vmw_bo->cur_validate_node = cur_validate_node;
911 vmw_bo->on_validate_list = true;
913 return cur_validate_node;
/*
 * Remove @bo from the current validation pass by clearing its
 * on-validate-list flag.
 */
916 void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
918 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
920 vmw_bo->on_validate_list = false;
/*
 * Resolve a user handle to a referenced vmw_dma_buffer: look up the TTM
 * base object, check it really is a buffer object, take a bo reference
 * and drop the base-object reference before returning the buffer in
 * *out.
 * NOTE(review): -EINVAL-style error returns and the final return 0 are
 * elided in this extract.
 */
923 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
924 uint32_t handle, struct vmw_dma_buffer **out)
926 struct vmw_user_dma_buffer *vmw_user_bo;
927 struct ttm_base_object *base;
929 base = ttm_base_object_lookup(tfile, handle);
930 if (unlikely(base == NULL)) {
931 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
932 (unsigned long)handle);
936 if (unlikely(base->object_type != ttm_buffer_type)) {
937 ttm_base_object_unref(&base);
938 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
939 (unsigned long)handle);
943 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
944 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
945 ttm_base_object_unref(&base);
946 *out = &vmw_user_bo->dma;
/*
 * Hardware-destroy callback for overlay streams: returns the stream id
 * to the overlay code.
 * NOTE(review): the WARN/BUG on a failing vmw_overlay_unref appears
 * elided in this extract.
 */
955 static void vmw_stream_destroy(struct vmw_resource *res)
957 struct vmw_private *dev_priv = res->dev_priv;
958 struct vmw_stream *stream;
961 DRM_INFO("%s: unref\n", __func__);
962 stream = container_of(res, struct vmw_stream, res);
964 ret = vmw_overlay_unref(dev_priv, stream->stream_id);
/*
 * Initialize a stream resource: register it in stream_idr, claim an
 * overlay stream id, and activate the resource with vmw_stream_destroy
 * as its destructor. On init failure the resource is freed directly
 * (kfree or @res_free); on claim failure it is unreferenced so the
 * normal release path tears it down.
 * NOTE(review): the kfree branch, the claim-failure if-body brace and
 * the returns are elided in this extract.
 */
968 static int vmw_stream_init(struct vmw_private *dev_priv,
969 struct vmw_stream *stream,
970 void (*res_free) (struct vmw_resource *res))
972 struct vmw_resource *res = &stream->res;
975 ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
976 VMW_RES_STREAM, res_free);
978 if (unlikely(ret != 0)) {
979 if (res_free == NULL)
982 res_free(&stream->res);
986 ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
988 vmw_resource_unreference(&res);
992 DRM_INFO("%s: claimed\n", __func__);
994 vmw_resource_activate(&stream->res, vmw_stream_destroy);
999 * User-space context management:
/*
 * res_free callback for user-space streams: recovers the enclosing
 * vmw_user_stream from its resource.
 * NOTE(review): the kfree(stream) call appears elided in this extract.
 */
1002 static void vmw_user_stream_free(struct vmw_resource *res)
1004 struct vmw_user_stream *stream =
1005 container_of(res, struct vmw_user_stream, stream.res);
1011 * This function is called when user space has no more references on the
1012 * base object. It releases the base-object's reference on the resource object.
/*
 * TTM base-object release callback for streams: drops the base object's
 * reference on the stream resource.
 */
1015 static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1017 struct ttm_base_object *base = *p_base;
1018 struct vmw_user_stream *stream =
1019 container_of(base, struct vmw_user_stream, base);
1020 struct vmw_resource *res = &stream->stream.res;
1023 vmw_resource_unreference(&res);
/*
 * DRM_VMW_UNREF_STREAM ioctl: look up the stream by id, verify it is a
 * user-space stream owned by the calling file (streams are never
 * shareable — only tfile is checked), then drop the caller's TTM usage
 * reference. The lookup reference is dropped at the end in all cases.
 * NOTE(review): error returns between the checks are elided in this
 * extract.
 */
1026 int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1027 struct drm_file *file_priv)
1029 struct vmw_private *dev_priv = vmw_priv(dev);
1030 struct vmw_resource *res;
1031 struct vmw_user_stream *stream;
1032 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1033 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1036 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
1037 if (unlikely(res == NULL))
1040 if (res->res_free != &vmw_user_stream_free) {
1045 stream = container_of(res, struct vmw_user_stream, stream.res);
1046 if (stream->base.tfile != tfile) {
1051 ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1053 vmw_resource_unreference(&res);
/*
 * DRM_VMW_CLAIM_STREAM ioctl: allocate a vmw_user_stream, initialize it
 * (vmw_stream_init owns cleanup from that point), publish it as a
 * non-shareable TTM base object, and return the stream id. On
 * base-object failure the extra reference is dropped and teardown
 * proceeds through the release path.
 * NOTE(review): allocation-failure return and the final return are
 * elided in this extract.
 */
1057 int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1058 struct drm_file *file_priv)
1060 struct vmw_private *dev_priv = vmw_priv(dev);
1061 struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1062 struct vmw_resource *res;
1063 struct vmw_resource *tmp;
1064 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1065 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1068 if (unlikely(stream == NULL))
1071 res = &stream->stream.res;
1072 stream->base.shareable = false;
1073 stream->base.tfile = NULL;
1075 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1076 if (unlikely(ret != 0))
/* Extra reference held by the TTM base object until its release callback. */
1079 tmp = vmw_resource_reference(res);
1080 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1081 &vmw_user_stream_base_release, NULL);
1083 if (unlikely(ret != 0)) {
1084 vmw_resource_unreference(&tmp);
1088 arg->stream_id = res->id;
1090 vmw_resource_unreference(&res);
/*
 * Resolve a user stream id to a referenced resource for the overlay
 * code: verify it is a user stream owned by @tfile, rewrite *inout_id
 * to the hardware stream id, and return the resource through *out.
 * The reference is only dropped on the error paths (elided here); on
 * success the caller owns it.
 * NOTE(review): the *out assignment, success return and error returns
 * are elided in this extract.
 */
1094 int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1095 struct ttm_object_file *tfile,
1096 uint32_t *inout_id, struct vmw_resource **out)
1098 struct vmw_user_stream *stream;
1099 struct vmw_resource *res;
1102 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
1103 if (unlikely(res == NULL))
1106 if (res->res_free != &vmw_user_stream_free) {
1111 stream = container_of(res, struct vmw_user_stream, stream.res);
1112 if (stream->base.tfile != tfile) {
1117 *inout_id = stream->stream.stream_id;
1121 vmw_resource_unreference(&res);