/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
        struct ttm_prime_object prime;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
        .res_type = vmw_res_stream,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "video streams",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, false, NULL);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                res->hw_destroy(res);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_context_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);

        if (id != -1)
                idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

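/*
 * Illustrative caller sketch (not part of the driver): a successful lookup
 * returns a refcounted resource, so the caller is expected to drop that
 * reference with vmw_resource_unreference() when done. The handle and
 * converter below are hypothetical placeholders.
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (ret == 0) {
 *		... use the resource ...
 *		vmw_resource_unreference(&res);
 *	}
 */
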
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
        return ret;
}

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                                  bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_dma_buffer));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}

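/*
 * Worked example of the accounting above (illustrative only, assuming 4 KiB
 * pages on a 64-bit kernel): a 64 KiB request gives num_pages = 16, so
 * page_array_size starts at ttm_round_pot(16 * sizeof(void *)) = 128 bytes.
 * With coherent DMA mapping another ttm_round_pot(16 * sizeof(dma_addr_t)) =
 * 128 bytes is added, and the cached (user_)struct_size constant covering the
 * TTM backend plus the vmw_(user_)dma_buffer struct is added on top.
 */
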
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_dmabuf_destroy);

        BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

        acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible,
                          NULL, acc_size, NULL, bo_free);
        return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
                                            enum ttm_ref_type ref_type)
{
        struct vmw_user_dma_buffer *user_bo;
        user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->dma.base);
                break;
        default:
                BUG();
        }
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              (dev_priv->has_mob) ?
                              &vmw_sys_placement :
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_dmabuf_release,
                                    &vmw_user_dmabuf_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_dma_buf = &user_bo->dma;
        *handle = user_bo->prime.base.hash.key;

out_no_base_object:
        return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                                        struct ttm_object_file *tfile,
                                        uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->dma.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                struct ttm_bo_device *bdev = bo->bdev;

                spin_lock(&bdev->fence_lock);
                ret = ttm_bo_wait(bo, false, true,
                                  !!(flags & drm_vmw_synccpu_dontblock));
                spin_unlock(&bdev->fence_lock);
                return ret;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->dma.base);

        return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
                                           struct ttm_object_file *tfile,
                                           uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

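/*
 * Illustrative user-space flow for the synccpu mechanism (not part of the
 * driver). Field names follow struct drm_vmw_synccpu_arg; the libdrm call
 * used below is an assumption about how the ioctl is typically issued:
 *
 *	struct drm_vmw_synccpu_arg arg = { 0 };
 *
 *	arg.handle = bo_handle;
 *	arg.op = drm_vmw_synccpu_grab;
 *	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	... CPU access through the buffer's mmap ...
 *	arg.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */
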
/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_dma_buffer *dma_buf;
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
                                       dma);
                ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_dmabuf_unreference(&dma_buf);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
                                                      arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                              struct vmw_dma_buffer *dma_buf,
                              uint32_t *handle)
{
        struct vmw_user_dma_buffer *user_bo;

        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
                return -EINVAL;

        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, false, res_free,
                                &vmw_stream_func);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(stream, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of streams anyway.
         */

        if (unlikely(vmw_user_stream_size == 0))
                vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_stream_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for stream"
                                  " creation.\n");
                goto out_unlock;
        }

        stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        if (unlikely(stream == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_stream_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
                                   *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_dma_buffer *dma_buf;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    args->size, false, &args->handle,
                                    &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

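/*
 * Example of the pitch/size computation above (illustrative only): a
 * 1024x768 dumb buffer at 32 bpp gives pitch = 1024 * ((32 + 7) / 8) = 4096
 * bytes and size = 4096 * 768 = 3145728 bytes, which the TTM buffer object
 * created by vmw_user_dmabuf_alloc() then backs with whole pages.
 */
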
/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(backup == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched backup buffers.
 * @new_backup_offset: New backup offset if @new_backup is non-NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (new_backup && new_backup != res->backup) {

                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                res->backup = vmw_dmabuf_reference(new_backup);
                lockdep_assert_held(&new_backup->base.resv->lock.base);
                list_add_tail(&res->mob_head, &new_backup->res_list);
        }
        if (new_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              true, false);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, true);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

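/*
 * Sketch of the reservation lifecycle implied by the two functions above
 * (illustrative only, not the exact execbuf path, which additionally reserves
 * the backup buffers through ttm_eu before validation):
 *
 *	ret = vmw_resource_reserve(res, false);
 *	if (ret == 0) {
 *		ret = vmw_resource_validate(res);
 *		... emit commands referencing res->id ...
 *		vmw_resource_unreserve(res, NULL, 0);
 *	}
 */
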
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (likely(!res->func->may_evict))
                return 0;

        val_buf.bo = NULL;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct vmw_fence_obj *old_fence_obj;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL)
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        else
                driver->sync_obj_ref(fence);

        spin_lock(&bdev->fence_lock);

        old_fence_obj = bo->sync_obj;
        bo->sync_obj = fence;

        spin_unlock(&bdev->fence_lock);

        if (old_fence_obj)
                vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
{
        struct vmw_dma_buffer *dma_buf;

        if (mem == NULL)
                return;

        if (bo->destroy != vmw_dmabuf_bo_free &&
            bo->destroy != vmw_user_dmabuf_destroy)
                return;

        dma_buf = container_of(bo, struct vmw_dma_buffer, base);

        if (mem->mem_type != VMW_PL_MOB) {
                struct vmw_resource *res, *n;
                struct ttm_bo_device *bdev = bo->bdev;
                struct ttm_validate_buffer val_buf;

                val_buf.bo = bo;

                list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

                        if (unlikely(res->func->unbind == NULL))
                                continue;

                        (void) res->func->unbind(res, true, &val_buf);
                        res->backup_dirty = true;
                        res->res_dirty = false;
                        list_del_init(&res->mob_head);
                }

                spin_lock(&bdev->fence_lock);
                (void) ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bdev->fence_lock);
        }
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}