drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
        struct ttm_prime_object prime;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
        .res_type = vmw_res_stream,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "video streams",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

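/*
 * Minimal usage sketch of the reference helpers above; compiled out,
 * illustration only. The resource pointer is assumed to come from one
 * of the lookup functions in this file.
 */
#if 0
static void example_resource_ref_usage(struct vmw_resource *res)
{
        struct vmw_resource *ref = vmw_resource_reference(res);

        /* ... use the resource while holding our own reference ... */

        vmw_resource_unreference(&ref); /* also clears @ref to NULL */
}
#endif
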
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set @res->id to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

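/*
 * Final kref release function. Called from vmw_resource_unreference()
 * via kref_put() with the resource lock write-held; the lock is dropped
 * around the potentially sleeping unbind/destroy work below and
 * re-acquired before the final idr_remove().
 */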
static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, false, NULL);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.shared = false;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                res->hw_destroy(res);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_context_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);

        if (id != -1)
                idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

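/*
 * Illustrative sketch of the typical init/activate sequence for a
 * resource type, modeled on vmw_stream_init() further down; compiled
 * out, illustration only. vmw_example_func and vmw_example_hw_destroy
 * are hypothetical names, not part of the driver.
 */
#if 0
static int example_resource_setup(struct vmw_private *dev_priv,
                                  struct vmw_resource *res)
{
        int ret;

        ret = vmw_resource_init(dev_priv, res, false, NULL,
                                &vmw_example_func);
        if (unlikely(ret != 0))
                return ret;

        /* ... make the hardware aware of the resource here ... */

        vmw_resource_activate(res, vmw_example_hw_destroy);
        return 0;
}
#endif
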
/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation means that the function vmw_resource_lookup will
 * find the resource.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

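/*
 * Illustrative sketch of looking up a refcounted resource from a
 * user-space handle and dropping the reference when done; compiled out,
 * illustration only. The converter is whatever matches the expected
 * resource type (e.g. user_surface_converter, as used right below).
 */
#if 0
static int example_handle_to_res(struct vmw_private *dev_priv,
                                 struct ttm_object_file *tfile,
                                 uint32_t handle)
{
        struct vmw_resource *res;
        int ret;

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter, &res);
        if (unlikely(ret != 0))
                return ret;

        /* ... use the resource ... */

        vmw_resource_unreference(&res);
        return 0;
}
#endif
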
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
        return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                                  bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_dma_buffer));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}

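/*
 * Worked example for the accounting above: with 4 KiB pages, a 1 MiB
 * buffer gives num_pages = 256, so the page array accounts for
 * 256 * sizeof(void *) bytes (plus 256 * sizeof(dma_addr_t) when
 * map_mode == vmw_dma_alloc_coherent), each rounded up by
 * ttm_round_pot(), on top of the fixed per-object struct sizes.
 */
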
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_dmabuf_destroy);

        BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

        acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible,
                          NULL, acc_size, NULL, NULL, bo_free);
        return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
                                            enum ttm_ref_type ref_type)
{
        struct vmw_user_dma_buffer *user_bo;
        user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->dma.base);
                break;
        default:
                BUG();
        }
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              (dev_priv->has_mob) ?
                              &vmw_sys_placement :
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_dmabuf_release,
                                    &vmw_user_dmabuf_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_dma_buf = &user_bo->dma;
        *handle = user_bo->prime.base.hash.key;

out_no_base_object:
        return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                                        struct ttm_object_file *tfile,
                                        uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->dma.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;

                if (nonblock)
                        return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;

                lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->dma.base);

        return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
                                           struct ttm_object_file *tfile,
                                           uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_dma_buffer *dma_buf;
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
                                       dma);
                ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_dmabuf_unreference(&dma_buf);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
                                                      arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}

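/*
 * Illustrative user-space sketch of driving the synccpu ioctl above
 * through libdrm's drmCommandWrite(); compiled out, illustration only.
 * The struct fields and enum values match the names this file reads,
 * but treat the exact invocation (headers, DRM_VMW_SYNCCPU define) as
 * an assumption rather than a reference.
 */
#if 0
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int example_synccpu_access(int fd, uint32_t handle)
{
        struct drm_vmw_synccpu_arg arg = {
                .handle = handle,
                .op = drm_vmw_synccpu_grab,
                .flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
        };
        int ret;

        ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
        if (ret)
                return ret;

        /* ... CPU access to the buffer contents is now safe ... */

        arg.op = drm_vmw_synccpu_release;
        return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}
#endif
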
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                              struct vmw_dma_buffer *dma_buf,
                              uint32_t *handle)
{
        struct vmw_user_dma_buffer *user_bo;

        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
                return -EINVAL;

        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, false, res_free,
                                &vmw_stream_func);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(stream, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;


        res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of streams anyway.
         */

        if (unlikely(vmw_user_stream_size == 0))
                vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_stream_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for stream"
                                  " creation.\n");
                goto out_unlock;
        }


        stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        if (unlikely(stream == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_stream_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
                                  *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_dma_buffer *dma_buf;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    args->size, false, &args->handle,
                                    &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

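/*
 * Worked example for the pitch/size computation above: a 640x480 dumb
 * buffer at 32 bpp gives pitch = 640 * ((32 + 7) / 8) = 2560 bytes and
 * size = 2560 * 480 = 1228800 bytes. The (bpp + 7) / 8 term rounds
 * sub-byte depths up to whole bytes per pixel.
 */
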
/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(backup == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (new_backup && new_backup != res->backup) {

                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                res->backup = vmw_dmabuf_reference(new_backup);
                lockdep_assert_held(&new_backup->base.resv->lock.base);
                list_add_tail(&res->mob_head, &new_backup->res_list);
        }
        if (new_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              true, false);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, true);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

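/*
 * Illustrative sketch of the reserve -> validate -> unreserve cycle
 * around command submission, using only functions from this file;
 * compiled out, illustration only. Real submission paths also reserve
 * and fence the backup buffer; this shows the resource-side ordering.
 */
#if 0
static int example_submit_with_resource(struct vmw_resource *res)
{
        int ret;

        ret = vmw_resource_reserve(res, false);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_resource_validate(res);
        if (unlikely(ret != 0))
                goto out_unreserve;

        /* ... emit commands referencing @res ... */

out_unreserve:
        vmw_resource_unreserve(res, NULL, 0);
        return ret;
}
#endif
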
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.shared = false;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (likely(!res->func->may_evict))
                return 0;

        val_buf.bo = NULL;
        val_buf.shared = false;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;

        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                reservation_object_add_excl_fence(bo->resv, &fence->base);
                fence_put(&fence->base);
        } else
                reservation_object_add_excl_fence(bo->resv, &fence->base);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
{
        struct vmw_dma_buffer *dma_buf;

        if (mem == NULL)
                return;

        if (bo->destroy != vmw_dmabuf_bo_free &&
            bo->destroy != vmw_user_dmabuf_destroy)
                return;

        dma_buf = container_of(bo, struct vmw_dma_buffer, base);

        if (mem->mem_type != VMW_PL_MOB) {
                struct vmw_resource *res, *n;
                struct ttm_validate_buffer val_buf;

                val_buf.bo = bo;
                val_buf.shared = false;

                list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

                        if (unlikely(res->func->unbind == NULL))
                                continue;

                        (void) res->func->unbind(res, true, &val_buf);
                        res->backup_dirty = true;
                        res->res_dirty = false;
                        list_del_init(&res->mob_head);
                }

                (void) ttm_bo_wait(bo, false, false, false);
        }
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}