44ac46bb5629cc9192ee9df1553f0538160d1690
[firefly-linux-kernel-4.4.55.git] drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1 /**************************************************************************
2  *
3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27
28 #include "vmwgfx_drv.h"
29 #include <drm/vmwgfx_drm.h>
30 #include <drm/ttm/ttm_object.h>
31 #include <drm/ttm/ttm_placement.h>
32 #include <drm/drmP.h>
33
34 /**
35  * struct vmw_user_resource_conv - Identify a derived user-exported resource
36  * type and provide a function to convert its ttm_base_object pointer to
37  * a struct vmw_resource
38  */
39 struct vmw_user_resource_conv {
40         enum ttm_object_type object_type;
41         struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
42         void (*res_free) (struct vmw_resource *res);
43 };
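
/*
 * Usage sketch (illustrative annotation, not part of the original file):
 * a caller holding a user-space handle resolves it to a refcounted
 * resource through a converter, for example with the surface converter
 * declared further down in this file:
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (ret == 0) {
 *		... use vmw_res_to_srf(res) here ...
 *		vmw_resource_unreference(&res);
 *	}
 *
 * The lookup checks the ttm_base_object type against @object_type and
 * uses @base_obj_to_res to obtain the embedded struct vmw_resource.
 */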
44
45 /**
46  * struct vmw_res_func - members and functions common for a resource type
47  *
48  * @res_type:          Enum that identifies the lru list to use for eviction.
49  * @needs_backup:      Whether the resource is guest-backed and needs
50  *                     persistent buffer storage.
51  * @type_name:         String that identifies the resource type.
52  * @backup_placement:  TTM placement for backup buffers.
53  * @may_evict:         Whether the resource may be evicted.
54  * @create:            Create a hardware resource.
55  * @destroy:           Destroy a hardware resource.
56  * @bind:              Bind a hardware resource to persistent buffer storage.
57  * @unbind:            Unbind a hardware resource from persistent
58  *                     buffer storage.
59  */
60
61 struct vmw_res_func {
62         enum vmw_res_type res_type;
63         bool needs_backup;
64         const char *type_name;
65         struct ttm_placement *backup_placement;
66         bool may_evict;
67
68         int (*create) (struct vmw_resource *res);
69         int (*destroy) (struct vmw_resource *res);
70         int (*bind) (struct vmw_resource *res,
71                      struct ttm_validate_buffer *val_buf);
72         int (*unbind) (struct vmw_resource *res,
73                        bool readback,
74                        struct ttm_validate_buffer *val_buf);
75 };
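
/*
 * Rough lifecycle sketch (added annotation, not from the original file):
 * during validation a resource is made visible to the device with
 * @create and, if @needs_backup is set, attached to its backup buffer
 * with @bind.  Eviction reverses this: @unbind detaches the resource,
 * optionally reading its contents back into the backup buffer, and
 * @destroy releases the hardware resource, as vmw_legacy_srf_create(),
 * vmw_legacy_srf_bind(), vmw_legacy_srf_unbind() and
 * vmw_legacy_srf_destroy() below illustrate for legacy surfaces.
 */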
76
77 struct vmw_user_dma_buffer {
78         struct ttm_base_object base;
79         struct vmw_dma_buffer dma;
80 };
81
82 struct vmw_bo_user_rep {
83         uint32_t handle;
84         uint64_t map_handle;
85 };
86
87 struct vmw_stream {
88         struct vmw_resource res;
89         uint32_t stream_id;
90 };
91
92 struct vmw_user_stream {
93         struct ttm_base_object base;
94         struct vmw_stream stream;
95 };
96
97
98 static uint64_t vmw_user_stream_size;
99
100 static const struct vmw_res_func vmw_stream_func = {
101         .res_type = vmw_res_stream,
102         .needs_backup = false,
103         .may_evict = false,
104         .type_name = "video streams",
105         .backup_placement = NULL,
106         .create = NULL,
107         .destroy = NULL,
108         .bind = NULL,
109         .unbind = NULL
110 };
111
112 struct vmw_user_context {
113         struct ttm_base_object base;
114         struct vmw_resource res;
115 };
116
117 static void vmw_user_context_free(struct vmw_resource *res);
118 static struct vmw_resource *
119 vmw_user_context_base_to_res(struct ttm_base_object *base);
120
121 static uint64_t vmw_user_context_size;
122
123 static const struct vmw_user_resource_conv user_context_conv = {
124         .object_type = VMW_RES_CONTEXT,
125         .base_obj_to_res = vmw_user_context_base_to_res,
126         .res_free = vmw_user_context_free
127 };
128
129 const struct vmw_user_resource_conv *user_context_converter =
130         &user_context_conv;
131
132
133 static const struct vmw_res_func vmw_legacy_context_func = {
134         .res_type = vmw_res_context,
135         .needs_backup = false,
136         .may_evict = false,
137         .type_name = "legacy contexts",
138         .backup_placement = NULL,
139         .create = NULL,
140         .destroy = NULL,
141         .bind = NULL,
142         .unbind = NULL
143 };
144
145
146 /**
147  * struct vmw_user_surface - User-space visible surface resource
148  *
149  * @base:           The TTM base object handling user-space visibility.
150  * @srf:            The surface metadata.
151  * @size:           TTM accounting size for the surface.
152  */
153 struct vmw_user_surface {
154         struct ttm_base_object base;
155         struct vmw_surface srf;
156         uint32_t size;
157         uint32_t backup_handle;
158 };
159
160 /**
161  * struct vmw_surface_offset - Backing store mip level offset info
162  *
163  * @face:           Surface face.
164  * @mip:            Mip level.
165  * @bo_offset:      Offset into backing store of this mip level.
166  *
167  */
168 struct vmw_surface_offset {
169         uint32_t face;
170         uint32_t mip;
171         uint32_t bo_offset;
172 };
173
174 static void vmw_user_surface_free(struct vmw_resource *res);
175 static struct vmw_resource *
176 vmw_user_surface_base_to_res(struct ttm_base_object *base);
177 static int vmw_legacy_srf_bind(struct vmw_resource *res,
178                                struct ttm_validate_buffer *val_buf);
179 static int vmw_legacy_srf_unbind(struct vmw_resource *res,
180                                  bool readback,
181                                  struct ttm_validate_buffer *val_buf);
182 static int vmw_legacy_srf_create(struct vmw_resource *res);
183 static int vmw_legacy_srf_destroy(struct vmw_resource *res);
184
185 static const struct vmw_user_resource_conv user_surface_conv = {
186         .object_type = VMW_RES_SURFACE,
187         .base_obj_to_res = vmw_user_surface_base_to_res,
188         .res_free = vmw_user_surface_free
189 };
190
191 const struct vmw_user_resource_conv *user_surface_converter =
192         &user_surface_conv;
193
194
195 static uint64_t vmw_user_surface_size;
196
197 static const struct vmw_res_func vmw_legacy_surface_func = {
198         .res_type = vmw_res_surface,
199         .needs_backup = false,
200         .may_evict = true,
201         .type_name = "legacy surfaces",
202         .backup_placement = &vmw_srf_placement,
203         .create = &vmw_legacy_srf_create,
204         .destroy = &vmw_legacy_srf_destroy,
205         .bind = &vmw_legacy_srf_bind,
206         .unbind = &vmw_legacy_srf_unbind
207 };
208
209
210 static inline struct vmw_dma_buffer *
211 vmw_dma_buffer(struct ttm_buffer_object *bo)
212 {
213         return container_of(bo, struct vmw_dma_buffer, base);
214 }
215
216 static inline struct vmw_user_dma_buffer *
217 vmw_user_dma_buffer(struct ttm_buffer_object *bo)
218 {
219         struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
220         return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
221 }
222
223 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
224 {
225         kref_get(&res->kref);
226         return res;
227 }
228
229
230 /**
231  * vmw_resource_release_id - release a resource id to the id manager.
232  *
233  * @res: Pointer to the resource.
234  *
235  * Release the resource id to the resource id manager and set res->id to -1.
236  */
237 static void vmw_resource_release_id(struct vmw_resource *res)
238 {
239         struct vmw_private *dev_priv = res->dev_priv;
240         struct idr *idr = &dev_priv->res_idr[res->func->res_type];
241
242         write_lock(&dev_priv->resource_lock);
243         if (res->id != -1)
244                 idr_remove(idr, res->id);
245         res->id = -1;
246         write_unlock(&dev_priv->resource_lock);
247 }
248
249 static void vmw_resource_release(struct kref *kref)
250 {
251         struct vmw_resource *res =
252             container_of(kref, struct vmw_resource, kref);
253         struct vmw_private *dev_priv = res->dev_priv;
254         int id;
255         struct idr *idr = &dev_priv->res_idr[res->func->res_type];
256
257         res->avail = false;
258         list_del_init(&res->lru_head);
259         write_unlock(&dev_priv->resource_lock);
260         if (res->backup) {
261                 struct ttm_buffer_object *bo = &res->backup->base;
262
263                 ttm_bo_reserve(bo, false, false, false, 0);
264                 if (!list_empty(&res->mob_head) &&
265                     res->func->unbind != NULL) {
266                         struct ttm_validate_buffer val_buf;
267
268                         val_buf.bo = bo;
269                         res->func->unbind(res, false, &val_buf);
270                 }
271                 res->backup_dirty = false;
272                 list_del_init(&res->mob_head);
273                 ttm_bo_unreserve(bo);
274                 vmw_dmabuf_unreference(&res->backup);
275         }
276
277         if (likely(res->hw_destroy != NULL))
278                 res->hw_destroy(res);
279
280         id = res->id;
281         if (res->res_free != NULL)
282                 res->res_free(res);
283         else
284                 kfree(res);
285
286         write_lock(&dev_priv->resource_lock);
287
288         if (id != -1)
289                 idr_remove(idr, id);
290 }
291
292 void vmw_resource_unreference(struct vmw_resource **p_res)
293 {
294         struct vmw_resource *res = *p_res;
295         struct vmw_private *dev_priv = res->dev_priv;
296
297         *p_res = NULL;
298         write_lock(&dev_priv->resource_lock);
299         kref_put(&res->kref, vmw_resource_release);
300         write_unlock(&dev_priv->resource_lock);
301 }
302
303
304 /**
305  * vmw_resource_alloc_id - allocate a resource id from the id manager.
306  *
307  * @res: Pointer to the resource.
308  *
309  * Allocate the lowest free resource id from the resource id manager, and set
310  * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
311  */
312 static int vmw_resource_alloc_id(struct vmw_resource *res)
313 {
314         struct vmw_private *dev_priv = res->dev_priv;
315         int ret;
316         struct idr *idr = &dev_priv->res_idr[res->func->res_type];
317
318         BUG_ON(res->id != -1);
319
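        /*
         * Legacy two-step idr allocation: idr_pre_get() preallocates
         * memory outside the resource lock, and idr_get_new_above() may
         * still return -EAGAIN if that preallocation was consumed by a
         * concurrent allocator, in which case the loop simply retries.
         */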
320         do {
321                 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
322                         return -ENOMEM;
323
324                 write_lock(&dev_priv->resource_lock);
325                 ret = idr_get_new_above(idr, res, 1, &res->id);
326                 write_unlock(&dev_priv->resource_lock);
327
328         } while (ret == -EAGAIN);
329
330         return ret;
331 }
332
333 /**
334  * vmw_resource_init - initialize a struct vmw_resource
335  *
336  * @dev_priv:       Pointer to a device private struct.
337  * @res:            The struct vmw_resource to initialize.
339  * @delay_id:       Boolean whether to defer device id allocation until
340  *                  the first validation.
341  * @res_free:       Resource destructor.
342  * @func:           Resource function table.
343  */
344 static int vmw_resource_init(struct vmw_private *dev_priv,
345                              struct vmw_resource *res,
346                              bool delay_id,
347                              void (*res_free) (struct vmw_resource *res),
348                              const struct vmw_res_func *func)
349 {
350         kref_init(&res->kref);
351         res->hw_destroy = NULL;
352         res->res_free = res_free;
353         res->avail = false;
354         res->dev_priv = dev_priv;
355         res->func = func;
356         INIT_LIST_HEAD(&res->lru_head);
357         INIT_LIST_HEAD(&res->mob_head);
358         res->id = -1;
359         res->backup = NULL;
360         res->backup_offset = 0;
361         res->backup_dirty = false;
362         res->res_dirty = false;
363         if (delay_id)
364                 return 0;
365         else
366                 return vmw_resource_alloc_id(res);
367 }
368
369 /**
370  * vmw_resource_activate
371  *
372  * @res:        Pointer to the newly created resource
373  * @hw_destroy: Destroy function. NULL if none.
374  *
375  * Activate a resource after the hardware has been made aware of it.
376  * Set the destroy function to @hw_destroy. Typically this frees the
377  * resource and destroys the hardware resources associated with it.
378  * Activate basically means that the function vmw_resource_lookup will
379  * find it.
380  */
381 static void vmw_resource_activate(struct vmw_resource *res,
382                                   void (*hw_destroy) (struct vmw_resource *))
383 {
384         struct vmw_private *dev_priv = res->dev_priv;
385
386         write_lock(&dev_priv->resource_lock);
387         res->avail = true;
388         res->hw_destroy = hw_destroy;
389         write_unlock(&dev_priv->resource_lock);
390 }
391
392 struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
393                                          struct idr *idr, int id)
394 {
395         struct vmw_resource *res;
396
397         read_lock(&dev_priv->resource_lock);
398         res = idr_find(idr, id);
399         if (res && res->avail)
400                 kref_get(&res->kref);
401         else
402                 res = NULL;
403         read_unlock(&dev_priv->resource_lock);
404
405         if (unlikely(res == NULL))
406                 return NULL;
407
408         return res;
409 }
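
/*
 * Note (added annotation): the res->avail flag gates visibility.
 * vmw_resource_lookup() only takes a reference on resources whose avail
 * flag is set, and vmw_resource_release() clears the flag under the
 * resource lock before teardown, so concurrent lookups cannot resurrect
 * a dying resource.
 */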
410
411 /**
412  * Context management:
413  */
414
415 static void vmw_hw_context_destroy(struct vmw_resource *res)
416 {
417
418         struct vmw_private *dev_priv = res->dev_priv;
419         struct {
420                 SVGA3dCmdHeader header;
421                 SVGA3dCmdDestroyContext body;
422         } *cmd;
423
424
425         vmw_execbuf_release_pinned_bo(dev_priv);
426         cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
427         if (unlikely(cmd == NULL)) {
428                 DRM_ERROR("Failed reserving FIFO space for context "
429                           "destruction.\n");
430                 return;
431         }
432
433         cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
434         cmd->header.size = cpu_to_le32(sizeof(cmd->body));
435         cmd->body.cid = cpu_to_le32(res->id);
436
437         vmw_fifo_commit(dev_priv, sizeof(*cmd));
438         vmw_3d_resource_dec(dev_priv, false);
439 }
440
441 static int vmw_context_init(struct vmw_private *dev_priv,
442                             struct vmw_resource *res,
443                             void (*res_free) (struct vmw_resource *res))
444 {
445         int ret;
446
447         struct {
448                 SVGA3dCmdHeader header;
449                 SVGA3dCmdDefineContext body;
450         } *cmd;
451
452         ret = vmw_resource_init(dev_priv, res, false,
453                                 res_free, &vmw_legacy_context_func);
454
455         if (unlikely(ret != 0)) {
456                 DRM_ERROR("Failed to allocate a resource id.\n");
457                 goto out_early;
458         }
459
460         if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
461                 DRM_ERROR("Out of hw context ids.\n");
462                 vmw_resource_unreference(&res);
463                 return -ENOMEM;
464         }
465
466         cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
467         if (unlikely(cmd == NULL)) {
468                 DRM_ERROR("Fifo reserve failed.\n");
469                 vmw_resource_unreference(&res);
470                 return -ENOMEM;
471         }
472
473         cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
474         cmd->header.size = cpu_to_le32(sizeof(cmd->body));
475         cmd->body.cid = cpu_to_le32(res->id);
476
477         vmw_fifo_commit(dev_priv, sizeof(*cmd));
478         (void) vmw_3d_resource_inc(dev_priv, false);
479         vmw_resource_activate(res, vmw_hw_context_destroy);
480         return 0;
481
482 out_early:
483         if (res_free == NULL)
484                 kfree(res);
485         else
486                 res_free(res);
487         return ret;
488 }
489
490 struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
491 {
492         struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
493         int ret;
494
495         if (unlikely(res == NULL))
496                 return NULL;
497
498         ret = vmw_context_init(dev_priv, res, NULL);
499
500         return (ret == 0) ? res : NULL;
501 }
502
503 /**
504  * User-space context management:
505  */
506
507 static struct vmw_resource *
508 vmw_user_context_base_to_res(struct ttm_base_object *base)
509 {
510         return &(container_of(base, struct vmw_user_context, base)->res);
511 }
512
513 static void vmw_user_context_free(struct vmw_resource *res)
514 {
515         struct vmw_user_context *ctx =
516             container_of(res, struct vmw_user_context, res);
517         struct vmw_private *dev_priv = res->dev_priv;
518
519         ttm_base_object_kfree(ctx, base);
520         ttm_mem_global_free(vmw_mem_glob(dev_priv),
521                             vmw_user_context_size);
522 }
523
524 /**
525  * This function is called when user space has no more references on the
526  * base object. It releases the base-object's reference on the resource object.
527  */
528
529 static void vmw_user_context_base_release(struct ttm_base_object **p_base)
530 {
531         struct ttm_base_object *base = *p_base;
532         struct vmw_user_context *ctx =
533             container_of(base, struct vmw_user_context, base);
534         struct vmw_resource *res = &ctx->res;
535
536         *p_base = NULL;
537         vmw_resource_unreference(&res);
538 }
539
540 int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
541                               struct drm_file *file_priv)
542 {
543         struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
544         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
545
546         return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
547 }
548
549 int vmw_context_define_ioctl(struct drm_device *dev, void *data,
550                              struct drm_file *file_priv)
551 {
552         struct vmw_private *dev_priv = vmw_priv(dev);
553         struct vmw_user_context *ctx;
554         struct vmw_resource *res;
555         struct vmw_resource *tmp;
556         struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
557         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
558         struct vmw_master *vmaster = vmw_master(file_priv->master);
559         int ret;
560
561
562         /*
563          * Approximate idr memory usage with 128 bytes. It will be limited
564          * by the maximum number of contexts anyway.
565          */
566
567         if (unlikely(vmw_user_context_size == 0))
568                 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
569
570         ret = ttm_read_lock(&vmaster->lock, true);
571         if (unlikely(ret != 0))
572                 return ret;
573
574         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
575                                    vmw_user_context_size,
576                                    false, true);
577         if (unlikely(ret != 0)) {
578                 if (ret != -ERESTARTSYS)
579                         DRM_ERROR("Out of graphics memory for context"
580                                   " creation.\n");
581                 goto out_unlock;
582         }
583
584         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
585         if (unlikely(ctx == NULL)) {
586                 ttm_mem_global_free(vmw_mem_glob(dev_priv),
587                                     vmw_user_context_size);
588                 ret = -ENOMEM;
589                 goto out_unlock;
590         }
591
592         res = &ctx->res;
593         ctx->base.shareable = false;
594         ctx->base.tfile = NULL;
595
596         /*
597          * From here on, the destructor takes over resource freeing.
598          */
599
600         ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
601         if (unlikely(ret != 0))
602                 goto out_unlock;
603
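        /*
         * The extra reference taken below is handed over to the TTM base
         * object and is dropped again in vmw_user_context_base_release().
         * The ioctl's own reference is dropped at the out_err label.
         */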
604         tmp = vmw_resource_reference(&ctx->res);
605         ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
606                                    &vmw_user_context_base_release, NULL);
607
608         if (unlikely(ret != 0)) {
609                 vmw_resource_unreference(&tmp);
610                 goto out_err;
611         }
612
613         arg->cid = ctx->base.hash.key;
614 out_err:
615         vmw_resource_unreference(&res);
616 out_unlock:
617         ttm_read_unlock(&vmaster->lock);
618         return ret;
619
620 }
621
622 /**
623  * struct vmw_bpp - Bits per pixel info for surface storage size computation.
624  *
625  * @bpp:         Bits per pixel.
626  * @s_bpp:       Stride bits per pixel. See definition below.
627  *
628  */
629 struct vmw_bpp {
630         uint8_t bpp;
631         uint8_t s_bpp;
632 };
633
634 /*
635  * Size table for the supported SVGA3D surface formats. Each entry consists
636  * of two values: the bpp value and the s_bpp value, which is short for
637  * "stride bits per pixel". The values are given in such a way that the
638  * minimum stride for the image is calculated using
639  *
640  * min_stride = w*s_bpp
641  *
642  * and the total memory requirement for the image is
643  *
644  * h*min_stride*bpp/s_bpp
645  *
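 * Worked example (illustrative, matching the pitch computation in
 * vmw_surface_dma_encode() below): a 64x64 SVGA3D_DXT1 mip level has
 * s_bpp = 16 and bpp = 4, so min_stride = (64 * 16 + 7) / 8 = 128 bytes
 * and the level occupies 64 * 128 * 4 / 16 = 2048 bytes.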
646  */
647 static const struct vmw_bpp vmw_sf_bpp[] = {
648         [SVGA3D_FORMAT_INVALID] = {0, 0},
649         [SVGA3D_X8R8G8B8] = {32, 32},
650         [SVGA3D_A8R8G8B8] = {32, 32},
651         [SVGA3D_R5G6B5] = {16, 16},
652         [SVGA3D_X1R5G5B5] = {16, 16},
653         [SVGA3D_A1R5G5B5] = {16, 16},
654         [SVGA3D_A4R4G4B4] = {16, 16},
655         [SVGA3D_Z_D32] = {32, 32},
656         [SVGA3D_Z_D16] = {16, 16},
657         [SVGA3D_Z_D24S8] = {32, 32},
658         [SVGA3D_Z_D15S1] = {16, 16},
659         [SVGA3D_LUMINANCE8] = {8, 8},
660         [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
661         [SVGA3D_LUMINANCE16] = {16, 16},
662         [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
663         [SVGA3D_DXT1] = {4, 16},
664         [SVGA3D_DXT2] = {8, 32},
665         [SVGA3D_DXT3] = {8, 32},
666         [SVGA3D_DXT4] = {8, 32},
667         [SVGA3D_DXT5] = {8, 32},
668         [SVGA3D_BUMPU8V8] = {16, 16},
669         [SVGA3D_BUMPL6V5U5] = {16, 16},
670         [SVGA3D_BUMPX8L8V8U8] = {32, 32},
671         [SVGA3D_ARGB_S10E5] = {16, 16},
672         [SVGA3D_ARGB_S23E8] = {32, 32},
673         [SVGA3D_A2R10G10B10] = {32, 32},
674         [SVGA3D_V8U8] = {16, 16},
675         [SVGA3D_Q8W8V8U8] = {32, 32},
676         [SVGA3D_CxV8U8] = {16, 16},
677         [SVGA3D_X8L8V8U8] = {32, 32},
678         [SVGA3D_A2W10V10U10] = {32, 32},
679         [SVGA3D_ALPHA8] = {8, 8},
680         [SVGA3D_R_S10E5] = {16, 16},
681         [SVGA3D_R_S23E8] = {32, 32},
682         [SVGA3D_RG_S10E5] = {16, 16},
683         [SVGA3D_RG_S23E8] = {32, 32},
684         [SVGA3D_BUFFER] = {8, 8},
685         [SVGA3D_Z_D24X8] = {32, 32},
686         [SVGA3D_V16U16] = {32, 32},
687         [SVGA3D_G16R16] = {32, 32},
688         [SVGA3D_A16B16G16R16] = {64,  64},
689         [SVGA3D_UYVY] = {12, 12},
690         [SVGA3D_YUY2] = {12, 12},
691         [SVGA3D_NV12] = {12, 8},
692         [SVGA3D_AYUV] = {32, 32},
693         [SVGA3D_BC4_UNORM] = {4,  16},
694         [SVGA3D_BC5_UNORM] = {8,  32},
695         [SVGA3D_Z_DF16] = {16,  16},
696         [SVGA3D_Z_DF24] = {24,  24},
697         [SVGA3D_Z_D24S8_INT] = {32,  32}
698 };
699
700
701 /**
702  * struct vmw_surface_dma - SVGA3D DMA command
703  */
704 struct vmw_surface_dma {
705         SVGA3dCmdHeader header;
706         SVGA3dCmdSurfaceDMA body;
707         SVGA3dCopyBox cb;
708         SVGA3dCmdSurfaceDMASuffix suffix;
709 };
710
711 /**
712  * struct vmw_surface_define - SVGA3D Surface Define command
713  */
714 struct vmw_surface_define {
715         SVGA3dCmdHeader header;
716         SVGA3dCmdDefineSurface body;
717 };
718
719 /**
720  * struct vmw_surface_destroy - SVGA3D Surface Destroy command
721  */
722 struct vmw_surface_destroy {
723         SVGA3dCmdHeader header;
724         SVGA3dCmdDestroySurface body;
725 };
726
727
728 /**
729  * vmw_surface_dma_size - Compute fifo size for a dma command.
730  *
731  * @srf: Pointer to a struct vmw_surface
732  *
733  * Computes the required size for a surface dma command for backup or
734  * restoration of the surface represented by @srf.
735  */
736 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
737 {
738         return srf->num_sizes * sizeof(struct vmw_surface_dma);
739 }
740
741
742 /**
743  * vmw_surface_define_size - Compute fifo size for a surface define command.
744  *
745  * @srf: Pointer to a struct vmw_surface
746  *
747  * Computes the required size for a surface define command for the definition
748  * of the surface represented by @srf.
749  */
750 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
751 {
752         return sizeof(struct vmw_surface_define) + srf->num_sizes *
753                 sizeof(SVGA3dSize);
754 }
755
756
757 /**
758  * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
759  *
760  * Computes the required size for a surface destroy command for the destruction
761  * of a hw surface.
762  */
763 static inline uint32_t vmw_surface_destroy_size(void)
764 {
765         return sizeof(struct vmw_surface_destroy);
766 }
767
768 /**
769  * vmw_surface_destroy_encode - Encode a surface_destroy command.
770  *
771  * @id: The surface id
772  * @cmd_space: Pointer to memory area in which the commands should be encoded.
773  */
774 static void vmw_surface_destroy_encode(uint32_t id,
775                                        void *cmd_space)
776 {
777         struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
778                 cmd_space;
779
780         cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
781         cmd->header.size = sizeof(cmd->body);
782         cmd->body.sid = id;
783 }
784
785 /**
786  * vmw_surface_define_encode - Encode a surface_define command.
787  *
788  * @srf: Pointer to a struct vmw_surface object.
789  * @cmd_space: Pointer to memory area in which the commands should be encoded.
790  */
791 static void vmw_surface_define_encode(const struct vmw_surface *srf,
792                                       void *cmd_space)
793 {
794         struct vmw_surface_define *cmd = (struct vmw_surface_define *)
795                 cmd_space;
796         struct drm_vmw_size *src_size;
797         SVGA3dSize *cmd_size;
798         uint32_t cmd_len;
799         int i;
800
801         cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
802
803         cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
804         cmd->header.size = cmd_len;
805         cmd->body.sid = srf->res.id;
806         cmd->body.surfaceFlags = srf->flags;
807         cmd->body.format = cpu_to_le32(srf->format);
808         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
809                 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
810
811         cmd += 1;
812         cmd_size = (SVGA3dSize *) cmd;
813         src_size = srf->sizes;
814
815         for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
816                 cmd_size->width = src_size->width;
817                 cmd_size->height = src_size->height;
818                 cmd_size->depth = src_size->depth;
819         }
820 }
821
822 /**
823  * vmw_surface_dma_encode - Encode a surface_dma command.
824  *
825  * @srf: Pointer to a struct vmw_surface object.
826  * @cmd_space: Pointer to memory area in which the commands should be encoded.
827  * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
828  * should be placed or read from.
829  * @to_surface: Boolean whether to DMA to the surface or from the surface.
830  */
831 static void vmw_surface_dma_encode(struct vmw_surface *srf,
832                                    void *cmd_space,
833                                    const SVGAGuestPtr *ptr,
834                                    bool to_surface)
835 {
836         uint32_t i;
837         uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
838         uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
839         struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
840
841         for (i = 0; i < srf->num_sizes; ++i) {
842                 SVGA3dCmdHeader *header = &cmd->header;
843                 SVGA3dCmdSurfaceDMA *body = &cmd->body;
844                 SVGA3dCopyBox *cb = &cmd->cb;
845                 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
846                 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
847                 const struct drm_vmw_size *cur_size = &srf->sizes[i];
848
849                 header->id = SVGA_3D_CMD_SURFACE_DMA;
850                 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
851
852                 body->guest.ptr = *ptr;
853                 body->guest.ptr.offset += cur_offset->bo_offset;
854                 body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
855                 body->host.sid = srf->res.id;
856                 body->host.face = cur_offset->face;
857                 body->host.mipmap = cur_offset->mip;
858                 body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
859                                   SVGA3D_READ_HOST_VRAM);
860                 cb->x = 0;
861                 cb->y = 0;
862                 cb->z = 0;
863                 cb->srcx = 0;
864                 cb->srcy = 0;
865                 cb->srcz = 0;
866                 cb->w = cur_size->width;
867                 cb->h = cur_size->height;
868                 cb->d = cur_size->depth;
869
870                 suffix->suffixSize = sizeof(*suffix);
871                 suffix->maximumOffset = body->guest.pitch*cur_size->height*
872                         cur_size->depth*bpp / stride_bpp;
873                 suffix->flags.discard = 0;
874                 suffix->flags.unsynchronized = 0;
875                 suffix->flags.reserved = 0;
876                 ++cmd;
877         }
878 }
879
880
881 /**
882  * vmw_hw_surface_destroy - destroy a device surface
883  *
884  * @res:        Pointer to a struct vmw_resource embedded in a struct
885  *              vmw_surface.
886  *
887  * Destroys the device surface associated with a struct vmw_surface, if
888  * any, and adjusts accounting and resource count accordingly.
889  */
890 static void vmw_hw_surface_destroy(struct vmw_resource *res)
891 {
892
893         struct vmw_private *dev_priv = res->dev_priv;
894         struct vmw_surface *srf;
895         void *cmd;
896
897         if (res->id != -1) {
898
899                 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
900                 if (unlikely(cmd == NULL)) {
901                         DRM_ERROR("Failed reserving FIFO space for surface "
902                                   "destruction.\n");
903                         return;
904                 }
905
906                 vmw_surface_destroy_encode(res->id, cmd);
907                 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
908
909                 /*
910                  * TODO: Use used_memory_size_atomic, or a separate lock,
911                  * to avoid taking dev_priv::cmdbuf_mutex in
912                  * the destroy path.
913                  */
914
915                 mutex_lock(&dev_priv->cmdbuf_mutex);
916                 srf = vmw_res_to_srf(res);
917                 dev_priv->used_memory_size -= res->backup_size;
918                 mutex_unlock(&dev_priv->cmdbuf_mutex);
919         }
920         vmw_3d_resource_dec(dev_priv, false);
921 }
922
923 /**
924  * vmw_legacy_srf_create - Create a device surface as part of the
925  * resource validation process.
926  *
927  * @res: Pointer to a struct vmw_surface.
928  *
929  * If the surface doesn't have a hw id, allocate one and create the device surface.
930  *
931  * Returns -EBUSY if there weren't sufficient device resources to
932  * complete the validation. Retry after freeing up resources.
933  *
934  * May return other errors if the kernel is out of guest resources.
935  */
936 static int vmw_legacy_srf_create(struct vmw_resource *res)
937 {
938         struct vmw_private *dev_priv = res->dev_priv;
939         struct vmw_surface *srf;
940         uint32_t submit_size;
941         uint8_t *cmd;
942         int ret;
943
944         if (likely(res->id != -1))
945                 return 0;
946
947         srf = vmw_res_to_srf(res);
948         if (unlikely(dev_priv->used_memory_size + res->backup_size >=
949                      dev_priv->memory_size))
950                 return -EBUSY;
951
952         /*
953          * Alloc id for the resource.
954          */
955
956         ret = vmw_resource_alloc_id(res);
957         if (unlikely(ret != 0)) {
958                 DRM_ERROR("Failed to allocate a surface id.\n");
959                 goto out_no_id;
960         }
961
962         if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
963                 ret = -EBUSY;
964                 goto out_no_fifo;
965         }
966
967         /*
968          * Encode the surface define command.
969          */
970
971         submit_size = vmw_surface_define_size(srf);
972         cmd = vmw_fifo_reserve(dev_priv, submit_size);
973         if (unlikely(cmd == NULL)) {
974                 DRM_ERROR("Failed reserving FIFO space for surface "
975                           "creation.\n");
976                 ret = -ENOMEM;
977                 goto out_no_fifo;
978         }
979
980         vmw_surface_define_encode(srf, cmd);
981         vmw_fifo_commit(dev_priv, submit_size);
982         /*
983          * Surface memory usage accounting.
984          */
985
986         dev_priv->used_memory_size += res->backup_size;
987         return 0;
988
989 out_no_fifo:
990         vmw_resource_release_id(res);
991 out_no_id:
992         return ret;
993 }
994
995 /**
996  * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
997  *
998  * @res:            Pointer to a struct vmw_resource embedded in a struct
999  *                  vmw_surface.
1000  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
1001  *                  information about the backup buffer.
1002  * @bind:           Boolean whether to DMA to the surface.
1003  *
1004  * Transfer backup data to or from a legacy surface as part of the
1005  * validation process.
1006  * May return other errors if the kernel is out of guest resources.
1007  * The backup buffer will be fenced or idle upon successful completion,
1008  * and if the surface needs persistent backup storage, the backup buffer
1009  * will also be returned reserved iff @bind is true.
1010  */
1011 static int vmw_legacy_srf_dma(struct vmw_resource *res,
1012                               struct ttm_validate_buffer *val_buf,
1013                               bool bind)
1014 {
1015         SVGAGuestPtr ptr;
1016         struct vmw_fence_obj *fence;
1017         uint32_t submit_size;
1018         struct vmw_surface *srf = vmw_res_to_srf(res);
1019         uint8_t *cmd;
1020         struct vmw_private *dev_priv = res->dev_priv;
1021
1022         BUG_ON(val_buf->bo == NULL);
1023
1024         submit_size = vmw_surface_dma_size(srf);
1025         cmd = vmw_fifo_reserve(dev_priv, submit_size);
1026         if (unlikely(cmd == NULL)) {
1027                 DRM_ERROR("Failed reserving FIFO space for surface "
1028                           "DMA.\n");
1029                 return -ENOMEM;
1030         }
1031         vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
1032         vmw_surface_dma_encode(srf, cmd, &ptr, bind);
1033
1034         vmw_fifo_commit(dev_priv, submit_size);
1035
1036         /*
1037          * Create a fence object and fence the backup buffer.
1038          */
1039
1040         (void) vmw_execbuf_fence_commands(NULL, dev_priv,
1041                                           &fence, NULL);
1042
1043         vmw_fence_single_bo(val_buf->bo, fence);
1044
1045         if (likely(fence != NULL))
1046                 vmw_fence_obj_unreference(&fence);
1047
1048         return 0;
1049 }
1050
1051 /**
1052  * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
1053  *                       surface validation process.
1054  *
1055  * @res:            Pointer to a struct vmw_resource embedded in a struct
1056  *                  vmw_surface.
1057  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
1058  *                  information about the backup buffer.
1059  *
1060  * This function will copy backup data to the surface if the
1061  * backup buffer is dirty.
1062  */
1063 static int vmw_legacy_srf_bind(struct vmw_resource *res,
1064                                struct ttm_validate_buffer *val_buf)
1065 {
1066         if (!res->backup_dirty)
1067                 return 0;
1068
1069         return vmw_legacy_srf_dma(res, val_buf, true);
1070 }
1071
1072
1073 /**
1074  * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
1075  *                         surface eviction process.
1076  *
1077  * @res:            Pointer to a struct vmw_resource embedded in a struct
1078  *                  vmw_surface.
1079  * @readback:       Whether to copy surface data back to the backup buffer.
1080  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
1081  *                  information about the backup buffer.
1082  *
1083  * This function will copy backup data from the surface if @readback is true.
1083  */
1084 static int vmw_legacy_srf_unbind(struct vmw_resource *res,
1085                                  bool readback,
1086                                  struct ttm_validate_buffer *val_buf)
1087 {
1088         if (unlikely(readback))
1089                 return vmw_legacy_srf_dma(res, val_buf, false);
1090         return 0;
1091 }
1092
1093 /**
1094  * vmw_legacy_srf_destroy - Destroy a device surface as part of a
1095  *                          resource eviction process.
1096  *
1097  * @res:            Pointer to a struct vmw_resource embedded in a struct
1098  *                  vmw_surface.
1099  */
1100 static int vmw_legacy_srf_destroy(struct vmw_resource *res)
1101 {
1102         struct vmw_private *dev_priv = res->dev_priv;
1103         uint32_t submit_size;
1104         uint8_t *cmd;
1105
1106         BUG_ON(res->id == -1);
1107
1108         /*
1109          * Encode the surface destroy command.
1110          */
1111
1112         submit_size = vmw_surface_destroy_size();
1113         cmd = vmw_fifo_reserve(dev_priv, submit_size);
1114         if (unlikely(cmd == NULL)) {
1115                 DRM_ERROR("Failed reserving FIFO space for surface "
1116                           "eviction.\n");
1117                 return -ENOMEM;
1118         }
1119
1120         vmw_surface_destroy_encode(res->id, cmd);
1121         vmw_fifo_commit(dev_priv, submit_size);
1122
1123         /*
1124          * Surface memory usage accounting.
1125          */
1126
1127         dev_priv->used_memory_size -= res->backup_size;
1128
1129         /*
1130          * Release the surface ID.
1131          */
1132
1133         vmw_resource_release_id(res);
1134
1135         return 0;
1136 }
1137
1138
1139 /**
1140  * vmw_surface_init - initialize a struct vmw_surface
1141  *
1142  * @dev_priv:       Pointer to a device private struct.
1143  * @srf:            Pointer to the struct vmw_surface to initialize.
1144  * @res_free:       Pointer to a resource destructor used to free
1145  *                  the object.
1146  */
1147 static int vmw_surface_init(struct vmw_private *dev_priv,
1148                             struct vmw_surface *srf,
1149                             void (*res_free) (struct vmw_resource *res))
1150 {
1151         int ret;
1152         struct vmw_resource *res = &srf->res;
1153
1154         BUG_ON(res_free == NULL);
1155         (void) vmw_3d_resource_inc(dev_priv, false);
1156         ret = vmw_resource_init(dev_priv, res, true, res_free,
1157                                 &vmw_legacy_surface_func);
1158
1159         if (unlikely(ret != 0)) {
1160                 vmw_3d_resource_dec(dev_priv, false);
1161                 res_free(res);
1162                 return ret;
1163         }
1164
1165         /*
1166          * The surface won't be visible to hardware until a
1167          * surface validate.
1168          */
1169
1170         vmw_resource_activate(res, vmw_hw_surface_destroy);
1171         return ret;
1172 }
1173
1174 /**
1175  * vmw_user_surface_base_to_res - TTM base object to resource converter for
1176  *                                user visible surfaces
1177  *
1178  * @base:           Pointer to a TTM base object
1179  *
1180  * Returns the struct vmw_resource embedded in a struct vmw_surface
1181  * for the user-visible object identified by the TTM base object @base.
1182  */
1183 static struct vmw_resource *
1184 vmw_user_surface_base_to_res(struct ttm_base_object *base)
1185 {
1186         return &(container_of(base, struct vmw_user_surface, base)->srf.res);
1187 }
1188
1189 /**
1190  * vmw_user_surface_free - User visible surface resource destructor
1191  *
1192  * @res:            A struct vmw_resource embedded in a struct vmw_surface.
1193  */
1194 static void vmw_user_surface_free(struct vmw_resource *res)
1195 {
1196         struct vmw_surface *srf = vmw_res_to_srf(res);
1197         struct vmw_user_surface *user_srf =
1198             container_of(srf, struct vmw_user_surface, srf);
1199         struct vmw_private *dev_priv = srf->res.dev_priv;
1200         uint32_t size = user_srf->size;
1201
1202         kfree(srf->offsets);
1203         kfree(srf->sizes);
1204         kfree(srf->snooper.image);
1205         ttm_base_object_kfree(user_srf, base);
1206         ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1207 }
1208
1209 /**
1210  * vmw_user_surface_base_release - User visible surface TTM base object destructor
1211  *
1212  * @p_base:         Pointer to a pointer to a TTM base object
1213  *                  embedded in a struct vmw_user_surface.
1214  *
1215  * Drops the base object's reference on its resource, and the
1216  * pointer pointed to by *p_base is set to NULL.
1217  */
1218 static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
1219 {
1220         struct ttm_base_object *base = *p_base;
1221         struct vmw_user_surface *user_srf =
1222             container_of(base, struct vmw_user_surface, base);
1223         struct vmw_resource *res = &user_srf->srf.res;
1224
1225         *p_base = NULL;
1226         vmw_resource_unreference(&res);
1227 }
1228
1229 /**
1230  * vmw_surface_destroy_ioctl - Ioctl function implementing
1231  *                                  the user surface destroy functionality.
1232  *
1233  * @dev:            Pointer to a struct drm_device.
1234  * @data:           Pointer to data copied from / to user-space.
1235  * @file_priv:      Pointer to a drm file private structure.
1236  */
1237 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1238                               struct drm_file *file_priv)
1239 {
1240         struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
1241         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1242
1243         return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
1244 }
1245
1246 /**
1247  * vmw_surface_define_ioctl - Ioctl function implementing
1248  *                                  the user surface define functionality.
1249  *
1250  * @dev:            Pointer to a struct drm_device.
1251  * @data:           Pointer to data copied from / to user-space.
1252  * @file_priv:      Pointer to a drm file private structure.
1253  */
1254 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1255                              struct drm_file *file_priv)
1256 {
1257         struct vmw_private *dev_priv = vmw_priv(dev);
1258         struct vmw_user_surface *user_srf;
1259         struct vmw_surface *srf;
1260         struct vmw_resource *res;
1261         struct vmw_resource *tmp;
1262         union drm_vmw_surface_create_arg *arg =
1263             (union drm_vmw_surface_create_arg *)data;
1264         struct drm_vmw_surface_create_req *req = &arg->req;
1265         struct drm_vmw_surface_arg *rep = &arg->rep;
1266         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1267         struct drm_vmw_size __user *user_sizes;
1268         int ret;
1269         int i, j;
1270         uint32_t cur_bo_offset;
1271         struct drm_vmw_size *cur_size;
1272         struct vmw_surface_offset *cur_offset;
1273         uint32_t stride_bpp;
1274         uint32_t bpp;
1275         uint32_t num_sizes;
1276         uint32_t size;
1277         struct vmw_master *vmaster = vmw_master(file_priv->master);
1278
1279         if (unlikely(vmw_user_surface_size == 0))
1280                 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1281                         128;
1282
1283         num_sizes = 0;
1284         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
1285                 num_sizes += req->mip_levels[i];
1286
1287         if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
1288             DRM_VMW_MAX_MIP_LEVELS || num_sizes == 0)
1289                 return -EINVAL;
1290
1291         size = vmw_user_surface_size + 128 +
1292                 ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
1293                 ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
1294
1295
1296         ret = ttm_read_lock(&vmaster->lock, true);
1297         if (unlikely(ret != 0))
1298                 return ret;
1299
1300         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1301                                    size, false, true);
1302         if (unlikely(ret != 0)) {
1303                 if (ret != -ERESTARTSYS)
1304                         DRM_ERROR("Out of graphics memory for surface"
1305                                   " creation.\n");
1306                 goto out_unlock;
1307         }
1308
1309         user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
1310         if (unlikely(user_srf == NULL)) {
1311                 ret = -ENOMEM;
1312                 goto out_no_user_srf;
1313         }
1314
1315         srf = &user_srf->srf;
1316         res = &srf->res;
1317
1318         srf->flags = req->flags;
1319         srf->format = req->format;
1320         srf->scanout = req->scanout;
1321
1322         memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
1323         srf->num_sizes = num_sizes;
1324         user_srf->size = size;
1325
1326         srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
1327         if (unlikely(srf->sizes == NULL)) {
1328                 ret = -ENOMEM;
1329                 goto out_no_sizes;
1330         }
1331         srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
1332                                GFP_KERNEL);
1333         if (unlikely(srf->offsets == NULL)) {
1334                 ret = -ENOMEM;
1335                 goto out_no_offsets;
1336         }
1337
1338         user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1339             req->size_addr;
1340
1341         ret = copy_from_user(srf->sizes, user_sizes,
1342                              srf->num_sizes * sizeof(*srf->sizes));
1343         if (unlikely(ret != 0)) {
1344                 ret = -EFAULT;
1345                 goto out_no_copy;
1346         }
1347
1348         srf->base_size = *srf->sizes;
1349         srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
1350         srf->multisample_count = 1;
1351
1352         cur_bo_offset = 0;
1353         cur_offset = srf->offsets;
1354         cur_size = srf->sizes;
1355
1356         bpp = vmw_sf_bpp[srf->format].bpp;
1357         stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
1358
1359         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
1360                 for (j = 0; j < srf->mip_levels[i]; ++j) {
1361                         uint32_t stride =
1362                                 (cur_size->width * stride_bpp + 7) >> 3;
1363
1364                         cur_offset->face = i;
1365                         cur_offset->mip = j;
1366                         cur_offset->bo_offset = cur_bo_offset;
1367                         cur_bo_offset += stride * cur_size->height *
1368                                 cur_size->depth * bpp / stride_bpp;
1369                         ++cur_offset;
1370                         ++cur_size;
1371                 }
1372         }
1373         res->backup_size = cur_bo_offset;
1374
1375         if (srf->scanout &&
1376             srf->num_sizes == 1 &&
1377             srf->sizes[0].width == 64 &&
1378             srf->sizes[0].height == 64 &&
1379             srf->format == SVGA3D_A8R8G8B8) {
1380
1381                 /* allocate image area and clear it */
1382                 srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
1383                 if (!srf->snooper.image) {
1384                         DRM_ERROR("Failed to allocate cursor_image\n");
1385                         ret = -ENOMEM;
1386                         goto out_no_copy;
1387                 }
1388         } else {
1389                 srf->snooper.image = NULL;
1390         }
1391         srf->snooper.crtc = NULL;
1392
1393         user_srf->base.shareable = false;
1394         user_srf->base.tfile = NULL;
1395
1396         /**
1397          * From this point, the generic resource management functions
1398          * destroy the object on failure.
1399          */
1400
1401         ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1402         if (unlikely(ret != 0))
1403                 goto out_unlock;
1404
1405         tmp = vmw_resource_reference(&srf->res);
1406         ret = ttm_base_object_init(tfile, &user_srf->base,
1407                                    req->shareable, VMW_RES_SURFACE,
1408                                    &vmw_user_surface_base_release, NULL);
1409
1410         if (unlikely(ret != 0)) {
1411                 vmw_resource_unreference(&tmp);
1412                 vmw_resource_unreference(&res);
1413                 goto out_unlock;
1414         }
1415
1416         rep->sid = user_srf->base.hash.key;
1417         vmw_resource_unreference(&res);
1418
1419         ttm_read_unlock(&vmaster->lock);
1420         return 0;
1421 out_no_copy:
1422         kfree(srf->offsets);
1423 out_no_offsets:
1424         kfree(srf->sizes);
1425 out_no_sizes:
1426         kfree(user_srf);
1427 out_no_user_srf:
1428         ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1429 out_unlock:
1430         ttm_read_unlock(&vmaster->lock);
1431         return ret;
1432 }
1433
1434 /**
1435  * vmw_surface_reference_ioctl - Ioctl function implementing
1436  *                                  the user surface reference functionality.
1437  *
1438  * @dev:            Pointer to a struct drm_device.
1439  * @data:           Pointer to data copied from / to user-space.
1440  * @file_priv:      Pointer to a drm file private structure.
1441  */
1442 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1443                                 struct drm_file *file_priv)
1444 {
1445         union drm_vmw_surface_reference_arg *arg =
1446             (union drm_vmw_surface_reference_arg *)data;
1447         struct drm_vmw_surface_arg *req = &arg->req;
1448         struct drm_vmw_surface_create_req *rep = &arg->rep;
1449         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1450         struct vmw_surface *srf;
1451         struct vmw_user_surface *user_srf;
1452         struct drm_vmw_size __user *user_sizes;
1453         struct ttm_base_object *base;
1454         int ret = -EINVAL;
1455
1456         base = ttm_base_object_lookup(tfile, req->sid);
1457         if (unlikely(base == NULL)) {
1458                 DRM_ERROR("Could not find surface to reference.\n");
1459                 return -EINVAL;
1460         }
1461
1462         if (unlikely(base->object_type != VMW_RES_SURFACE))
1463                 goto out_bad_resource;
1464
1465         user_srf = container_of(base, struct vmw_user_surface, base);
1466         srf = &user_srf->srf;
1467
1468         ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
1469         if (unlikely(ret != 0)) {
1470                 DRM_ERROR("Could not add a reference to a surface.\n");
1471                 goto out_no_reference;
1472         }
1473
1474         rep->flags = srf->flags;
1475         rep->format = srf->format;
1476         memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
1477         user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1478             rep->size_addr;
1479
1480         if (user_sizes)
1481                 ret = copy_to_user(user_sizes, srf->sizes,
1482                                    srf->num_sizes * sizeof(*srf->sizes));
1483         if (unlikely(ret != 0)) {
1484                 DRM_ERROR("copy_to_user failed %p %u\n",
1485                           user_sizes, srf->num_sizes);
1486                 ret = -EFAULT;
1487         }
1488 out_bad_resource:
1489 out_no_reference:
1490         ttm_base_object_unref(&base);
1491
1492         return ret;
1493 }
1494
1495 /**
1496  * vmw_user_resource_lookup_handle - lookup a struct resource from a
1497  * TTM user-space handle and perform basic type checks
1498  *
1499  * @dev_priv:     Pointer to a device private struct
1500  * @tfile:        Pointer to a struct ttm_object_file identifying the caller
1501  * @handle:       The TTM user-space handle
1502  * @converter:    Pointer to an object describing the resource type
1503  * @p_res:        On successful return the location pointed to will contain
1504  *                a pointer to a refcounted struct vmw_resource.
1505  *
1506  * If the handle can't be found or is associated with an incorrect resource
1507  * type, -EINVAL will be returned.
1508  */
1509 int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
1510                                     struct ttm_object_file *tfile,
1511                                     uint32_t handle,
1512                                     const struct vmw_user_resource_conv
1513                                     *converter,
1514                                     struct vmw_resource **p_res)
1515 {
1516         struct ttm_base_object *base;
1517         struct vmw_resource *res;
1518         int ret = -EINVAL;
1519
1520         base = ttm_base_object_lookup(tfile, handle);
1521         if (unlikely(base == NULL))
1522                 return -EINVAL;
1523
1524         if (unlikely(base->object_type != converter->object_type))
1525                 goto out_bad_resource;
1526
1527         res = converter->base_obj_to_res(base);
1528
1529         read_lock(&dev_priv->resource_lock);
1530         if (!res->avail || res->res_free != converter->res_free) {
1531                 read_unlock(&dev_priv->resource_lock);
1532                 goto out_bad_resource;
1533         }
1534
1535         kref_get(&res->kref);
1536         read_unlock(&dev_priv->resource_lock);
1537
1538         *p_res = res;
1539         ret = 0;
1540
1541 out_bad_resource:
1542         ttm_base_object_unref(&base);
1543
1544         return ret;
1545 }
1546
1547 /**
1548  * Helper function that looks up either a surface or a dmabuf.
1549  *
1550  * The pointers pointed to by @out_surf and @out_buf need to be NULL.
1551  */
1552 int vmw_user_lookup_handle(struct vmw_private *dev_priv,
1553                            struct ttm_object_file *tfile,
1554                            uint32_t handle,
1555                            struct vmw_surface **out_surf,
1556                            struct vmw_dma_buffer **out_buf)
1557 {
1558         struct vmw_resource *res;
1559         int ret;
1560
1561         BUG_ON(*out_surf || *out_buf);
1562
1563         ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
1564                                               user_surface_converter,
1565                                               &res);
1566         if (!ret) {
1567                 *out_surf = vmw_res_to_srf(res);
1568                 return 0;
1569         }
1570
1571         *out_surf = NULL;
1572         ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
1573         return ret;
1574 }
1575
1576 /**
1577  * Buffer management.
1578  */
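
/**
 * vmw_dmabuf_bo_free - TTM destroy callback freeing the enclosing
 * struct vmw_dma_buffer.
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object.
 */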
1579 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
1580 {
1581         struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1582
1583         kfree(vmw_bo);
1584 }
1585
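/**
 * vmw_dmabuf_init - Initialize a caller-allocated struct vmw_dma_buffer
 * as a TTM buffer object.
 *
 * @dev_priv:      Pointer to a struct vmw_private identifying the device.
 * @vmw_bo:        Pointer to the buffer to initialize.
 * @size:          Buffer size in bytes.
 * @placement:     TTM placement used for the initial validation.
 * @interruptible: Whether waits during initialization are interruptible.
 * @bo_free:       Destroy callback invoked when the last reference is dropped.
 *
 * Note that the callers in this file rely on ttm_bo_init() releasing the
 * object through @bo_free on failure, so they do not free @vmw_bo again.
 */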
1586 int vmw_dmabuf_init(struct vmw_private *dev_priv,
1587                     struct vmw_dma_buffer *vmw_bo,
1588                     size_t size, struct ttm_placement *placement,
1589                     bool interruptible,
1590                     void (*bo_free) (struct ttm_buffer_object *bo))
1591 {
1592         struct ttm_bo_device *bdev = &dev_priv->bdev;
1593         size_t acc_size;
1594         int ret;
1595
1596         BUG_ON(!bo_free);
1597
1598         acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
1599         memset(vmw_bo, 0, sizeof(*vmw_bo));
1600
1601         INIT_LIST_HEAD(&vmw_bo->res_list);
1602
1603         ret = ttm_bo_init(bdev, &vmw_bo->base, size,
1604                           ttm_bo_type_device, placement,
1605                           0, interruptible,
1606                           NULL, acc_size, NULL, bo_free);
1607         return ret;
1608 }
1609
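/**
 * vmw_user_dmabuf_destroy - TTM destroy callback for user dma buffers,
 * freeing the enclosing struct vmw_user_dma_buffer through its base object.
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object.
 */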
1610 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
1611 {
1612         struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
1613
1614         ttm_base_object_kfree(vmw_user_bo, base);
1615 }
1616
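/**
 * vmw_user_dmabuf_release - TTM base-object release callback for user
 * dma buffers, called when user space holds no more references on the
 * base object. Drops the buffer-object reference held by the base object.
 *
 * @p_base: Pointer to the base-object pointer, which is cleared.
 */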
1617 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
1618 {
1619         struct vmw_user_dma_buffer *vmw_user_bo;
1620         struct ttm_base_object *base = *p_base;
1621         struct ttm_buffer_object *bo;
1622
1623         *p_base = NULL;
1624
1625         if (unlikely(base == NULL))
1626                 return;
1627
1628         vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
1629         bo = &vmw_user_bo->dma.base;
1630         ttm_bo_unref(&bo);
1631 }
1632
1633 /**
1634  * vmw_user_dmabuf_alloc - Allocate a user dma buffer
1635  *
1636  * @dev_priv: Pointer to a struct device private.
1637  * @tfile: Pointer to a struct ttm_object_file on which to register the user
1638  * object.
1639  * @size: Size of the dma buffer.
1640  * @shareable: Boolean whether the buffer is shareable with other open files.
1641  * @handle: Pointer to where the handle value should be assigned.
1642  * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
1643  * should be assigned.
1644  */
1645 int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
1646                           struct ttm_object_file *tfile,
1647                           uint32_t size,
1648                           bool shareable,
1649                           uint32_t *handle,
1650                           struct vmw_dma_buffer **p_dma_buf)
1651 {
1652         struct vmw_user_dma_buffer *user_bo;
1653         struct ttm_buffer_object *tmp;
1654         int ret;
1655
1656         user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
1657         if (unlikely(user_bo == NULL)) {
1658                 DRM_ERROR("Failed to allocate a buffer.\n");
1659                 return -ENOMEM;
1660         }
1661
1662         ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
1663                               &vmw_vram_sys_placement, true,
1664                               &vmw_user_dmabuf_destroy);
1665         if (unlikely(ret != 0))
1666                 return ret;
1667
1668         tmp = ttm_bo_reference(&user_bo->dma.base);
1669         ret = ttm_base_object_init(tfile,
1670                                    &user_bo->base,
1671                                    shareable,
1672                                    ttm_buffer_type,
1673                                    &vmw_user_dmabuf_release, NULL);
1674         if (unlikely(ret != 0)) {
1675                 ttm_bo_unref(&tmp);
1676                 goto out_no_base_object;
1677         }
1678
1679         *p_dma_buf = &user_bo->dma;
1680         *handle = user_bo->base.hash.key;
1681
1682 out_no_base_object:
1683         return ret;
1684 }
1685
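/**
 * vmw_dmabuf_alloc_ioctl - Ioctl allocating a user dma buffer and returning
 * its handle, mmap offset and GMR id to user space.
 */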
1686 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
1687                            struct drm_file *file_priv)
1688 {
1689         struct vmw_private *dev_priv = vmw_priv(dev);
1690         union drm_vmw_alloc_dmabuf_arg *arg =
1691             (union drm_vmw_alloc_dmabuf_arg *)data;
1692         struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
1693         struct drm_vmw_dmabuf_rep *rep = &arg->rep;
1694         struct vmw_dma_buffer *dma_buf;
1695         uint32_t handle;
1696         struct vmw_master *vmaster = vmw_master(file_priv->master);
1697         int ret;
1698
1699         ret = ttm_read_lock(&vmaster->lock, true);
1700         if (unlikely(ret != 0))
1701                 return ret;
1702
1703         ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1704                                     req->size, false, &handle, &dma_buf);
1705         if (unlikely(ret != 0))
1706                 goto out_no_dmabuf;
1707
1708         rep->handle = handle;
1709         rep->map_handle = dma_buf->base.addr_space_offset;
1710         rep->cur_gmr_id = handle;
1711         rep->cur_gmr_offset = 0;
1712
1713         vmw_dmabuf_unreference(&dma_buf);
1714
1715 out_no_dmabuf:
1716         ttm_read_unlock(&vmaster->lock);
1717
1718         return ret;
1719 }
1720
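/**
 * vmw_dmabuf_unref_ioctl - Ioctl dropping a user-space reference on a
 * dma buffer handle.
 */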
1721 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
1722                            struct drm_file *file_priv)
1723 {
1724         struct drm_vmw_unref_dmabuf_arg *arg =
1725             (struct drm_vmw_unref_dmabuf_arg *)data;
1726
1727         return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1728                                          arg->handle,
1729                                          TTM_REF_USAGE);
1730 }
1731
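/**
 * vmw_user_dmabuf_lookup - Look up a user dma buffer from its handle.
 *
 * @tfile:  Pointer to a struct ttm_object_file identifying the caller.
 * @handle: The user-space handle.
 * @out:    On success, assigned a pointer to the struct vmw_dma_buffer.
 *          The caller holds a buffer-object reference and should drop it
 *          with vmw_dmabuf_unreference() when done.
 */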
1732 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
1733                            uint32_t handle, struct vmw_dma_buffer **out)
1734 {
1735         struct vmw_user_dma_buffer *vmw_user_bo;
1736         struct ttm_base_object *base;
1737
1738         base = ttm_base_object_lookup(tfile, handle);
1739         if (unlikely(base == NULL)) {
1740                 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
1741                        (unsigned long)handle);
1742                 return -ESRCH;
1743         }
1744
1745         if (unlikely(base->object_type != ttm_buffer_type)) {
1746                 ttm_base_object_unref(&base);
1747                 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
1748                        (unsigned long)handle);
1749                 return -EINVAL;
1750         }
1751
1752         vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
1753         (void)ttm_bo_reference(&vmw_user_bo->dma.base);
1754         ttm_base_object_unref(&base);
1755         *out = &vmw_user_bo->dma;
1756
1757         return 0;
1758 }
1759
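/**
 * vmw_user_dmabuf_reference - Add a TTM_REF_USAGE reference on a user dma
 * buffer on behalf of @tfile. Returns -EINVAL if @dma_buf is not a user
 * dma buffer.
 */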
1760 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
1761                               struct vmw_dma_buffer *dma_buf)
1762 {
1763         struct vmw_user_dma_buffer *user_bo;
1764
1765         if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
1766                 return -EINVAL;
1767
1768         user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
1769         return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
1770 }
1771
1772 /*
1773  * Stream management
1774  */
1775
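/**
 * vmw_stream_destroy - Resource destroy callback releasing the overlay
 * stream id claimed by the stream.
 */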
1776 static void vmw_stream_destroy(struct vmw_resource *res)
1777 {
1778         struct vmw_private *dev_priv = res->dev_priv;
1779         struct vmw_stream *stream;
1780         int ret;
1781
1782         DRM_INFO("%s: unref\n", __func__);
1783         stream = container_of(res, struct vmw_stream, res);
1784
1785         ret = vmw_overlay_unref(dev_priv, stream->stream_id);
1786         WARN_ON(ret != 0);
1787 }
1788
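/**
 * vmw_stream_init - Initialize a stream resource and claim an overlay
 * stream id from the device. On failure the stream is freed, either
 * through @res_free or, when @res_free is NULL, with kfree().
 */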
1789 static int vmw_stream_init(struct vmw_private *dev_priv,
1790                            struct vmw_stream *stream,
1791                            void (*res_free) (struct vmw_resource *res))
1792 {
1793         struct vmw_resource *res = &stream->res;
1794         int ret;
1795
1796         ret = vmw_resource_init(dev_priv, res, false, res_free,
1797                                 &vmw_stream_func);
1798
1799         if (unlikely(ret != 0)) {
1800                 if (res_free == NULL)
1801                         kfree(stream);
1802                 else
1803                         res_free(&stream->res);
1804                 return ret;
1805         }
1806
1807         ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
1808         if (ret) {
1809                 vmw_resource_unreference(&res);
1810                 return ret;
1811         }
1812
1813         DRM_INFO("%s: claimed\n", __func__);
1814
1815         vmw_resource_activate(&stream->res, vmw_stream_destroy);
1816         return 0;
1817 }
1818
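/**
 * vmw_user_stream_free - Resource free callback for user streams, freeing
 * the object and returning its accounted size to the memory global.
 */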
1819 static void vmw_user_stream_free(struct vmw_resource *res)
1820 {
1821         struct vmw_user_stream *stream =
1822             container_of(res, struct vmw_user_stream, stream.res);
1823         struct vmw_private *dev_priv = res->dev_priv;
1824
1825         ttm_base_object_kfree(stream, base);
1826         ttm_mem_global_free(vmw_mem_glob(dev_priv),
1827                             vmw_user_stream_size);
1828 }
1829
1830 /**
1831  * vmw_user_stream_base_release - Called when user space has no more
1832  * references on the base object. Releases the base object's reference on the resource.
1833  */
1834
1835 static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1836 {
1837         struct ttm_base_object *base = *p_base;
1838         struct vmw_user_stream *stream =
1839             container_of(base, struct vmw_user_stream, base);
1840         struct vmw_resource *res = &stream->stream.res;
1841
1842         *p_base = NULL;
1843         vmw_resource_unreference(&res);
1844 }
1845
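/**
 * vmw_stream_unref_ioctl - Ioctl dropping the caller's reference on a user
 * stream, after verifying that the stream was created by the caller.
 */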
1846 int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1847                            struct drm_file *file_priv)
1848 {
1849         struct vmw_private *dev_priv = vmw_priv(dev);
1850         struct vmw_resource *res;
1851         struct vmw_user_stream *stream;
1852         struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1853         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1854         struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
1855         int ret = 0;
1856
1857
1858         res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
1859         if (unlikely(res == NULL))
1860                 return -EINVAL;
1861
1862         if (res->res_free != &vmw_user_stream_free) {
1863                 ret = -EINVAL;
1864                 goto out;
1865         }
1866
1867         stream = container_of(res, struct vmw_user_stream, stream.res);
1868         if (stream->base.tfile != tfile) {
1869                 ret = -EINVAL;
1870                 goto out;
1871         }
1872
1873         ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1874 out:
1875         vmw_resource_unreference(&res);
1876         return ret;
1877 }
1878
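/**
 * vmw_stream_claim_ioctl - Ioctl creating a user stream resource and
 * returning its resource id to user space.
 */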
1879 int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1880                            struct drm_file *file_priv)
1881 {
1882         struct vmw_private *dev_priv = vmw_priv(dev);
1883         struct vmw_user_stream *stream;
1884         struct vmw_resource *res;
1885         struct vmw_resource *tmp;
1886         struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1887         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1888         struct vmw_master *vmaster = vmw_master(file_priv->master);
1889         int ret;
1890
1891         /*
1892          * Approximate idr memory usage with 128 bytes. It will be limited
1893          * by the maximum number of streams anyway.
1894          */
1895
1896         if (unlikely(vmw_user_stream_size == 0))
1897                 vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
1898
1899         ret = ttm_read_lock(&vmaster->lock, true);
1900         if (unlikely(ret != 0))
1901                 return ret;
1902
1903         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1904                                    vmw_user_stream_size,
1905                                    false, true);
1906         if (unlikely(ret != 0)) {
1907                 if (ret != -ERESTARTSYS)
1908                         DRM_ERROR("Out of graphics memory for stream"
1909                                   " creation.\n");
1910                 goto out_unlock;
1911         }
1912
1913
1914         stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1915         if (unlikely(stream == NULL)) {
1916                 ttm_mem_global_free(vmw_mem_glob(dev_priv),
1917                                     vmw_user_stream_size);
1918                 ret = -ENOMEM;
1919                 goto out_unlock;
1920         }
1921
1922         res = &stream->stream.res;
1923         stream->base.shareable = false;
1924         stream->base.tfile = NULL;
1925
1926         /*
1927          * From here on, the destructor takes over resource freeing.
1928          */
1929
1930         ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1931         if (unlikely(ret != 0))
1932                 goto out_unlock;
1933
1934         tmp = vmw_resource_reference(res);
1935         ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1936                                    &vmw_user_stream_base_release, NULL);
1937
1938         if (unlikely(ret != 0)) {
1939                 vmw_resource_unreference(&tmp);
1940                 goto out_err;
1941         }
1942
1943         arg->stream_id = res->id;
1944 out_err:
1945         vmw_resource_unreference(&res);
1946 out_unlock:
1947         ttm_read_unlock(&vmaster->lock);
1948         return ret;
1949 }
1950
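/**
 * vmw_user_stream_lookup - Look up a user stream by resource id.
 *
 * @inout_id: On input the user-space resource id; on successful return
 *            replaced by the overlay stream id.
 * @out:      On success, assigned a pointer to the refcounted stream
 *            resource, which the caller must unreference.
 */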
1951 int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1952                            struct ttm_object_file *tfile,
1953                            uint32_t *inout_id, struct vmw_resource **out)
1954 {
1955         struct vmw_user_stream *stream;
1956         struct vmw_resource *res;
1957         int ret;
1958
1959         res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
1960                                   *inout_id);
1961         if (unlikely(res == NULL))
1962                 return -EINVAL;
1963
1964         if (res->res_free != &vmw_user_stream_free) {
1965                 ret = -EINVAL;
1966                 goto err_ref;
1967         }
1968
1969         stream = container_of(res, struct vmw_user_stream, stream.res);
1970         if (stream->base.tfile != tfile) {
1971                 ret = -EPERM;
1972                 goto err_ref;
1973         }
1974
1975         *inout_id = stream->stream.stream_id;
1976         *out = res;
1977         return 0;
1978 err_ref:
1979         vmw_resource_unreference(&res);
1980         return ret;
1981 }
1982
1983
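/**
 * vmw_dumb_create - Implementation of the DRM dumb-buffer create hook,
 * backing the dumb buffer with a user dma buffer.
 */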
1984 int vmw_dumb_create(struct drm_file *file_priv,
1985                     struct drm_device *dev,
1986                     struct drm_mode_create_dumb *args)
1987 {
1988         struct vmw_private *dev_priv = vmw_priv(dev);
1989         struct vmw_master *vmaster = vmw_master(file_priv->master);
1990         struct vmw_user_dma_buffer *vmw_user_bo;
1991         struct ttm_buffer_object *tmp;
1992         int ret;
1993
1994         args->pitch = args->width * ((args->bpp + 7) / 8);
1995         args->size = args->pitch * args->height;
1996
1997         vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
1998         if (vmw_user_bo == NULL)
1999                 return -ENOMEM;
2000
2001         ret = ttm_read_lock(&vmaster->lock, true);
2002         if (ret != 0) {
2003                 kfree(vmw_user_bo);
2004                 return ret;
2005         }
2006
2007         ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
2008                               &vmw_vram_sys_placement, true,
2009                               &vmw_user_dmabuf_destroy);
2010         if (ret != 0)
2011                 goto out_no_dmabuf;
2012
2013         tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
2014         ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
2015                                    &vmw_user_bo->base,
2016                                    false,
2017                                    ttm_buffer_type,
2018                                    &vmw_user_dmabuf_release, NULL);
2019         if (unlikely(ret != 0))
2020                 goto out_no_base_object;
2021
2022         args->handle = vmw_user_bo->base.hash.key;
2023
2024 out_no_base_object:
2025         ttm_bo_unref(&tmp);
2026 out_no_dmabuf:
2027         ttm_read_unlock(&vmaster->lock);
2028         return ret;
2029 }
2030
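/**
 * vmw_dumb_map_offset - Implementation of the DRM dumb-buffer map hook,
 * returning the address-space offset used to mmap the buffer.
 */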
2031 int vmw_dumb_map_offset(struct drm_file *file_priv,
2032                         struct drm_device *dev, uint32_t handle,
2033                         uint64_t *offset)
2034 {
2035         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
2036         struct vmw_dma_buffer *out_buf;
2037         int ret;
2038
2039         ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
2040         if (ret != 0)
2041                 return -EINVAL;
2042
2043         *offset = out_buf->base.addr_space_offset;
2044         vmw_dmabuf_unreference(&out_buf);
2045         return 0;
2046 }
2047
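/**
 * vmw_dumb_destroy - Implementation of the DRM dumb-buffer destroy hook,
 * dropping the user-space reference on the buffer handle.
 */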
2048 int vmw_dumb_destroy(struct drm_file *file_priv,
2049                      struct drm_device *dev,
2050                      uint32_t handle)
2051 {
2052         return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
2053                                          handle, TTM_REF_USAGE);
2054 }
2055
2056 /**
2057  * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
2058  *
2059  * @res:            The resource for which to allocate a backup buffer.
2060  * @interruptible:  Whether any sleeps during allocation should be
2061  *                  performed while interruptible.
2062  */
2063 static int vmw_resource_buf_alloc(struct vmw_resource *res,
2064                                   bool interruptible)
2065 {
2066         unsigned long size =
2067                 PAGE_ALIGN(res->backup_size);
2068         struct vmw_dma_buffer *backup;
2069         int ret;
2070
2071         if (likely(res->backup)) {
2072                 BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
2073                 return 0;
2074         }
2075
2076         backup = kzalloc(sizeof(*backup), GFP_KERNEL);
2077         if (unlikely(backup == NULL))
2078                 return -ENOMEM;
2079
2080         ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
2081                               res->func->backup_placement,
2082                               interruptible,
2083                               &vmw_dmabuf_bo_free);
2084         if (unlikely(ret != 0))
2085                 goto out_no_dmabuf;
2086
2087         res->backup = backup;
2088
2089 out_no_dmabuf:
2090         return ret;
2091 }
2092
2093 /**
2094  * vmw_resource_do_validate - Make a resource up-to-date and visible
2095  *                            to the device.
2096  *
2097  * @res:            The resource to make visible to the device.
2098  * @val_buf:        Information about a buffer possibly
2099  *                  containing backup data if a bind operation is needed.
2100  *
2101  * On hardware resource shortage, this function returns -EBUSY and
2102  * the call should be retried once resources have been freed up.
2103  */
2104 static int vmw_resource_do_validate(struct vmw_resource *res,
2105                                     struct ttm_validate_buffer *val_buf)
2106 {
2107         int ret = 0;
2108         const struct vmw_res_func *func = res->func;
2109
2110         if (unlikely(res->id == -1)) {
2111                 ret = func->create(res);
2112                 if (unlikely(ret != 0))
2113                         return ret;
2114         }
2115
2116         if (func->bind &&
2117             ((func->needs_backup && list_empty(&res->mob_head) &&
2118               val_buf->bo != NULL) ||
2119              (!func->needs_backup && val_buf->bo != NULL))) {
2120                 ret = func->bind(res, val_buf);
2121                 if (unlikely(ret != 0))
2122                         goto out_bind_failed;
2123                 if (func->needs_backup)
2124                         list_add_tail(&res->mob_head, &res->backup->res_list);
2125         }
2126
2127         /*
2128          * Only do this on write operations, and move to
2129          * vmw_resource_unreserve if it can be called after
2130          * backup buffers have been unreserved. Otherwise
2131          * sort out locking.
2132          */
2133         res->res_dirty = true;
2134
2135         return 0;
2136
2137 out_bind_failed:
2138         func->destroy(res);
2139
2140         return ret;
2141 }
2142
2143 /**
2144  * vmw_resource_unreserve - Unreserve a resource previously reserved for
2145  * command submission.
2146  *
2147  * @res:               Pointer to the struct vmw_resource to unreserve.
2148  * @new_backup:        Pointer to new backup buffer if command submission
2149  *                     switched.
2150  * @new_backup_offset: New backup offset if @new_backup is !NULL.
2151  *
2152  * Currently unreserving a resource means putting it back on the device's
2153  * resource lru list, so that it can be evicted if necessary.
2154  */
2155 void vmw_resource_unreserve(struct vmw_resource *res,
2156                             struct vmw_dma_buffer *new_backup,
2157                             unsigned long new_backup_offset)
2158 {
2159         struct vmw_private *dev_priv = res->dev_priv;
2160
2161         if (!list_empty(&res->lru_head))
2162                 return;
2163
2164         if (new_backup && new_backup != res->backup) {
2165
2166                 if (res->backup) {
2167                         BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
2168                         list_del_init(&res->mob_head);
2169                         vmw_dmabuf_unreference(&res->backup);
2170                 }
2171
2172                 res->backup = vmw_dmabuf_reference(new_backup);
2173                 BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
2174                 list_add_tail(&res->mob_head, &new_backup->res_list);
2175         }
2176         if (new_backup)
2177                 res->backup_offset = new_backup_offset;
2178
2179         if (!res->func->may_evict)
2180                 return;
2181
2182         write_lock(&dev_priv->resource_lock);
2183         list_add_tail(&res->lru_head,
2184                       &res->dev_priv->res_lru[res->func->res_type]);
2185         write_unlock(&dev_priv->resource_lock);
2186 }
2187
2188 /**
2189  * vmw_resource_check_buffer - Check whether a backup buffer is needed
2190  *                             for a resource and in that case, allocate
2191  *                             one, reserve and validate it.
2192  *
2193  * @res:            The resource for which to allocate a backup buffer.
2194  * @interruptible:  Whether any sleeps during allocation should be
2195  *                  performed while interruptible.
2196  * @val_buf:        On successful return contains data about the
2197  *                  reserved and validated backup buffer.
2198  */
2199 int vmw_resource_check_buffer(struct vmw_resource *res,
2200                               bool interruptible,
2201                               struct ttm_validate_buffer *val_buf)
2202 {
2203         struct list_head val_list;
2204         bool backup_dirty = false;
2205         int ret;
2206
2207         if (unlikely(res->backup == NULL)) {
2208                 ret = vmw_resource_buf_alloc(res, interruptible);
2209                 if (unlikely(ret != 0))
2210                         return ret;
2211         }
2212
2213         INIT_LIST_HEAD(&val_list);
2214         val_buf->bo = ttm_bo_reference(&res->backup->base);
2215         list_add_tail(&val_buf->head, &val_list);
2216         ret = ttm_eu_reserve_buffers(&val_list);
2217         if (unlikely(ret != 0))
2218                 goto out_no_reserve;
2219
2220         if (res->func->needs_backup && list_empty(&res->mob_head))
2221                 return 0;
2222
2223         backup_dirty = res->backup_dirty;
2224         ret = ttm_bo_validate(&res->backup->base,
2225                               res->func->backup_placement,
2226                               true, false, false);
2227
2228         if (unlikely(ret != 0))
2229                 goto out_no_validate;
2230
2231         return 0;
2232
2233 out_no_validate:
2234         ttm_eu_backoff_reservation(&val_list);
2235 out_no_reserve:
2236         ttm_bo_unref(&val_buf->bo);
2237         if (backup_dirty)
2238                 vmw_dmabuf_unreference(&res->backup);
2239
2240         return ret;
2241 }
2242
2243 /**
2244  * vmw_resource_reserve - Reserve a resource for command submission
2245  *
2246  * @res:            The resource to reserve.
2247  * @no_backup:      If true, don't allocate a backup buffer at this point.
2248  *
2249  * This function takes the resource off the LRU list and makes sure
2250  * a backup buffer is present for guest-backed resources. However,
2251  * the buffer may not be bound to the resource at this point.
2252  */
2253 int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
2254 {
2255         struct vmw_private *dev_priv = res->dev_priv;
2256         int ret;
2257
2258         write_lock(&dev_priv->resource_lock);
2259         list_del_init(&res->lru_head);
2260         write_unlock(&dev_priv->resource_lock);
2261
2262         if (res->func->needs_backup && res->backup == NULL &&
2263             !no_backup) {
2264                 ret = vmw_resource_buf_alloc(res, true);
2265                 if (unlikely(ret != 0))
2266                         return ret;
2267         }
2268
2269         return 0;
2270 }
2271
2272 /**
2273  * vmw_resource_backoff_reservation - Unreserve and unreference a
2274  *                                    backup buffer
2275  *
2276  * @val_buf:        Backup buffer information.
2277  */
2278 void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
2279 {
2280         struct list_head val_list;
2281
2282         if (likely(val_buf->bo == NULL))
2283                 return;
2284
2285         INIT_LIST_HEAD(&val_list);
2286         list_add_tail(&val_buf->head, &val_list);
2287         ttm_eu_backoff_reservation(&val_list);
2288         ttm_bo_unref(&val_buf->bo);
2289 }
2290
2291 /**
2292  * vmw_resource_do_evict - Evict a resource, and transfer its data
2293  *                         to a backup buffer.
2294  *
2295  * @res:            The resource to evict.
2296  */
2297 int vmw_resource_do_evict(struct vmw_resource *res)
2298 {
2299         struct ttm_validate_buffer val_buf;
2300         const struct vmw_res_func *func = res->func;
2301         int ret;
2302
2303         BUG_ON(!func->may_evict);
2304
2305         val_buf.bo = NULL;
2306         ret = vmw_resource_check_buffer(res, true, &val_buf);
2307         if (unlikely(ret != 0))
2308                 return ret;
2309
2310         if (unlikely(func->unbind != NULL &&
2311                      (!func->needs_backup || !list_empty(&res->mob_head)))) {
2312                 ret = func->unbind(res, res->res_dirty, &val_buf);
2313                 if (unlikely(ret != 0))
2314                         goto out_no_unbind;
2315                 list_del_init(&res->mob_head);
2316         }
2317         ret = func->destroy(res);
2318         res->backup_dirty = true;
2319         res->res_dirty = false;
2320 out_no_unbind:
2321         vmw_resource_backoff_reservation(&val_buf);
2322
2323         return ret;
2324 }
2325
2326
2327 /**
2328  * vmw_resource_validate - Make a resource up-to-date and visible
2329  *                         to the device.
2330  *
2331  * @res:            The resource to make visible to the device.
2332  *
2333  * On successful return, any backup DMA buffer pointed to by @res->backup will
2334  * be reserved and validated.
2335  * On hardware resource shortage, this function will repeatedly evict
2336  * resources of the same type until the validation succeeds.
2337  */
2338 int vmw_resource_validate(struct vmw_resource *res)
2339 {
2340         int ret;
2341         struct vmw_resource *evict_res;
2342         struct vmw_private *dev_priv = res->dev_priv;
2343         struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
2344         struct ttm_validate_buffer val_buf;
2345
2346         if (likely(!res->func->may_evict))
2347                 return 0;
2348
2349         val_buf.bo = NULL;
2350         if (res->backup)
2351                 val_buf.bo = &res->backup->base;
2352         do {
2353                 ret = vmw_resource_do_validate(res, &val_buf);
2354                 if (likely(ret != -EBUSY))
2355                         break;
2356
2357                 write_lock(&dev_priv->resource_lock);
2358                 if (list_empty(lru_list) || !res->func->may_evict) {
2359                         DRM_ERROR("Out of device id entries "
2360                                   "for %s.\n", res->func->type_name);
2361                         ret = -EBUSY;
2362                         write_unlock(&dev_priv->resource_lock);
2363                         break;
2364                 }
2365
2366                 evict_res = vmw_resource_reference
2367                         (list_first_entry(lru_list, struct vmw_resource,
2368                                           lru_head));
2369                 list_del_init(&evict_res->lru_head);
2370
2371                 write_unlock(&dev_priv->resource_lock);
2372                 vmw_resource_do_evict(evict_res);
2373                 vmw_resource_unreference(&evict_res);
2374         } while (1);
2375
2376         if (unlikely(ret != 0))
2377                 goto out_no_validate;
2378         else if (!res->func->needs_backup && res->backup) {
2379                 list_del_init(&res->mob_head);
2380                 vmw_dmabuf_unreference(&res->backup);
2381         }
2382
2383         return 0;
2384
2385 out_no_validate:
2386         return ret;
2387 }
2388
2389 /**
2390  * vmw_fence_single_bo - Utility function to fence a single TTM buffer
2391  *                       object without unreserving it.
2392  *
2393  * @bo:             Pointer to the struct ttm_buffer_object to fence.
2394  * @fence:          Pointer to the fence. If NULL, this function will
2395  *                  insert a fence into the command stream.
2396  *
2397  * Contrary to the ttm_eu version of this function, it takes only
2398  * a single buffer object instead of a list, and it also doesn't
2399  * unreserve the buffer object, which needs to be done separately.
2400  */
2401 void vmw_fence_single_bo(struct ttm_buffer_object *bo,
2402                          struct vmw_fence_obj *fence)
2403 {
2404         struct ttm_bo_device *bdev = bo->bdev;
2405         struct ttm_bo_driver *driver = bdev->driver;
2406         struct vmw_fence_obj *old_fence_obj;
2407         struct vmw_private *dev_priv =
2408                 container_of(bdev, struct vmw_private, bdev);
2409
2410         if (fence == NULL)
2411                 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
2412         else
2413                 driver->sync_obj_ref(fence);
2414
2415         spin_lock(&bdev->fence_lock);
2416
2417         old_fence_obj = bo->sync_obj;
2418         bo->sync_obj = fence;
2419
2420         spin_unlock(&bdev->fence_lock);
2421
2422         if (old_fence_obj)
2423                 vmw_fence_obj_unreference(&old_fence_obj);
2424 }
2425
2426 /**
2427  * vmw_resource_move_notify - TTM move_notify_callback
2428  *
2429  * @bo:             The TTM buffer object about to move.
2430  * @mem:            The struct ttm_mem_reg indicating to what memory
2431  *                  region the move is taking place.
2432  *
2433  * For now does nothing.
2434  */
2435 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
2436                               struct ttm_mem_reg *mem)
2437 {
2438 }
2439
2440 /**
2441  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
2442  *
2443  * @res:            The resource being queried.
2444  */
2445 bool vmw_resource_needs_backup(const struct vmw_resource *res)
2446 {
2447         return res->func->needs_backup;
2448 }
2449
2450 /**
2451  * vmw_resource_evict_type - Evict all resources of a specific type
2452  *
2453  * @dev_priv:       Pointer to a device private struct
2454  * @type:           The resource type to evict
2455  *
2456  * To avoid thrashing or starvation, or as part of the hibernation sequence,
2457  * evict all evictable resources of a specific type.
2458  */
2459 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
2460                                     enum vmw_res_type type)
2461 {
2462         struct list_head *lru_list = &dev_priv->res_lru[type];
2463         struct vmw_resource *evict_res;
2464
2465         do {
2466                 write_lock(&dev_priv->resource_lock);
2467
2468                 if (list_empty(lru_list))
2469                         goto out_unlock;
2470
2471                 evict_res = vmw_resource_reference(
2472                         list_first_entry(lru_list, struct vmw_resource,
2473                                          lru_head));
2474                 list_del_init(&evict_res->lru_head);
2475                 write_unlock(&dev_priv->resource_lock);
2476                 vmw_resource_do_evict(evict_res);
2477                 vmw_resource_unreference(&evict_res);
2478         } while (1);
2479
2480 out_unlock:
2481         write_unlock(&dev_priv->resource_lock);
2482 }
2483
2484 /**
2485  * vmw_resource_evict_all - Evict all evictable resources
2486  *
2487  * @dev_priv:       Pointer to a device private struct
2488  *
2489  * To avoid thrashing or starvation, or as part of the hibernation sequence,
2490  * evict all evictable resources. In particular this means that all
2491  * guest-backed resources that are registered with the device are
2492  * evicted and the OTable becomes clean.
2493  */
2494 void vmw_resource_evict_all(struct vmw_private *dev_priv)
2495 {
2496         enum vmw_res_type type;
2497
2498         mutex_lock(&dev_priv->cmdbuf_mutex);
2499
2500         for (type = 0; type < vmw_res_max; ++type)
2501                 vmw_resource_evict_type(dev_priv, type);
2502
2503         mutex_unlock(&dev_priv->cmdbuf_mutex);
2504 }