vmwgfx: Fix up query processing
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

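/**
 * vmw_resource_reference - Take a reference on a resource.
 *
 * @res: Pointer to the resource.
 *
 * Returns @res with its refcount incremented, so callers can
 * conveniently chain the call.
 */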
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

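/**
 * vmw_resource_unreference - Drop a reference on a resource.
 *
 * @p_res: Double pointer to the resource; *p_res is set to NULL.
 *
 * The last reference triggers vmw_resource_release(), which removes the
 * resource from its idr and frees it through its res_free callback, or
 * kfree() if none was given.
 */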
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;
	INIT_LIST_HEAD(&res->query_head);
	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

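/**
 * vmw_resource_lookup - Find an available resource by id.
 *
 * @dev_priv: Pointer to the device private structure.
 * @idr:      The idr to search (contexts, surfaces or streams).
 * @id:       The resource id.
 *
 * Returns a referenced pointer to the resource if it exists and has
 * been activated, or NULL otherwise.
 */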
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

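/**
 * vmw_context_init - Register a context resource and define it on the device.
 *
 * Adds @res to the context idr and emits an SVGA_3D_CMD_CONTEXT_DEFINE
 * command to the FIFO. On failure, @res is freed through @res_free, or
 * kfree() if @res_free is NULL, so the caller must not touch it after
 * an error return.
 */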
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id,
		      struct vmw_resource **p_res)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
			container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
		if (p_res)
			*p_res = vmw_resource_reference(res);
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}


/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

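/**
 * vmw_surface_init - Register a surface resource and define it on the device.
 *
 * The SVGA_3D_CMD_SURFACE_DEFINE command is laid out as a fixed header
 * and body, immediately followed in the FIFO by one SVGA3dSize per mip
 * level of every face, which is why submit_size and cmd_len below are
 * computed from srf->num_sizes.
 */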
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes == 0 ||
	    srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		/* allocate image area and clear it */
		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

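/**
 * vmw_dmabuf_acc_size - Compute the TTM memory accounting size of a buffer.
 *
 * The fixed per-object overhead (glob->ttm_bo_extra_size plus the rounded
 * size of struct vmw_dma_buffer) is computed once and cached in a static;
 * only the page-pointer array, rounded up to a whole page, varies with
 * @num_pages.
 */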
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

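/**
 * vmw_dmabuf_init - Initialize a vmw_dma_buffer as a TTM buffer object.
 *
 * Accounts the buffer against the TTM memory global before handing it
 * to ttm_bo_init(). On any failure, the buffer is released through
 * @bo_free, so the caller must not free it again.
 */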
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->validate_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

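/**
 * vmw_user_dmabuf_lookup - Resolve a user-space handle to a dma buffer.
 *
 * On success, *out points to the vmw_dma_buffer with an extra reference
 * held on the underlying TTM buffer object, which the caller must drop
 * with ttm_bo_unref(). Returns -ESRCH if the handle does not exist and
 * -EINVAL if it names an object of the wrong type.
 */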
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

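/**
 * vmw_stream_init - Register a stream resource and claim an overlay stream.
 *
 * On vmw_resource_init() failure the stream is freed directly; once the
 * resource is registered, failure to claim an overlay is unwound through
 * vmw_resource_unreference() instead.
 */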
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
				  arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

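/**
 * vmw_user_stream_lookup - Translate a user stream handle to a device stream.
 *
 * On success, *inout_id is replaced with the overlay stream id and *out
 * receives a referenced pointer to the resource, which the caller must
 * release with vmw_resource_unreference().
 */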
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}