/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
};

struct vmw_user_surface {
        struct ttm_base_object base;
        struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
        struct ttm_base_object base;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

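/**
 * vmw_resource_reference - Take a reference on a resource.
 *
 * @res: Pointer to the resource.
 *
 * Returns @res for convenience at call sites that assign the new
 * reference to another pointer.
 */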
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

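/**
 * vmw_resource_release - Final release, called when the last reference
 * is dropped.
 *
 * Called via kref_put() with the resource lock held for writing (see
 * vmw_resource_unreference()). The id is removed from the idr while
 * the lock is held; the lock is then dropped around the hw_destroy()
 * and res_free() callbacks, which may issue device commands, and is
 * retaken so that the caller's unlock remains balanced.
 */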
static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;

        idr_remove(res->idr, res->id);
        write_unlock(&dev_priv->resource_lock);

        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);

        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);
}

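/**
 * vmw_resource_unreference - Drop a reference, clearing the caller's
 * pointer.
 *
 * @p_res: Double pointer to the resource. Set to NULL on return.
 *
 * The resource lock is taken for writing so that a final
 * vmw_resource_release() can safely remove the id from the idr.
 */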
void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}

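/**
 * vmw_resource_init - Initialize a resource and assign it a device id.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res:      The resource to initialize.
 * @idr:      The idr from which to allocate the resource id.
 * @obj_type: The ttm object type of the resource.
 * @res_free: Free function. May be NULL, in which case kfree() is used.
 *
 * Allocates an id greater than zero from @idr under the resource lock,
 * retrying while idr_get_new_above() reports that the preallocated idr
 * memory was consumed by a racing allocation (-EAGAIN). The resource
 * starts out unavailable; it becomes visible to vmw_resource_lookup()
 * only after vmw_resource_activate().
 */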
static int vmw_resource_init(struct vmw_private *dev_priv,
                             struct vmw_resource *res,
                             struct idr *idr,
                             enum ttm_object_type obj_type,
                             void (*res_free) (struct vmw_resource *res))
{
        int ret;

        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->res_type = obj_type;
        res->idr = idr;
        res->avail = false;
        res->dev_priv = dev_priv;
        INIT_LIST_HEAD(&res->query_head);
        INIT_LIST_HEAD(&res->validate_head);
        do {
                if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
                        return -ENOMEM;

                write_lock(&dev_priv->resource_lock);
                ret = idr_get_new_above(idr, res, 1, &res->id);
                write_unlock(&dev_priv->resource_lock);

        } while (ret == -EAGAIN);

        return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy; typically this destroys the
 * hardware resources associated with @res. Activation means that
 * vmw_resource_lookup() will find the resource.
 */

static void vmw_resource_activate(struct vmw_resource *res,
                                  void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

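/**
 * vmw_resource_lookup - Look up an activated resource by id.
 *
 * Searches @idr under the resource read lock and returns a referenced
 * pointer to the resource, or NULL if the id is unknown or the
 * resource has not yet been activated.
 */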
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        return res;
}

/**
 * Context management:
 */

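/**
 * vmw_hw_context_destroy - Tell the device to destroy a context.
 *
 * After releasing any pinned buffer object tied to the context id
 * (vmw_execbuf_release_pinned_bo()), emits an
 * SVGA_3D_CMD_CONTEXT_DESTROY command and drops the device's 3D
 * resource count. Installed as the hw_destroy() callback by
 * vmw_context_init().
 */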
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_3d_resource_dec(dev_priv, false);
}

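/**
 * vmw_context_init - Initialize a context resource and define it on
 * the device.
 *
 * Registers @res in the device's context idr, then reserves FIFO
 * space and emits an SVGA_3D_CMD_CONTEXT_DEFINE command. On success
 * the 3D resource count is incremented and the resource is activated
 * with vmw_hw_context_destroy() as its hw_destroy() callback. On
 * failure @res is freed through @res_free (or kfree() if it is NULL).
 */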
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
                                VMW_RES_CONTEXT, res_free);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(res);
                else
                        res_free(res);
                return ret;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
        struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
        int ret;

        if (unlikely(res == NULL))
                return NULL;

        ret = vmw_context_init(dev_priv, res, NULL);
        return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);

        kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_context *ctx;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_context_free) {
                ret = -EINVAL;
                goto out;
        }

        ctx = container_of(res, struct vmw_user_context, res);
        if (ctx->base.tfile != tfile && !ctx->base.shareable) {
                ret = -EPERM;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (unlikely(ctx == NULL))
                return -ENOMEM;

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = res->id;
out_err:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      int id,
                      struct vmw_resource **p_res)
{
        struct vmw_resource *res;
        int ret = 0;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(&dev_priv->context_idr, id);
        if (res && res->avail) {
                struct vmw_user_context *ctx =
                        container_of(res, struct vmw_user_context, res);
                if (ctx->base.tfile != tfile && !ctx->base.shareable)
                        ret = -EPERM;
                if (p_res)
                        *p_res = vmw_resource_reference(res);
        } else
                ret = -EINVAL;
        read_unlock(&dev_priv->resource_lock);

        return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroySurface body;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.sid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_3d_resource_dec(dev_priv, false);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(srf);
}

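/**
 * vmw_surface_init - Initialize a surface resource and define it on
 * the device.
 *
 * Registers the surface in the device's surface idr, then emits an
 * SVGA_3D_CMD_SURFACE_DEFINE command followed by one SVGA3dSize entry
 * per mip level, so the FIFO submission size varies with
 * srf->num_sizes. On success the 3D resource count is incremented and
 * the resource is activated with vmw_hw_surface_destroy() as its
 * hw_destroy() callback.
 */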
int vmw_surface_init(struct vmw_private *dev_priv,
                     struct vmw_surface *srf,
                     void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineSurface body;
        } *cmd;
        SVGA3dSize *cmd_size;
        struct vmw_resource *res = &srf->res;
        struct drm_vmw_size *src_size;
        size_t submit_size;
        uint32_t cmd_len;
        int i;

        BUG_ON(res_free == NULL);
        ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
                                VMW_RES_SURFACE, res_free);

        if (unlikely(ret != 0)) {
                res_free(res);
                return ret;
        }

        submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
        cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("FIFO reserve failed for surface creation.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
        cmd->header.size = cpu_to_le32(cmd_len);
        cmd->body.sid = cpu_to_le32(res->id);
        cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
        cmd->body.format = cpu_to_le32(srf->format);
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                cmd->body.face[i].numMipLevels =
                    cpu_to_le32(srf->mip_levels[i]);
        }

        cmd += 1;
        cmd_size = (SVGA3dSize *) cmd;
        src_size = srf->sizes;

        for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
                cmd_size->width = cpu_to_le32(src_size->width);
                cmd_size->height = cpu_to_le32(src_size->height);
                cmd_size->depth = cpu_to_le32(src_size->depth);
        }

        vmw_fifo_commit(dev_priv, submit_size);
        (void) vmw_3d_resource_inc(dev_priv, false);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);

        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                   struct ttm_object_file *tfile,
                                   uint32_t handle, struct vmw_surface **out)
{
        struct vmw_resource *res;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;
        res = &srf->res;

        read_lock(&dev_priv->resource_lock);

        if (!res->avail || res->res_free != &vmw_user_surface_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *out = srf;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
            container_of(base, struct vmw_user_surface, base);
        struct vmw_resource *res = &user_srf->srf.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf =
            kmalloc(sizeof(*user_srf), GFP_KERNEL);
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_surface_create_arg *arg =
            (union drm_vmw_surface_create_arg *)data;
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct drm_vmw_size __user *user_sizes;
        int ret;
        int i;

        if (unlikely(user_srf == NULL))
                return -ENOMEM;

        srf = &user_srf->srf;
        res = &srf->res;

        srf->flags = req->flags;
        srf->format = req->format;
        srf->scanout = req->scanout;
        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                srf->num_sizes += srf->mip_levels[i];

        if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
            DRM_VMW_MAX_MIP_LEVELS) {
                ret = -EINVAL;
                goto out_err0;
        }

        srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
        if (unlikely(srf->sizes == NULL)) {
                ret = -ENOMEM;
                goto out_err0;
        }

        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            req->size_addr;

        ret = copy_from_user(srf->sizes, user_sizes,
                             srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                ret = -EFAULT;
                goto out_err1;
        }

        if (srf->scanout &&
            srf->num_sizes == 1 &&
            srf->sizes[0].width == 64 &&
            srf->sizes[0].height == 64 &&
            srf->format == SVGA3D_A8R8G8B8) {

                /* allocate image area and clear it */
                srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
                if (!srf->snooper.image) {
                        DRM_ERROR("Failed to allocate cursor_image\n");
                        ret = -ENOMEM;
                        goto out_err1;
                }
        } else {
                srf->snooper.image = NULL;
        }
        srf->snooper.crtc = NULL;

        user_srf->base.shareable = false;
        user_srf->base.tfile = NULL;

        /**
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_base_object_init(tfile, &user_srf->base,
                                   req->shareable, VMW_RES_SURFACE,
                                   &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                return ret;
        }

        rep->sid = user_srf->base.hash.key;
        if (rep->sid == SVGA3D_INVALID_ID)
                DRM_ERROR("Created bad Surface ID.\n");

        vmw_resource_unreference(&res);
        return 0;
out_err1:
        kfree(srf->sizes);
out_err0:
        kfree(user_srf);
        return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, req->sid);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Could not find surface to reference.\n");
                return -EINVAL;
        }

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;

        ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a surface.\n");
                goto out_no_reference;
        }

        rep->flags = srf->flags;
        rep->format = srf->format;
        memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            rep->size_addr;

        if (user_sizes)
                ret = copy_to_user(user_sizes, srf->sizes,
                                   srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
                ret = -EFAULT;
        }
out_bad_resource:
out_no_reference:
        ttm_base_object_unref(&base);

        return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t handle, int *id)
{
        struct ttm_base_object *base;
        struct vmw_user_surface *user_srf;
        int ret = -EPERM;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_surface;

        user_srf = container_of(base, struct vmw_user_surface, base);
        *id = user_srf->srf.res.id;
        ret = 0;

out_bad_surface:
        /**
         * FIXME: May deadlock here when called from the
         * command parsing code.
         */

        ttm_base_object_unref(&base);
        return ret;
}

/**
 * Buffer management.
 */

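/**
 * vmw_dmabuf_acc_size - Compute the size to account to the TTM memory
 * global for a buffer object.
 *
 * The fixed per-bo overhead (TTM's extra size plus the rounded size of
 * struct vmw_dma_buffer) is the same for every buffer, so it is
 * computed once and cached in a static. The page pointer array is
 * accounted in whole pages on top of that.
 */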
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
                                  unsigned long num_pages)
{
        static size_t bo_user_size = ~0;

        size_t page_array_size =
            (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

        if (unlikely(bo_user_size == ~0)) {
                bo_user_size = glob->ttm_bo_extra_size +
                    ttm_round_pot(sizeof(struct vmw_dma_buffer));
        }

        return bo_user_size + page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;

        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_bo);
}

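/**
 * vmw_dmabuf_init - Initialize a vmw_dma_buffer and its embedded TTM
 * buffer object.
 *
 * The accounting size is charged against the TTM memory global before
 * calling ttm_bo_init(). If that charge fails, @bo_free is called
 * directly, mirroring ttm_bo_init(), which also destroys the buffer
 * object on its failure paths; either way the caller must not free
 * @vmw_bo itself after an error.
 */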
int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        size_t acc_size;
        int ret;

        BUG_ON(!bo_free);

        acc_size =
            vmw_dmabuf_acc_size(bdev->glob,
                                (size + PAGE_SIZE - 1) >> PAGE_SHIFT);

        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0)) {
                /* We must free the bo here, because ttm_bo_init()
                 * frees it on its own failure paths as well. */
                bo_free(&vmw_bo->base);
                return ret;
        }

        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->validate_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, 0, interruptible,
                          NULL, acc_size, bo_free);
        return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;

        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_buffer_object *tmp;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
        if (unlikely(vmw_user_bo == NULL))
                return -ENOMEM;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0)) {
                kfree(vmw_user_bo);
                return ret;
        }

        ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
        ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
                                   &vmw_user_bo->base,
                                   false,
                                   ttm_buffer_type,
                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0))
                goto out_no_base_object;

        rep->handle = vmw_user_bo->base.hash.key;
        rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
        rep->cur_gmr_id = vmw_user_bo->base.hash.key;
        rep->cur_gmr_offset = 0;

out_no_base_object:
        ttm_bo_unref(&tmp);
out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                  uint32_t cur_validate_node)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        if (likely(vmw_bo->on_validate_list))
                return vmw_bo->cur_validate_node;

        vmw_bo->cur_validate_node = cur_validate_node;
        vmw_bo->on_validate_list = true;

        return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        vmw_bo->on_validate_list = false;
}

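/**
 * vmw_user_dmabuf_lookup - Resolve a user-space buffer handle to a
 * referenced vmw_dma_buffer.
 *
 * Returns -ESRCH if the handle does not name a base object, and
 * -EINVAL if the object is not a buffer. On success a TTM reference
 * is held on the buffer object; the temporary base-object reference
 * taken by the lookup is dropped before returning.
 */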
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(base->object_type != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

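/**
 * vmw_stream_init - Initialize a stream resource and claim an overlay
 * stream.
 *
 * Registers the resource in the device's stream idr and claims a
 * hardware overlay stream id via vmw_overlay_claim(). On success the
 * resource is activated with vmw_stream_destroy() as its hw_destroy()
 * callback, which returns the overlay stream on final release.
 */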
static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
                                VMW_RES_STREAM, res_free);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);

        kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
                                  arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (unlikely(stream == NULL))
                return -ENOMEM;

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
        return ret;
}

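/**
 * vmw_user_stream_lookup - Resolve a user-space stream id to the
 * underlying overlay stream.
 *
 * On entry, @inout_id holds the resource id handed out to user space;
 * on success it is replaced with the hardware overlay stream id, and
 * a referenced pointer to the resource is returned in @out. The
 * caller owns that reference and must drop it with
 * vmw_resource_unreference().
 */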
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}