drm/ttm: allow fence to be added as shared
firefly-linux-kernel-4.4.55.git: drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1 /**************************************************************************
2  *
3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_reg.h"
30 #include <drm/ttm/ttm_bo_api.h>
31 #include <drm/ttm/ttm_placement.h>
32
33 #define VMW_RES_HT_ORDER 12
34
35 /**
36  * struct vmw_resource_relocation - Relocation info for resources
37  *
38  * @head: List head for the software context's relocation list.
39  * @res: Non-ref-counted pointer to the resource.
40  * @offset: Offset of 4 byte entries into the command buffer where the
41  * id that needs fixup is located.
42  */
43 struct vmw_resource_relocation {
44         struct list_head head;
45         const struct vmw_resource *res;
46         unsigned long offset;
47 };
48
49 /**
50  * struct vmw_resource_val_node - Validation info for resources
51  *
52  * @head: List head for the software context's resource list.
53  * @hash: Hash entry for quick resource to val_node lookup.
54  * @res: Ref-counted pointer to the resource.
56  * @new_backup: Refcounted pointer to the new backup buffer.
57  * @staged_bindings: If @res is a context, tracks bindings set up during
58  * the command batch. Otherwise NULL.
59  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
60  * @first_usage: Set to true the first time the resource is referenced in
61  * the command stream.
62  * @no_buffer_needed: Resources do not need to allocate buffer backup on
63  * reservation. The command stream will provide one.
64  */
65 struct vmw_resource_val_node {
66         struct list_head head;
67         struct drm_hash_item hash;
68         struct vmw_resource *res;
69         struct vmw_dma_buffer *new_backup;
70         struct vmw_ctx_binding_state *staged_bindings;
71         unsigned long new_backup_offset;
72         bool first_usage;
73         bool no_buffer_needed;
74 };
75
76 /**
77  * struct vmw_cmd_entry - Describe a command for the verifier
78  *
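 * @func: Call-back for validating the command.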
79  * @user_allow: Whether allowed from the execbuf ioctl.
80  * @gb_disable: Whether disabled if guest-backed objects are available.
81  * @gb_enable: Whether enabled iff guest-backed objects are available.
82  */
83 struct vmw_cmd_entry {
84         int (*func) (struct vmw_private *, struct vmw_sw_context *,
85                      SVGA3dCmdHeader *);
86         bool user_allow;
87         bool gb_disable;
88         bool gb_enable;
89 };
90
91 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
92         [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
93                                        (_gb_disable), (_gb_enable)}
94
95 /**
96  * vmw_resource_list_unreserve - unreserve resources previously reserved for
97  * command submission.
98  *
99  * @list: list of resources to unreserve.
100  * @backoff: Whether command submission failed.
101  */
102 static void vmw_resource_list_unreserve(struct list_head *list,
103                                         bool backoff)
104 {
105         struct vmw_resource_val_node *val;
106
107         list_for_each_entry(val, list, head) {
108                 struct vmw_resource *res = val->res;
109                 struct vmw_dma_buffer *new_backup =
110                         backoff ? NULL : val->new_backup;
111
112                 /*
113                  * Transfer staged context bindings to the
114                  * persistent context binding tracker.
115                  */
116                 if (unlikely(val->staged_bindings)) {
117                         if (!backoff) {
118                                 vmw_context_binding_state_transfer
119                                         (val->res, val->staged_bindings);
120                         }
121                         kfree(val->staged_bindings);
122                         val->staged_bindings = NULL;
123                 }
124                 vmw_resource_unreserve(res, new_backup,
125                         val->new_backup_offset);
126                 vmw_dmabuf_unreference(&val->new_backup);
127         }
128 }
129
130
131 /**
132  * vmw_resource_val_add - Add a resource to the software context's
133  * resource list if it's not already on it.
134  *
135  * @sw_context: Pointer to the software context.
136  * @res: Pointer to the resource.
137  * @p_node: On successful return points to a valid pointer to a
138  * struct vmw_resource_val_node, if non-NULL on entry.
139  */
140 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
141                                 struct vmw_resource *res,
142                                 struct vmw_resource_val_node **p_node)
143 {
144         struct vmw_resource_val_node *node;
145         struct drm_hash_item *hash;
146         int ret;
147
148         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
149                                     &hash) == 0)) {
150                 node = container_of(hash, struct vmw_resource_val_node, hash);
151                 node->first_usage = false;
152                 if (unlikely(p_node != NULL))
153                         *p_node = node;
154                 return 0;
155         }
156
157         node = kzalloc(sizeof(*node), GFP_KERNEL);
158         if (unlikely(node == NULL)) {
159                 DRM_ERROR("Failed to allocate a resource validation "
160                           "entry.\n");
161                 return -ENOMEM;
162         }
163
164         node->hash.key = (unsigned long) res;
165         ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
166         if (unlikely(ret != 0)) {
167                 DRM_ERROR("Failed to initialize a resource validation "
168                           "entry.\n");
169                 kfree(node);
170                 return ret;
171         }
172         list_add_tail(&node->head, &sw_context->resource_list);
173         node->res = vmw_resource_reference(res);
174         node->first_usage = true;
175
176         if (unlikely(p_node != NULL))
177                 *p_node = node;
178
179         return 0;
180 }
181
182 /**
183  * vmw_resource_context_res_add - Put resources previously bound to a context on
184  * the validation list
185  *
186  * @dev_priv: Pointer to a device private structure
187  * @sw_context: Pointer to a software context used for this command submission
188  * @ctx: Pointer to the context resource
189  *
190  * This function puts all resources that were previously bound to @ctx on
191  * the resource validation list. This is part of the context state reemission
192  */
193 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
194                                         struct vmw_sw_context *sw_context,
195                                         struct vmw_resource *ctx)
196 {
197         struct list_head *binding_list;
198         struct vmw_ctx_binding *entry;
199         int ret = 0;
200         struct vmw_resource *res;
201
202         mutex_lock(&dev_priv->binding_mutex);
203         binding_list = vmw_context_binding_list(ctx);
204
205         list_for_each_entry(entry, binding_list, ctx_list) {
206                 res = vmw_resource_reference_unless_doomed(entry->bi.res);
207                 if (unlikely(res == NULL))
208                         continue;
209
210                 ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
211                 vmw_resource_unreference(&res);
212                 if (unlikely(ret != 0))
213                         break;
214         }
215
216         mutex_unlock(&dev_priv->binding_mutex);
217         return ret;
218 }
219
220 /**
221  * vmw_resource_relocation_add - Add a relocation to the relocation list
222  *
223  * @list: Pointer to head of relocation list.
224  * @res: The resource.
225  * @offset: Offset into the command buffer currently being parsed where the
226  * id that needs fixup is located. Granularity is 4 bytes.
227  */
228 static int vmw_resource_relocation_add(struct list_head *list,
229                                        const struct vmw_resource *res,
230                                        unsigned long offset)
231 {
232         struct vmw_resource_relocation *rel;
233
234         rel = kmalloc(sizeof(*rel), GFP_KERNEL);
235         if (unlikely(rel == NULL)) {
236                 DRM_ERROR("Failed to allocate a resource relocation.\n");
237                 return -ENOMEM;
238         }
239
240         rel->res = res;
241         rel->offset = offset;
242         list_add_tail(&rel->head, list);
243
244         return 0;
245 }
246
247 /**
248  * vmw_resource_relocations_free - Free all relocations on a list
249  *
250  * @list: Pointer to the head of the relocation list.
251  */
252 static void vmw_resource_relocations_free(struct list_head *list)
253 {
254         struct vmw_resource_relocation *rel, *n;
255
256         list_for_each_entry_safe(rel, n, list, head) {
257                 list_del(&rel->head);
258                 kfree(rel);
259         }
260 }
261
262 /**
263  * vmw_resource_relocations_apply - Apply all relocations on a list
264  *
265  * @cb: Pointer to the start of the command buffer being patched. This need
266  * not be the same buffer as the one being parsed when the relocation
267  * list was built, but the contents must be the same modulo the
268  * resource ids.
269  * @list: Pointer to the head of the relocation list.
270  */
271 static void vmw_resource_relocations_apply(uint32_t *cb,
272                                            struct list_head *list)
273 {
274         struct vmw_resource_relocation *rel;
275
276         list_for_each_entry(rel, list, head) {
277                 if (likely(rel->res != NULL))
278                         cb[rel->offset] = rel->res->id;
279                 else
280                         cb[rel->offset] = SVGA_3D_CMD_NOP;
281         }
282 }
283
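/**
 * vmw_cmd_invalid - Verifier callback for commands that are not valid in a
 * user-space command stream.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */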
284 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
285                            struct vmw_sw_context *sw_context,
286                            SVGA3dCmdHeader *header)
287 {
288         return capable(CAP_SYS_ADMIN) ? : -EINVAL;
289 }
290
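/**
 * vmw_cmd_ok - Verifier callback for commands that need no further checking.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */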
291 static int vmw_cmd_ok(struct vmw_private *dev_priv,
292                       struct vmw_sw_context *sw_context,
293                       SVGA3dCmdHeader *header)
294 {
295         return 0;
296 }
297
298 /**
299  * vmw_bo_to_validate_list - add a bo to a validate list
300  *
301  * @sw_context: The software context used for this command submission batch.
302  * @bo: The buffer object to add.
303  * @validate_as_mob: Validate this buffer as a MOB.
304  * @p_val_node: If non-NULL, will be updated with the validate node number
305  * on return.
306  *
307  * Returns -EINVAL if the limit of number of buffer objects per command
308  * submission is reached.
309  */
310 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
311                                    struct ttm_buffer_object *bo,
312                                    bool validate_as_mob,
313                                    uint32_t *p_val_node)
314 {
315         uint32_t val_node;
316         struct vmw_validate_buffer *vval_buf;
317         struct ttm_validate_buffer *val_buf;
318         struct drm_hash_item *hash;
319         int ret;
320
321         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
322                                     &hash) == 0)) {
323                 vval_buf = container_of(hash, struct vmw_validate_buffer,
324                                         hash);
325                 if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
326                         DRM_ERROR("Inconsistent buffer usage.\n");
327                         return -EINVAL;
328                 }
329                 val_buf = &vval_buf->base;
330                 val_node = vval_buf - sw_context->val_bufs;
331         } else {
332                 val_node = sw_context->cur_val_buf;
333                 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
334                         DRM_ERROR("Max number of DMA buffers per submission "
335                                   "exceeded.\n");
336                         return -EINVAL;
337                 }
338                 vval_buf = &sw_context->val_bufs[val_node];
339                 vval_buf->hash.key = (unsigned long) bo;
340                 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
341                 if (unlikely(ret != 0)) {
342                         DRM_ERROR("Failed to initialize a buffer validation "
343                                   "entry.\n");
344                         return ret;
345                 }
346                 ++sw_context->cur_val_buf;
347                 val_buf = &vval_buf->base;
348                 val_buf->bo = ttm_bo_reference(bo);
349                 val_buf->shared = false;
350                 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
351                 vval_buf->validate_as_mob = validate_as_mob;
352         }
353
354         if (p_val_node)
355                 *p_val_node = val_node;
356
357         return 0;
358 }
359
360 /**
361  * vmw_resources_reserve - Reserve all resources on the sw_context's
362  * resource list.
363  *
364  * @sw_context: Pointer to the software context.
365  *
366  * Note that since VMware's command submission currently is protected by
367  * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
368  * since only a single thread at once will attempt this.
369  */
370 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
371 {
372         struct vmw_resource_val_node *val;
373         int ret;
374
375         list_for_each_entry(val, &sw_context->resource_list, head) {
376                 struct vmw_resource *res = val->res;
377
378                 ret = vmw_resource_reserve(res, val->no_buffer_needed);
379                 if (unlikely(ret != 0))
380                         return ret;
381
382                 if (res->backup) {
383                         struct ttm_buffer_object *bo = &res->backup->base;
384
385                         ret = vmw_bo_to_validate_list
386                                 (sw_context, bo,
387                                  vmw_resource_needs_backup(res), NULL);
388
389                         if (unlikely(ret != 0))
390                                 return ret;
391                 }
392         }
393         return 0;
394 }
395
396 /**
397  * vmw_resources_validate - Validate all resources on the sw_context's
398  * resource list.
399  *
400  * @sw_context: Pointer to the software context.
401  *
402  * Before this function is called, all resource backup buffers must have
403  * been validated.
404  */
405 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
406 {
407         struct vmw_resource_val_node *val;
408         int ret;
409
410         list_for_each_entry(val, &sw_context->resource_list, head) {
411                 struct vmw_resource *res = val->res;
412
413                 ret = vmw_resource_validate(res);
414                 if (unlikely(ret != 0)) {
415                         if (ret != -ERESTARTSYS)
416                                 DRM_ERROR("Failed to validate resource.\n");
417                         return ret;
418                 }
419         }
420         return 0;
421 }
422
423
424 /**
425  * vmw_cmd_res_reloc_add - Add a resource to a software context's
426  * relocation- and validation lists.
427  *
428  * @dev_priv: Pointer to a struct vmw_private identifying the device.
429  * @sw_context: Pointer to the software context.
430  * @res_type: Resource type.
431  * @id_loc: Pointer to where the id that needs translation is located.
432  * @res: Valid pointer to a struct vmw_resource.
433  * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
434  * used for this resource is returned here.
435  */
436 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
437                                  struct vmw_sw_context *sw_context,
438                                  enum vmw_res_type res_type,
439                                  uint32_t *id_loc,
440                                  struct vmw_resource *res,
441                                  struct vmw_resource_val_node **p_val)
442 {
443         int ret;
444         struct vmw_resource_val_node *node;
445
446         *p_val = NULL;
447         ret = vmw_resource_relocation_add(&sw_context->res_relocations,
448                                           res,
449                                           id_loc - sw_context->buf_start);
450         if (unlikely(ret != 0))
451                 goto out_err;
452
453         ret = vmw_resource_val_add(sw_context, res, &node);
454         if (unlikely(ret != 0))
455                 goto out_err;
456
457         if (res_type == vmw_res_context && dev_priv->has_mob &&
458             node->first_usage) {
459
460                 /*
461                  * Put contexts first on the list to be able to exit
462                  * list traversal for contexts early.
463                  */
464                 list_del(&node->head);
465                 list_add(&node->head, &sw_context->resource_list);
466
467                 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
468                 if (unlikely(ret != 0))
469                         goto out_err;
470                 node->staged_bindings =
471                         kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
472                 if (node->staged_bindings == NULL) {
473                         DRM_ERROR("Failed to allocate context binding "
474                                   "information.\n");
475                         ret = -ENOMEM;
                        goto out_err;
476                 }
477                 INIT_LIST_HEAD(&node->staged_bindings->list);
478         }
479
480         if (p_val)
481                 *p_val = node;
482
483 out_err:
484         return ret;
485 }
486
487
488 /**
489  * vmw_cmd_res_check - Check that a resource is present and if so, put it
490  * on the resource validate list unless it's already there.
491  *
492  * @dev_priv: Pointer to a device private structure.
493  * @sw_context: Pointer to the software context.
494  * @res_type: Resource type.
495  * @converter: User-space visible type specific information.
496  * @id_loc: Pointer to the location in the command buffer currently being
497  * parsed from where the user-space resource id handle is located.
498  * @p_val: Pointer to pointer to resource validation node. Populated
499  * on exit.
500  */
501 static int
502 vmw_cmd_res_check(struct vmw_private *dev_priv,
503                   struct vmw_sw_context *sw_context,
504                   enum vmw_res_type res_type,
505                   const struct vmw_user_resource_conv *converter,
506                   uint32_t *id_loc,
507                   struct vmw_resource_val_node **p_val)
508 {
509         struct vmw_res_cache_entry *rcache =
510                 &sw_context->res_cache[res_type];
511         struct vmw_resource *res;
512         struct vmw_resource_val_node *node;
513         int ret;
514
515         if (*id_loc == SVGA3D_INVALID_ID) {
516                 if (p_val)
517                         *p_val = NULL;
518                 if (res_type == vmw_res_context) {
519                         DRM_ERROR("Illegal context invalid id.\n");
520                         return -EINVAL;
521                 }
522                 return 0;
523         }
524
525         /*
526          * Fastpath in case of repeated commands referencing the same
527          * resource
528          */
529
530         if (likely(rcache->valid && *id_loc == rcache->handle)) {
531                 const struct vmw_resource *res = rcache->res;
532
533                 rcache->node->first_usage = false;
534                 if (p_val)
535                         *p_val = rcache->node;
536
537                 return vmw_resource_relocation_add
538                         (&sw_context->res_relocations, res,
539                          id_loc - sw_context->buf_start);
540         }
541
542         ret = vmw_user_resource_lookup_handle(dev_priv,
543                                               sw_context->fp->tfile,
544                                               *id_loc,
545                                               converter,
546                                               &res);
547         if (unlikely(ret != 0)) {
548                 DRM_ERROR("Could not find or use resource 0x%08x.\n",
549                           (unsigned) *id_loc);
550                 dump_stack();
551                 return ret;
552         }
553
554         rcache->valid = true;
555         rcache->res = res;
556         rcache->handle = *id_loc;
557
558         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
559                                     res, &node);
560         if (unlikely(ret != 0))
561                 goto out_no_reloc;
562
563         rcache->node = node;
564         if (p_val)
565                 *p_val = node;
566         vmw_resource_unreference(&res);
567         return 0;
568
569 out_no_reloc:
570         BUG_ON(sw_context->error_resource != NULL);
571         sw_context->error_resource = res;
572
573         return ret;
574 }
575
576 /**
577  * vmw_rebind_contexts - Rebind all resources previously bound to
578  * referenced contexts.
579  *
580  * @sw_context: Pointer to the software context.
581  *
582  * Rebind context binding points that have been scrubbed because of eviction.
583  */
584 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
585 {
586         struct vmw_resource_val_node *val;
587         int ret;
588
589         list_for_each_entry(val, &sw_context->resource_list, head) {
590                 if (unlikely(!val->staged_bindings))
591                         break;
592
593                 ret = vmw_context_rebind_all(val->res);
594                 if (unlikely(ret != 0)) {
595                         if (ret != -ERESTARTSYS)
596                                 DRM_ERROR("Failed to rebind context.\n");
597                         return ret;
598                 }
599         }
600
601         return 0;
602 }
603
604 /**
605  * vmw_cmd_cid_check - Check a command header for valid context information.
606  *
607  * @dev_priv: Pointer to a device private structure.
608  * @sw_context: Pointer to the software context.
609  * @header: A command header with an embedded user-space context handle.
610  *
611  * Convenience function: Call vmw_cmd_res_check with the user-space context
612  * handle embedded in @header.
613  */
614 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
615                              struct vmw_sw_context *sw_context,
616                              SVGA3dCmdHeader *header)
617 {
618         struct vmw_cid_cmd {
619                 SVGA3dCmdHeader header;
620                 uint32_t cid;
621         } *cmd;
622
623         cmd = container_of(header, struct vmw_cid_cmd, header);
624         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
625                                  user_context_converter, &cmd->cid, NULL);
626 }
627
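/**
 * vmw_cmd_set_render_target_check - Validate an SVGA3dCmdSetRenderTarget
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context and render target surface ids and, when guest-backed
 * objects are in use, stages a render-target binding on the context.
 */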
628 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
629                                            struct vmw_sw_context *sw_context,
630                                            SVGA3dCmdHeader *header)
631 {
632         struct vmw_sid_cmd {
633                 SVGA3dCmdHeader header;
634                 SVGA3dCmdSetRenderTarget body;
635         } *cmd;
636         struct vmw_resource_val_node *ctx_node;
637         struct vmw_resource_val_node *res_node;
638         int ret;
639
640         cmd = container_of(header, struct vmw_sid_cmd, header);
641
642         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
643                                 user_context_converter, &cmd->body.cid,
644                                 &ctx_node);
645         if (unlikely(ret != 0))
646                 return ret;
647
648         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
649                                 user_surface_converter,
650                                 &cmd->body.target.sid, &res_node);
651         if (unlikely(ret != 0))
652                 return ret;
653
654         if (dev_priv->has_mob) {
655                 struct vmw_ctx_bindinfo bi;
656
657                 bi.ctx = ctx_node->res;
658                 bi.res = res_node ? res_node->res : NULL;
659                 bi.bt = vmw_ctx_binding_rt;
660                 bi.i1.rt_type = cmd->body.type;
661                 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
662         }
663
664         return 0;
665 }
666
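/**
 * vmw_cmd_surface_copy_check - Validate an SVGA3dCmdSurfaceCopy command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks both the source and destination surface ids.
 */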
667 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
668                                       struct vmw_sw_context *sw_context,
669                                       SVGA3dCmdHeader *header)
670 {
671         struct vmw_sid_cmd {
672                 SVGA3dCmdHeader header;
673                 SVGA3dCmdSurfaceCopy body;
674         } *cmd;
675         int ret;
676
677         cmd = container_of(header, struct vmw_sid_cmd, header);
678         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
679                                 user_surface_converter,
680                                 &cmd->body.src.sid, NULL);
681         if (unlikely(ret != 0))
682                 return ret;
683         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
684                                  user_surface_converter,
685                                  &cmd->body.dest.sid, NULL);
686 }
687
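/**
 * vmw_cmd_stretch_blt_check - Validate an SVGA3dCmdSurfaceStretchBlt command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks both the source and destination surface ids.
 */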
688 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
689                                      struct vmw_sw_context *sw_context,
690                                      SVGA3dCmdHeader *header)
691 {
692         struct vmw_sid_cmd {
693                 SVGA3dCmdHeader header;
694                 SVGA3dCmdSurfaceStretchBlt body;
695         } *cmd;
696         int ret;
697
698         cmd = container_of(header, struct vmw_sid_cmd, header);
699         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
700                                 user_surface_converter,
701                                 &cmd->body.src.sid, NULL);
702         if (unlikely(ret != 0))
703                 return ret;
704         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
705                                  user_surface_converter,
706                                  &cmd->body.dest.sid, NULL);
707 }
708
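/**
 * vmw_cmd_blt_surf_screen_check - Validate an SVGA3dCmdBlitSurfaceToScreen
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the source surface id.
 */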
709 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
710                                          struct vmw_sw_context *sw_context,
711                                          SVGA3dCmdHeader *header)
712 {
713         struct vmw_sid_cmd {
714                 SVGA3dCmdHeader header;
715                 SVGA3dCmdBlitSurfaceToScreen body;
716         } *cmd;
717
718         cmd = container_of(header, struct vmw_sid_cmd, header);
719
720         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
721                                  user_surface_converter,
722                                  &cmd->body.srcImage.sid, NULL);
723 }
724
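/**
 * vmw_cmd_present_check - Validate an SVGA3dCmdPresent command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the id of the surface to be presented.
 */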
725 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
726                                  struct vmw_sw_context *sw_context,
727                                  SVGA3dCmdHeader *header)
728 {
729         struct vmw_sid_cmd {
730                 SVGA3dCmdHeader header;
731                 SVGA3dCmdPresent body;
732         } *cmd;
733
734
735         cmd = container_of(header, struct vmw_sid_cmd, header);
736
737         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
738                                  user_surface_converter, &cmd->body.sid,
739                                  NULL);
740 }
741
742 /**
743  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
744  *
745  * @dev_priv: The device private structure.
746  * @new_query_bo: The new buffer holding query results.
747  * @sw_context: The software context used for this command submission.
748  *
749  * This function checks whether @new_query_bo is suitable for holding
750  * query results, and if another buffer currently is pinned for query
751  * results. If so, the function prepares the state of @sw_context for
752  * switching pinned buffers after successful submission of the current
753  * command batch.
754  */
755 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
756                                        struct ttm_buffer_object *new_query_bo,
757                                        struct vmw_sw_context *sw_context)
758 {
759         struct vmw_res_cache_entry *ctx_entry =
760                 &sw_context->res_cache[vmw_res_context];
761         int ret;
762
763         BUG_ON(!ctx_entry->valid);
764         sw_context->last_query_ctx = ctx_entry->res;
765
766         if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
767
768                 if (unlikely(new_query_bo->num_pages > 4)) {
769                         DRM_ERROR("Query buffer too large.\n");
770                         return -EINVAL;
771                 }
772
773                 if (unlikely(sw_context->cur_query_bo != NULL)) {
774                         sw_context->needs_post_query_barrier = true;
775                         ret = vmw_bo_to_validate_list(sw_context,
776                                                       sw_context->cur_query_bo,
777                                                       dev_priv->has_mob, NULL);
778                         if (unlikely(ret != 0))
779                                 return ret;
780                 }
781                 sw_context->cur_query_bo = new_query_bo;
782
783                 ret = vmw_bo_to_validate_list(sw_context,
784                                               dev_priv->dummy_query_bo,
785                                               dev_priv->has_mob, NULL);
786                 if (unlikely(ret != 0))
787                         return ret;
788
789         }
790
791         return 0;
792 }
793
794
795 /**
796  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
797  *
798  * @dev_priv: The device private structure.
799  * @sw_context: The software context used for this command submission batch.
800  *
801  * This function will check if we're switching query buffers, and will then,
802  * issue a dummy occlusion query wait used as a query barrier. When the fence
803  * object following that query wait has signaled, we are sure that all
804  * preceding queries have finished, and the old query buffer can be unpinned.
805  * However, since both the new query buffer and the old one are fenced with
806  * that fence, we can do an asynchronous unpin now, and be sure that the
807  * old query buffer won't be moved until the fence has signaled.
808  *
809  * As mentioned above, both the new and old query buffers need to be fenced
810  * using a sequence emitted *after* calling this function.
811  */
812 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
813                                      struct vmw_sw_context *sw_context)
814 {
815         /*
816          * The validate list should still hold references to all
817          * contexts here.
818          */
819
820         if (sw_context->needs_post_query_barrier) {
821                 struct vmw_res_cache_entry *ctx_entry =
822                         &sw_context->res_cache[vmw_res_context];
823                 struct vmw_resource *ctx;
824                 int ret;
825
826                 BUG_ON(!ctx_entry->valid);
827                 ctx = ctx_entry->res;
828
829                 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
830
831                 if (unlikely(ret != 0))
832                         DRM_ERROR("Out of fifo space for dummy query.\n");
833         }
834
835         if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
836                 if (dev_priv->pinned_bo) {
837                         vmw_bo_pin(dev_priv->pinned_bo, false);
838                         ttm_bo_unref(&dev_priv->pinned_bo);
839                 }
840
841                 if (!sw_context->needs_post_query_barrier) {
842                         vmw_bo_pin(sw_context->cur_query_bo, true);
843
844                         /*
845                          * We pin also the dummy_query_bo buffer so that we
846                          * don't need to validate it when emitting
847                          * dummy queries in context destroy paths.
848                          */
849
850                         vmw_bo_pin(dev_priv->dummy_query_bo, true);
851                         dev_priv->dummy_query_bo_pinned = true;
852
853                         BUG_ON(sw_context->last_query_ctx == NULL);
854                         dev_priv->query_cid = sw_context->last_query_ctx->id;
855                         dev_priv->query_cid_valid = true;
856                         dev_priv->pinned_bo =
857                                 ttm_bo_reference(sw_context->cur_query_bo);
858                 }
859         }
860 }
861
862 /**
863  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
864  * handle to a MOB id.
865  *
866  * @dev_priv: Pointer to a device private structure.
867  * @sw_context: The software context used for this command batch validation.
868  * @id: Pointer to the user-space handle to be translated.
869  * @vmw_bo_p: Points to a location that, on successful return will carry
870  * a reference-counted pointer to the DMA buffer identified by the
871  * user-space handle in @id.
872  *
873  * This function saves information needed to translate a user-space buffer
874  * handle to a MOB id. The translation does not take place immediately, but
875  * during a call to vmw_apply_relocations(). This function builds a relocation
876  * list and a list of buffers to validate. The former needs to be freed using
877  * either vmw_apply_relocations() or vmw_free_relocations(). The latter
878  * needs to be freed using vmw_clear_validations.
879  */
880 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
881                                  struct vmw_sw_context *sw_context,
882                                  SVGAMobId *id,
883                                  struct vmw_dma_buffer **vmw_bo_p)
884 {
885         struct vmw_dma_buffer *vmw_bo = NULL;
886         struct ttm_buffer_object *bo;
887         uint32_t handle = *id;
888         struct vmw_relocation *reloc;
889         int ret;
890
891         ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
892         if (unlikely(ret != 0)) {
893                 DRM_ERROR("Could not find or use MOB buffer.\n");
894                 return -EINVAL;
895         }
896         bo = &vmw_bo->base;
897
898         if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
899                 DRM_ERROR("Max number of relocations per submission"
900                           " exceeded\n");
901                 ret = -EINVAL;
902                 goto out_no_reloc;
903         }
904
905         reloc = &sw_context->relocs[sw_context->cur_reloc++];
906         reloc->mob_loc = id;
907         reloc->location = NULL;
908
909         ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
910         if (unlikely(ret != 0))
911                 goto out_no_reloc;
912
913         *vmw_bo_p = vmw_bo;
914         return 0;
915
916 out_no_reloc:
917         vmw_dmabuf_unreference(&vmw_bo);
918         *vmw_bo_p = NULL;
919         return ret;
920 }
921
922 /**
923  * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
924  * handle to a valid SVGAGuestPtr
925  *
926  * @dev_priv: Pointer to a device private structure.
927  * @sw_context: The software context used for this command batch validation.
928  * @ptr: Pointer to the user-space handle to be translated.
929  * @vmw_bo_p: Points to a location that, on successful return will carry
930  * a reference-counted pointer to the DMA buffer identified by the
931  * user-space handle in @ptr.
932  *
933  * This function saves information needed to translate a user-space buffer
934  * handle to a valid SVGAGuestPtr. The translation does not take place
935  * immediately, but during a call to vmw_apply_relocations().
936  * This function builds a relocation list and a list of buffers to validate.
937  * The former needs to be freed using either vmw_apply_relocations() or
938  * vmw_free_relocations(). The latter needs to be freed using
939  * vmw_clear_validations.
940  */
941 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
942                                    struct vmw_sw_context *sw_context,
943                                    SVGAGuestPtr *ptr,
944                                    struct vmw_dma_buffer **vmw_bo_p)
945 {
946         struct vmw_dma_buffer *vmw_bo = NULL;
947         struct ttm_buffer_object *bo;
948         uint32_t handle = ptr->gmrId;
949         struct vmw_relocation *reloc;
950         int ret;
951
952         ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
953         if (unlikely(ret != 0)) {
954                 DRM_ERROR("Could not find or use GMR region.\n");
955                 return -EINVAL;
956         }
957         bo = &vmw_bo->base;
958
959         if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
960                 DRM_ERROR("Max number of relocations per submission"
961                           " exceeded\n");
962                 ret = -EINVAL;
963                 goto out_no_reloc;
964         }
965
966         reloc = &sw_context->relocs[sw_context->cur_reloc++];
967         reloc->location = ptr;
968
969         ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
970         if (unlikely(ret != 0))
971                 goto out_no_reloc;
972
973         *vmw_bo_p = vmw_bo;
974         return 0;
975
976 out_no_reloc:
977         vmw_dmabuf_unreference(&vmw_bo);
978         *vmw_bo_p = NULL;
979         return ret;
980 }
981
982 /**
983  * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
984  *
985  * @dev_priv: Pointer to a device private struct.
986  * @sw_context: The software context used for this command submission.
987  * @header: Pointer to the command header in the command stream.
988  */
989 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
990                                   struct vmw_sw_context *sw_context,
991                                   SVGA3dCmdHeader *header)
992 {
993         struct vmw_begin_gb_query_cmd {
994                 SVGA3dCmdHeader header;
995                 SVGA3dCmdBeginGBQuery q;
996         } *cmd;
997
998         cmd = container_of(header, struct vmw_begin_gb_query_cmd,
999                            header);
1000
1001         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1002                                  user_context_converter, &cmd->q.cid,
1003                                  NULL);
1004 }
1005
1006 /**
1007  * vmw_cmd_begin_query - validate a  SVGA_3D_CMD_BEGIN_QUERY command.
1008  *
1009  * @dev_priv: Pointer to a device private struct.
1010  * @sw_context: The software context used for this command submission.
1011  * @header: Pointer to the command header in the command stream.
1012  */
1013 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1014                                struct vmw_sw_context *sw_context,
1015                                SVGA3dCmdHeader *header)
1016 {
1017         struct vmw_begin_query_cmd {
1018                 SVGA3dCmdHeader header;
1019                 SVGA3dCmdBeginQuery q;
1020         } *cmd;
1021
1022         cmd = container_of(header, struct vmw_begin_query_cmd,
1023                            header);
1024
1025         if (unlikely(dev_priv->has_mob)) {
1026                 struct {
1027                         SVGA3dCmdHeader header;
1028                         SVGA3dCmdBeginGBQuery q;
1029                 } gb_cmd;
1030
1031                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1032
1033                 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1034                 gb_cmd.header.size = cmd->header.size;
1035                 gb_cmd.q.cid = cmd->q.cid;
1036                 gb_cmd.q.type = cmd->q.type;
1037
1038                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1039                 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1040         }
1041
1042         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1043                                  user_context_converter, &cmd->q.cid,
1044                                  NULL);
1045 }
1046
1047 /**
1048  * vmw_cmd_end_gb_query - validate a  SVGA_3D_CMD_END_GB_QUERY command.
1049  *
1050  * @dev_priv: Pointer to a device private struct.
1051  * @sw_context: The software context used for this command submission.
1052  * @header: Pointer to the command header in the command stream.
1053  */
1054 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1055                                 struct vmw_sw_context *sw_context,
1056                                 SVGA3dCmdHeader *header)
1057 {
1058         struct vmw_dma_buffer *vmw_bo;
1059         struct vmw_query_cmd {
1060                 SVGA3dCmdHeader header;
1061                 SVGA3dCmdEndGBQuery q;
1062         } *cmd;
1063         int ret;
1064
1065         cmd = container_of(header, struct vmw_query_cmd, header);
1066         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1067         if (unlikely(ret != 0))
1068                 return ret;
1069
1070         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1071                                     &cmd->q.mobid,
1072                                     &vmw_bo);
1073         if (unlikely(ret != 0))
1074                 return ret;
1075
1076         ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1077
1078         vmw_dmabuf_unreference(&vmw_bo);
1079         return ret;
1080 }
1081
1082 /**
1083  * vmw_cmd_end_query - validate a  SVGA_3D_CMD_END_QUERY command.
1084  *
1085  * @dev_priv: Pointer to a device private struct.
1086  * @sw_context: The software context used for this command submission.
1087  * @header: Pointer to the command header in the command stream.
1088  */
1089 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1090                              struct vmw_sw_context *sw_context,
1091                              SVGA3dCmdHeader *header)
1092 {
1093         struct vmw_dma_buffer *vmw_bo;
1094         struct vmw_query_cmd {
1095                 SVGA3dCmdHeader header;
1096                 SVGA3dCmdEndQuery q;
1097         } *cmd;
1098         int ret;
1099
1100         cmd = container_of(header, struct vmw_query_cmd, header);
1101         if (dev_priv->has_mob) {
1102                 struct {
1103                         SVGA3dCmdHeader header;
1104                         SVGA3dCmdEndGBQuery q;
1105                 } gb_cmd;
1106
1107                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1108
1109                 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1110                 gb_cmd.header.size = cmd->header.size;
1111                 gb_cmd.q.cid = cmd->q.cid;
1112                 gb_cmd.q.type = cmd->q.type;
1113                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1114                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1115
1116                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1117                 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1118         }
1119
1120         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1121         if (unlikely(ret != 0))
1122                 return ret;
1123
1124         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1125                                       &cmd->q.guestResult,
1126                                       &vmw_bo);
1127         if (unlikely(ret != 0))
1128                 return ret;
1129
1130         ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1131
1132         vmw_dmabuf_unreference(&vmw_bo);
1133         return ret;
1134 }
1135
1136 /**
1137  * vmw_cmd_wait_gb_query - validate a  SVGA_3D_CMD_WAIT_GB_QUERY command.
1138  *
1139  * @dev_priv: Pointer to a device private struct.
1140  * @sw_context: The software context used for this command submission.
1141  * @header: Pointer to the command header in the command stream.
1142  */
1143 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1144                                  struct vmw_sw_context *sw_context,
1145                                  SVGA3dCmdHeader *header)
1146 {
1147         struct vmw_dma_buffer *vmw_bo;
1148         struct vmw_query_cmd {
1149                 SVGA3dCmdHeader header;
1150                 SVGA3dCmdWaitForGBQuery q;
1151         } *cmd;
1152         int ret;
1153
1154         cmd = container_of(header, struct vmw_query_cmd, header);
1155         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1156         if (unlikely(ret != 0))
1157                 return ret;
1158
1159         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1160                                     &cmd->q.mobid,
1161                                     &vmw_bo);
1162         if (unlikely(ret != 0))
1163                 return ret;
1164
1165         vmw_dmabuf_unreference(&vmw_bo);
1166         return 0;
1167 }
1168
1169 /**
1170  * vmw_cmd_wait_query - validate a  SVGA_3D_CMD_WAIT_QUERY command.
1171  *
1172  * @dev_priv: Pointer to a device private struct.
1173  * @sw_context: The software context used for this command submission.
1174  * @header: Pointer to the command header in the command stream.
1175  */
1176 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1177                               struct vmw_sw_context *sw_context,
1178                               SVGA3dCmdHeader *header)
1179 {
1180         struct vmw_dma_buffer *vmw_bo;
1181         struct vmw_query_cmd {
1182                 SVGA3dCmdHeader header;
1183                 SVGA3dCmdWaitForQuery q;
1184         } *cmd;
1185         int ret;
1186
1187         cmd = container_of(header, struct vmw_query_cmd, header);
1188         if (dev_priv->has_mob) {
1189                 struct {
1190                         SVGA3dCmdHeader header;
1191                         SVGA3dCmdWaitForGBQuery q;
1192                 } gb_cmd;
1193
1194                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1195
1196                 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1197                 gb_cmd.header.size = cmd->header.size;
1198                 gb_cmd.q.cid = cmd->q.cid;
1199                 gb_cmd.q.type = cmd->q.type;
1200                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1201                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1202
1203                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1204                 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1205         }
1206
1207         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1208         if (unlikely(ret != 0))
1209                 return ret;
1210
1211         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1212                                       &cmd->q.guestResult,
1213                                       &vmw_bo);
1214         if (unlikely(ret != 0))
1215                 return ret;
1216
1217         vmw_dmabuf_unreference(&vmw_bo);
1218         return 0;
1219 }
1220
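/**
 * vmw_cmd_dma - Validate an SVGA3dCmdSurfaceDMA command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Translates the guest buffer pointer, verifies the DMA suffix and clips the
 * transfer to the buffer object size, checks the host surface id, and hands
 * the command to vmw_kms_cursor_snoop().
 */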
1221 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1222                        struct vmw_sw_context *sw_context,
1223                        SVGA3dCmdHeader *header)
1224 {
1225         struct vmw_dma_buffer *vmw_bo = NULL;
1226         struct vmw_surface *srf = NULL;
1227         struct vmw_dma_cmd {
1228                 SVGA3dCmdHeader header;
1229                 SVGA3dCmdSurfaceDMA dma;
1230         } *cmd;
1231         int ret;
1232         SVGA3dCmdSurfaceDMASuffix *suffix;
1233         uint32_t bo_size;
1234
1235         cmd = container_of(header, struct vmw_dma_cmd, header);
1236         suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1237                                                header->size - sizeof(*suffix));
1238
1239         /* Make sure device and verifier stay in sync. */
1240         if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1241                 DRM_ERROR("Invalid DMA suffix size.\n");
1242                 return -EINVAL;
1243         }
1244
1245         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1246                                       &cmd->dma.guest.ptr,
1247                                       &vmw_bo);
1248         if (unlikely(ret != 0))
1249                 return ret;
1250
1251         /* Make sure DMA doesn't cross BO boundaries. */
1252         bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1253         if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1254                 DRM_ERROR("Invalid DMA offset.\n");
1255                 ret = -EINVAL;
                goto out_no_surface;
1256         }
1257
1258         bo_size -= cmd->dma.guest.ptr.offset;
1259         if (unlikely(suffix->maximumOffset > bo_size))
1260                 suffix->maximumOffset = bo_size;
1261
1262         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1263                                 user_surface_converter, &cmd->dma.host.sid,
1264                                 NULL);
1265         if (unlikely(ret != 0)) {
1266                 if (unlikely(ret != -ERESTARTSYS))
1267                         DRM_ERROR("could not find surface for DMA.\n");
1268                 goto out_no_surface;
1269         }
1270
1271         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1272
1273         vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1274                              header);
1275
1276 out_no_surface:
1277         vmw_dmabuf_unreference(&vmw_bo);
1278         return ret;
1279 }
1280
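/**
 * vmw_cmd_draw - Validate an SVGA3dCmdDrawPrimitives command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context id and the surface ids referenced by all vertex
 * declarations and primitive ranges, bounding their counts by the command
 * size.
 */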
1281 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1282                         struct vmw_sw_context *sw_context,
1283                         SVGA3dCmdHeader *header)
1284 {
1285         struct vmw_draw_cmd {
1286                 SVGA3dCmdHeader header;
1287                 SVGA3dCmdDrawPrimitives body;
1288         } *cmd;
1289         SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1290                 (unsigned long)header + sizeof(*cmd));
1291         SVGA3dPrimitiveRange *range;
1292         uint32_t i;
1293         uint32_t maxnum;
1294         int ret;
1295
1296         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1297         if (unlikely(ret != 0))
1298                 return ret;
1299
1300         cmd = container_of(header, struct vmw_draw_cmd, header);
1301         maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1302
1303         if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1304                 DRM_ERROR("Illegal number of vertex declarations.\n");
1305                 return -EINVAL;
1306         }
1307
1308         for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1309                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1310                                         user_surface_converter,
1311                                         &decl->array.surfaceId, NULL);
1312                 if (unlikely(ret != 0))
1313                         return ret;
1314         }
1315
1316         maxnum = (header->size - sizeof(cmd->body) -
1317                   cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1318         if (unlikely(cmd->body.numRanges > maxnum)) {
1319                 DRM_ERROR("Illegal number of index ranges.\n");
1320                 return -EINVAL;
1321         }
1322
1323         range = (SVGA3dPrimitiveRange *) decl;
1324         for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1325                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1326                                         user_surface_converter,
1327                                         &range->indexArray.surfaceId, NULL);
1328                 if (unlikely(ret != 0))
1329                         return ret;
1330         }
1331         return 0;
1332 }
1333
1334
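/**
 * vmw_cmd_tex_state - Validate an SVGA3dCmdSetTextureState command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context id and every texture surface bound by the command and,
 * when guest-backed objects are in use, stages the texture bindings on the
 * context.
 */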
1335 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1336                              struct vmw_sw_context *sw_context,
1337                              SVGA3dCmdHeader *header)
1338 {
1339         struct vmw_tex_state_cmd {
1340                 SVGA3dCmdHeader header;
1341                 SVGA3dCmdSetTextureState state;
1342         } *cmd;
1343
1344         SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1345           ((unsigned long) header + header->size + sizeof(header));
1346         SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1347                 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1348         struct vmw_resource_val_node *ctx_node;
1349         struct vmw_resource_val_node *res_node;
1350         int ret;
1351
1352         cmd = container_of(header, struct vmw_tex_state_cmd,
1353                            header);
1354
1355         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1356                                 user_context_converter, &cmd->state.cid,
1357                                 &ctx_node);
1358         if (unlikely(ret != 0))
1359                 return ret;
1360
1361         for (; cur_state < last_state; ++cur_state) {
1362                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1363                         continue;
1364
1365                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1366                                         user_surface_converter,
1367                                         &cur_state->value, &res_node);
1368                 if (unlikely(ret != 0))
1369                         return ret;
1370
1371                 if (dev_priv->has_mob) {
1372                         struct vmw_ctx_bindinfo bi;
1373
1374                         bi.ctx = ctx_node->res;
1375                         bi.res = res_node ? res_node->res : NULL;
1376                         bi.bt = vmw_ctx_binding_tex;
1377                         bi.i1.texture_stage = cur_state->stage;
1378                         vmw_context_binding_add(ctx_node->staged_bindings,
1379                                                 &bi);
1380                 }
1381         }
1382
1383         return 0;
1384 }
1385
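/**
 * vmw_cmd_check_define_gmrfb - Validate an SVGAFifoCmdDefineGMRFB command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 *
 * Translates the guest pointer of the GMRFB definition, adding its backing
 * buffer to the validation list.
 */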
1386 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1387                                       struct vmw_sw_context *sw_context,
1388                                       void *buf)
1389 {
1390         struct vmw_dma_buffer *vmw_bo;
1391         int ret;
1392
1393         struct {
1394                 uint32_t header;
1395                 SVGAFifoCmdDefineGMRFB body;
1396         } *cmd = buf;
1397
1398         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1399                                       &cmd->body.ptr,
1400                                       &vmw_bo);
1401         if (unlikely(ret != 0))
1402                 return ret;
1403
1404         vmw_dmabuf_unreference(&vmw_bo);
1405
1406         return ret;
1407 }
1408
1409 /**
1410  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1411  *
1412  * @dev_priv: Pointer to a device private struct.
1413  * @sw_context: The software context being used for this batch.
1414  * @res_type: The resource type.
1415  * @converter: Information about user-space binding for this resource type.
1416  * @res_id: Pointer to the user-space resource handle in the command stream.
1417  * @buf_id: Pointer to the user-space backup buffer handle in the command
1418  * stream.
1419  * @backup_offset: Offset of backup into MOB.
1420  *
1421  * This function prepares for registering a switch of backup buffers
1422  * in the resource metadata just prior to unreserving.
1423  */
1424 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1425                                  struct vmw_sw_context *sw_context,
1426                                  enum vmw_res_type res_type,
1427                                  const struct vmw_user_resource_conv
1428                                  *converter,
1429                                  uint32_t *res_id,
1430                                  uint32_t *buf_id,
1431                                  unsigned long backup_offset)
1432 {
1433         int ret;
1434         struct vmw_dma_buffer *dma_buf;
1435         struct vmw_resource_val_node *val_node;
1436
1437         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1438                                 converter, res_id, &val_node);
1439         if (unlikely(ret != 0))
1440                 return ret;
1441
1442         ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1443         if (unlikely(ret != 0))
1444                 return ret;
1445
1446         if (val_node->first_usage)
1447                 val_node->no_buffer_needed = true;
1448
1449         vmw_dmabuf_unreference(&val_node->new_backup);
1450         val_node->new_backup = dma_buf;
1451         val_node->new_backup_offset = backup_offset;
1452
1453         return 0;
1454 }
1455
1456 /**
1457  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1458  * command
1459  *
1460  * @dev_priv: Pointer to a device private struct.
1461  * @sw_context: The software context being used for this batch.
1462  * @header: Pointer to the command header in the command stream.
1463  */
1464 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1465                                    struct vmw_sw_context *sw_context,
1466                                    SVGA3dCmdHeader *header)
1467 {
1468         struct vmw_bind_gb_surface_cmd {
1469                 SVGA3dCmdHeader header;
1470                 SVGA3dCmdBindGBSurface body;
1471         } *cmd;
1472
1473         cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1474
1475         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1476                                      user_surface_converter,
1477                                      &cmd->body.sid, &cmd->body.mobid,
1478                                      0);
1479 }
1480
1481 /**
1482  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1483  * command
1484  *
1485  * @dev_priv: Pointer to a device private struct.
1486  * @sw_context: The software context being used for this batch.
1487  * @header: Pointer to the command header in the command stream.
1488  */
1489 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1490                                    struct vmw_sw_context *sw_context,
1491                                    SVGA3dCmdHeader *header)
1492 {
1493         struct vmw_gb_surface_cmd {
1494                 SVGA3dCmdHeader header;
1495                 SVGA3dCmdUpdateGBImage body;
1496         } *cmd;
1497
1498         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1499
1500         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1501                                  user_surface_converter,
1502                                  &cmd->body.image.sid, NULL);
1503 }
1504
1505 /**
1506  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1507  * command
1508  *
1509  * @dev_priv: Pointer to a device private struct.
1510  * @sw_context: The software context being used for this batch.
1511  * @header: Pointer to the command header in the command stream.
1512  */
1513 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1514                                      struct vmw_sw_context *sw_context,
1515                                      SVGA3dCmdHeader *header)
1516 {
1517         struct vmw_gb_surface_cmd {
1518                 SVGA3dCmdHeader header;
1519                 SVGA3dCmdUpdateGBSurface body;
1520         } *cmd;
1521
1522         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1523
1524         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1525                                  user_surface_converter,
1526                                  &cmd->body.sid, NULL);
1527 }
1528
1529 /**
1530  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1531  * command
1532  *
1533  * @dev_priv: Pointer to a device private struct.
1534  * @sw_context: The software context being used for this batch.
1535  * @header: Pointer to the command header in the command stream.
1536  */
1537 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1538                                      struct vmw_sw_context *sw_context,
1539                                      SVGA3dCmdHeader *header)
1540 {
1541         struct vmw_gb_surface_cmd {
1542                 SVGA3dCmdHeader header;
1543                 SVGA3dCmdReadbackGBImage body;
1544         } *cmd;
1545
1546         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1547
1548         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1549                                  user_surface_converter,
1550                                  &cmd->body.image.sid, NULL);
1551 }
1552
1553 /**
1554  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1555  * command
1556  *
1557  * @dev_priv: Pointer to a device private struct.
1558  * @sw_context: The software context being used for this batch.
1559  * @header: Pointer to the command header in the command stream.
1560  */
1561 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1562                                        struct vmw_sw_context *sw_context,
1563                                        SVGA3dCmdHeader *header)
1564 {
1565         struct vmw_gb_surface_cmd {
1566                 SVGA3dCmdHeader header;
1567                 SVGA3dCmdReadbackGBSurface body;
1568         } *cmd;
1569
1570         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1571
1572         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1573                                  user_surface_converter,
1574                                  &cmd->body.sid, NULL);
1575 }
1576
1577 /**
1578  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1579  * command
1580  *
1581  * @dev_priv: Pointer to a device private struct.
1582  * @sw_context: The software context being used for this batch.
1583  * @header: Pointer to the command header in the command stream.
1584  */
1585 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1586                                        struct vmw_sw_context *sw_context,
1587                                        SVGA3dCmdHeader *header)
1588 {
1589         struct vmw_gb_surface_cmd {
1590                 SVGA3dCmdHeader header;
1591                 SVGA3dCmdInvalidateGBImage body;
1592         } *cmd;
1593
1594         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1595
1596         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1597                                  user_surface_converter,
1598                                  &cmd->body.image.sid, NULL);
1599 }
1600
1601 /**
1602  * vmw_cmd_invalidate_gb_surface - Validate an
1603  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1604  *
1605  * @dev_priv: Pointer to a device private struct.
1606  * @sw_context: The software context being used for this batch.
1607  * @header: Pointer to the command header in the command stream.
1608  */
1609 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1610                                          struct vmw_sw_context *sw_context,
1611                                          SVGA3dCmdHeader *header)
1612 {
1613         struct vmw_gb_surface_cmd {
1614                 SVGA3dCmdHeader header;
1615                 SVGA3dCmdInvalidateGBSurface body;
1616         } *cmd;
1617
1618         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1619
1620         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1621                                  user_surface_converter,
1622                                  &cmd->body.sid, NULL);
1623 }
1624
1625
1626 /**
1627  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1628  * command
1629  *
1630  * @dev_priv: Pointer to a device private struct.
1631  * @sw_context: The software context being used for this batch.
1632  * @header: Pointer to the command header in the command stream.
1633  */
1634 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1635                                  struct vmw_sw_context *sw_context,
1636                                  SVGA3dCmdHeader *header)
1637 {
1638         struct vmw_shader_define_cmd {
1639                 SVGA3dCmdHeader header;
1640                 SVGA3dCmdDefineShader body;
1641         } *cmd;
1642         int ret;
1643         size_t size;
1644         struct vmw_resource_val_node *val;
1645
1646         cmd = container_of(header, struct vmw_shader_define_cmd,
1647                            header);
1648
1649         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1650                                 user_context_converter, &cmd->body.cid,
1651                                 &val);
1652         if (unlikely(ret != 0))
1653                 return ret;
1654
1655         if (unlikely(!dev_priv->has_mob))
1656                 return 0;
1657
1658         size = cmd->header.size - sizeof(cmd->body);
1659         ret = vmw_compat_shader_add(dev_priv,
1660                                     vmw_context_res_man(val->res),
1661                                     cmd->body.shid, cmd + 1,
1662                                     cmd->body.type, size,
1663                                     &sw_context->staged_cmd_res);
1664         if (unlikely(ret != 0))
1665                 return ret;
1666
1667         return vmw_resource_relocation_add(&sw_context->res_relocations,
1668                                            NULL, &cmd->header.id -
1669                                            sw_context->buf_start);
1672 }
1673
1674 /**
1675  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1676  * command
1677  *
1678  * @dev_priv: Pointer to a device private struct.
1679  * @sw_context: The software context being used for this batch.
1680  * @header: Pointer to the command header in the command stream.
1681  */
1682 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1683                                   struct vmw_sw_context *sw_context,
1684                                   SVGA3dCmdHeader *header)
1685 {
1686         struct vmw_shader_destroy_cmd {
1687                 SVGA3dCmdHeader header;
1688                 SVGA3dCmdDestroyShader body;
1689         } *cmd;
1690         int ret;
1691         struct vmw_resource_val_node *val;
1692
1693         cmd = container_of(header, struct vmw_shader_destroy_cmd,
1694                            header);
1695
1696         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1697                                 user_context_converter, &cmd->body.cid,
1698                                 &val);
1699         if (unlikely(ret != 0))
1700                 return ret;
1701
1702         if (unlikely(!dev_priv->has_mob))
1703                 return 0;
1704
1705         ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
1706                                        cmd->body.shid,
1707                                        cmd->body.type,
1708                                        &sw_context->staged_cmd_res);
1709         if (unlikely(ret != 0))
1710                 return ret;
1711
1712         return vmw_resource_relocation_add(&sw_context->res_relocations,
1713                                            NULL, &cmd->header.id -
1714                                            sw_context->buf_start);
1717 }
1718
1719 /**
1720  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1721  * command
1722  *
1723  * @dev_priv: Pointer to a device private struct.
1724  * @sw_context: The software context being used for this batch.
1725  * @header: Pointer to the command header in the command stream.
1726  */
1727 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1728                               struct vmw_sw_context *sw_context,
1729                               SVGA3dCmdHeader *header)
1730 {
1731         struct vmw_set_shader_cmd {
1732                 SVGA3dCmdHeader header;
1733                 SVGA3dCmdSetShader body;
1734         } *cmd;
1735         struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1736         struct vmw_ctx_bindinfo bi;
1737         struct vmw_resource *res = NULL;
1738         int ret;
1739
1740         cmd = container_of(header, struct vmw_set_shader_cmd,
1741                            header);
1742
1743         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1744                                 user_context_converter, &cmd->body.cid,
1745                                 &ctx_node);
1746         if (unlikely(ret != 0))
1747                 return ret;
1748
1749         if (!dev_priv->has_mob)
1750                 return 0;
1751
1752         if (cmd->body.shid != SVGA3D_INVALID_ID) {
1753                 res = vmw_compat_shader_lookup
1754                         (vmw_context_res_man(ctx_node->res),
1755                          cmd->body.shid,
1756                          cmd->body.type);
1757
1758                 if (!IS_ERR(res)) {
1759                         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1760                                                     vmw_res_shader,
1761                                                     &cmd->body.shid, res,
1762                                                     &res_node);
1763                         vmw_resource_unreference(&res);
1764                         if (unlikely(ret != 0))
1765                                 return ret;
1766                 }
1767         }
1768
1769         if (!res_node) {
1770                 ret = vmw_cmd_res_check(dev_priv, sw_context,
1771                                         vmw_res_shader,
1772                                         user_shader_converter,
1773                                         &cmd->body.shid, &res_node);
1774                 if (unlikely(ret != 0))
1775                         return ret;
1776         }
1777
1778         bi.ctx = ctx_node->res;
1779         bi.res = res_node ? res_node->res : NULL;
1780         bi.bt = vmw_ctx_binding_shader;
1781         bi.i1.shader_type = cmd->body.type;
1782         return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1783 }
1784
1785 /**
1786  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1787  * command
1788  *
1789  * @dev_priv: Pointer to a device private struct.
1790  * @sw_context: The software context being used for this batch.
1791  * @header: Pointer to the command header in the command stream.
1792  */
1793 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1794                                     struct vmw_sw_context *sw_context,
1795                                     SVGA3dCmdHeader *header)
1796 {
1797         struct vmw_set_shader_const_cmd {
1798                 SVGA3dCmdHeader header;
1799                 SVGA3dCmdSetShaderConst body;
1800         } *cmd;
1801         int ret;
1802
1803         cmd = container_of(header, struct vmw_set_shader_const_cmd,
1804                            header);
1805
1806         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1807                                 user_context_converter, &cmd->body.cid,
1808                                 NULL);
1809         if (unlikely(ret != 0))
1810                 return ret;
1811
1812         if (dev_priv->has_mob)
1813                 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1814
1815         return 0;
1816 }
1817
1818 /**
1819  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1820  * command
1821  *
1822  * @dev_priv: Pointer to a device private struct.
1823  * @sw_context: The software context being used for this batch.
1824  * @header: Pointer to the command header in the command stream.
1825  */
1826 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1827                                   struct vmw_sw_context *sw_context,
1828                                   SVGA3dCmdHeader *header)
1829 {
1830         struct vmw_bind_gb_shader_cmd {
1831                 SVGA3dCmdHeader header;
1832                 SVGA3dCmdBindGBShader body;
1833         } *cmd;
1834
1835         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1836                            header);
1837
1838         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1839                                      user_shader_converter,
1840                                      &cmd->body.shid, &cmd->body.mobid,
1841                                      cmd->body.offsetInBytes);
1842 }
1843
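/**
 * vmw_cmd_check_not_3d - Verify a non-3D SVGA FIFO command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: bytes remaining in the command stream.
 *        Out: size of this command in bytes.
 *
 * Determines the fixed size of the 2D command, verifies that it fits
 * within the remaining stream, and rejects it unless submitted from
 * kernel context. SVGA_CMD_DEFINE_GMRFB additionally has its guest
 * pointer translated and validated.
 */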
1844 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1845                                 struct vmw_sw_context *sw_context,
1846                                 void *buf, uint32_t *size)
1847 {
1848         uint32_t size_remaining = *size;
1849         uint32_t cmd_id;
1850
1851         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1852         switch (cmd_id) {
1853         case SVGA_CMD_UPDATE:
1854                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1855                 break;
1856         case SVGA_CMD_DEFINE_GMRFB:
1857                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1858                 break;
1859         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1860                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1861                 break;
1862         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1863                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
1864                 break;
1865         default:
1866                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1867                 return -EINVAL;
1868         }
1869
1870         if (*size > size_remaining) {
1871                 DRM_ERROR("Invalid SVGA command (size mismatch):"
1872                           " %u.\n", cmd_id);
1873                 return -EINVAL;
1874         }
1875
1876         if (unlikely(!sw_context->kernel)) {
1877                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1878                 return -EPERM;
1879         }
1880
1881         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1882                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1883
1884         return 0;
1885 }
1886
1887 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1888         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1889                     false, false, false),
1890         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1891                     false, false, false),
1892         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1893                     true, false, false),
1894         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1895                     true, false, false),
1896         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1897                     true, false, false),
1898         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1899                     false, false, false),
1900         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1901                     false, false, false),
1902         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1903                     true, false, false),
1904         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1905                     true, false, false),
1906         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1907                     true, false, false),
1908         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1909                     &vmw_cmd_set_render_target_check, true, false, false),
1910         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1911                     true, false, false),
1912         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1913                     true, false, false),
1914         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1915                     true, false, false),
1916         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1917                     true, false, false),
1918         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1919                     true, false, false),
1920         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1921                     true, false, false),
1922         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1923                     true, false, false),
1924         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1925                     false, false, false),
1926         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1927                     true, false, false),
1928         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1929                     true, false, false),
1930         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1931                     true, false, false),
1932         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1933                     true, false, false),
1934         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1935                     true, false, false),
1936         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1937                     true, false, false),
1938         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1939                     true, false, false),
1940         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1941                     true, false, false),
1942         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1943                     true, false, false),
1944         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1945                     true, false, false),
1946         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1947                     &vmw_cmd_blt_surf_screen_check, false, false, false),
1948         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1949                     false, false, false),
1950         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1951                     false, false, false),
1952         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1953                     false, false, false),
1954         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1955                     false, false, false),
1956         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1957                     false, false, false),
1958         VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1959                     false, false, false),
1960         VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1961                     false, false, false),
1962         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1963                     false, false, false),
1964         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1965                     false, false, false),
1966         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1967                     false, false, false),
1968         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1969                     false, false, false),
1970         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1971                     false, false, false),
1972         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1973                     false, false, false),
1974         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1975                     false, false, true),
1976         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1977                     false, false, true),
1978         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1979                     false, false, true),
1980         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1981                     false, false, true),
1982         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1983                     false, false, true),
1984         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1985                     false, false, true),
1986         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1987                     false, false, true),
1988         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1989                     false, false, true),
1990         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1991                     true, false, true),
1992         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1993                     false, false, true),
1994         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1995                     true, false, true),
1996         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1997                     &vmw_cmd_update_gb_surface, true, false, true),
1998         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
1999                     &vmw_cmd_readback_gb_image, true, false, true),
2000         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2001                     &vmw_cmd_readback_gb_surface, true, false, true),
2002         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2003                     &vmw_cmd_invalidate_gb_image, true, false, true),
2004         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2005                     &vmw_cmd_invalidate_gb_surface, true, false, true),
2006         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2007                     false, false, true),
2008         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2009                     false, false, true),
2010         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2011                     false, false, true),
2012         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2013                     false, false, true),
2014         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2015                     false, false, true),
2016         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2017                     false, false, true),
2018         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2019                     true, false, true),
2020         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2021                     false, false, true),
2022         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2023                     false, false, false),
2024         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2025                     true, false, true),
2026         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2027                     true, false, true),
2028         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2029                     true, false, true),
2030         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2031                     true, false, true),
2032         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2033                     false, false, true),
2034         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2035                     false, false, true),
2036         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2037                     false, false, true),
2038         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2039                     false, false, true),
2040         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2041                     false, false, true),
2042         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2043                     false, false, true),
2044         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2045                     false, false, true),
2046         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2047                     false, false, true),
2048         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2049                     false, false, true),
2050         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2051                     false, false, true),
2052         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2053                     true, false, true)
2054 };
2055
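/**
 * vmw_cmd_check - Verify a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: bytes remaining in the command stream.
 *        Out: size of this command in bytes.
 *
 * Non-3D commands are handed off to vmw_cmd_check_not_3d(). For 3D
 * commands, the header id is used to look up the corresponding entry
 * in vmw_cmd_entries[], and the command size, user/kernel privilege
 * and guest-backed capability requirements are checked before the
 * per-command verifier function is called.
 */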
2056 static int vmw_cmd_check(struct vmw_private *dev_priv,
2057                          struct vmw_sw_context *sw_context,
2058                          void *buf, uint32_t *size)
2059 {
2060         uint32_t cmd_id;
2061         uint32_t size_remaining = *size;
2062         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2063         int ret;
2064         const struct vmw_cmd_entry *entry;
2065         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2066
2067         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
2068         /* Handle any non-3D commands */
2069         if (unlikely(cmd_id < SVGA_CMD_MAX))
2070                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2071
2072
2073         cmd_id = le32_to_cpu(header->id);
2074         *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
2075
2076         cmd_id -= SVGA_3D_CMD_BASE;
2077         if (unlikely(*size > size_remaining))
2078                 goto out_invalid;
2079
2080         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
2081                 goto out_invalid;
2082
2083         entry = &vmw_cmd_entries[cmd_id];
2084         if (unlikely(!entry->func))
2085                 goto out_invalid;
2086
2087         if (unlikely(!entry->user_allow && !sw_context->kernel))
2088                 goto out_privileged;
2089
2090         if (unlikely(entry->gb_disable && gb))
2091                 goto out_old;
2092
2093         if (unlikely(entry->gb_enable && !gb))
2094                 goto out_new;
2095
2096         ret = entry->func(dev_priv, sw_context, header);
2097         if (unlikely(ret != 0))
2098                 goto out_invalid;
2099
2100         return 0;
2101 out_invalid:
2102         DRM_ERROR("Invalid SVGA3D command: %d\n",
2103                   cmd_id + SVGA_3D_CMD_BASE);
2104         return -EINVAL;
2105 out_privileged:
2106         DRM_ERROR("Privileged SVGA3D command: %d\n",
2107                   cmd_id + SVGA_3D_CMD_BASE);
2108         return -EPERM;
2109 out_old:
2110         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2111                   cmd_id + SVGA_3D_CMD_BASE);
2112         return -EINVAL;
2113 out_new:
2114         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
2115                   cmd_id + SVGA_3D_CMD_BASE);
2116         return -EINVAL;
2117 }
2118
2119 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2120                              struct vmw_sw_context *sw_context,
2121                              void *buf,
2122                              uint32_t size)
2123 {
2124         int32_t cur_size = size;
2125         int ret;
2126
2127         sw_context->buf_start = buf;
2128
2129         while (cur_size > 0) {
2130                 size = cur_size;
2131                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2132                 if (unlikely(ret != 0))
2133                         return ret;
2134                 buf = (void *)((unsigned long) buf + size);
2135                 cur_size -= size;
2136         }
2137
2138         if (unlikely(cur_size != 0)) {
2139                 DRM_ERROR("Command verifier out of sync.\n");
2140                 return -EINVAL;
2141         }
2142
2143         return 0;
2144 }
2145
2146 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2147 {
2148         sw_context->cur_reloc = 0;
2149 }
2150
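/**
 * vmw_apply_relocations - Patch guest pointers in the command stream
 *
 * @sw_context: The software context holding the recorded relocations.
 *
 * For each relocation, writes the final placement of the validated
 * buffer object back into the bounce copy of the command stream:
 * an offset into the framebuffer GMR for VRAM placements, a GMR id,
 * or a MOB id, depending on where the buffer was validated.
 */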
2151 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2152 {
2153         uint32_t i;
2154         struct vmw_relocation *reloc;
2155         struct ttm_validate_buffer *validate;
2156         struct ttm_buffer_object *bo;
2157
2158         for (i = 0; i < sw_context->cur_reloc; ++i) {
2159                 reloc = &sw_context->relocs[i];
2160                 validate = &sw_context->val_bufs[reloc->index].base;
2161                 bo = validate->bo;
2162                 switch (bo->mem.mem_type) {
2163                 case TTM_PL_VRAM:
2164                         reloc->location->offset += bo->offset;
2165                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2166                         break;
2167                 case VMW_PL_GMR:
2168                         reloc->location->gmrId = bo->mem.start;
2169                         break;
2170                 case VMW_PL_MOB:
2171                         *reloc->mob_loc = bo->mem.start;
2172                         break;
2173                 default:
2174                         BUG();
2175                 }
2176         }
2177         vmw_free_relocations(sw_context);
2178 }
2179
2180 /**
2181  * vmw_resource_list_unreference - Free up a resource list and unreference
2182  * all resources referenced by it.
2183  *
2184  * @list: The resource list.
2185  */
2186 static void vmw_resource_list_unreference(struct list_head *list)
2187 {
2188         struct vmw_resource_val_node *val, *val_next;
2189
2190         /*
2191          * Drop references to resources held during command submission.
2192          */
2193
2194         list_for_each_entry_safe(val, val_next, list, head) {
2195                 list_del_init(&val->head);
2196                 vmw_resource_unreference(&val->res);
2197                 if (unlikely(val->staged_bindings))
2198                         kfree(val->staged_bindings);
2199                 kfree(val);
2200         }
2201 }
2202
2203 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2204 {
2205         struct vmw_validate_buffer *entry, *next;
2206         struct vmw_resource_val_node *val;
2207
2208         /*
2209          * Drop references to DMA buffers held during command submission.
2210          */
2211         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2212                                  base.head) {
2213                 list_del(&entry->base.head);
2214                 ttm_bo_unref(&entry->base.bo);
2215                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2216                 sw_context->cur_val_buf--;
2217         }
2218         BUG_ON(sw_context->cur_val_buf != 0);
2219
2220         list_for_each_entry(val, &sw_context->resource_list, head)
2221                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2222 }
2223
2224 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2225                                       struct ttm_buffer_object *bo,
2226                                       bool validate_as_mob)
2227 {
2228         int ret;
2229
2230
2231         /*
2232          * Don't validate pinned buffers.
2233          */
2234
2235         if (bo == dev_priv->pinned_bo ||
2236             (bo == dev_priv->dummy_query_bo &&
2237              dev_priv->dummy_query_bo_pinned))
2238                 return 0;
2239
2240         if (validate_as_mob)
2241                 return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2242
2243         /*
2244          * Put BO in VRAM if there is space, otherwise as a GMR.
2245          * If there is no space in VRAM and GMR ids are all used up,
2246          * start evicting GMRs to make room. If the DMA buffer can't be
2247          * used as a GMR, this will return -ENOMEM.
2248          */
2249
2250         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2251         if (likely(ret == 0 || ret == -ERESTARTSYS))
2252                 return ret;
2253
2254         /*
2255          * If that failed, try VRAM again, this time evicting
2256          * previous contents.
2257          */
2258
2259         DRM_INFO("Falling through to VRAM.\n");
2260         ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2261         return ret;
2262 }
2263
2264 static int vmw_validate_buffers(struct vmw_private *dev_priv,
2265                                 struct vmw_sw_context *sw_context)
2266 {
2267         struct vmw_validate_buffer *entry;
2268         int ret;
2269
2270         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2271                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2272                                                  entry->validate_as_mob);
2273                 if (unlikely(ret != 0))
2274                         return ret;
2275         }
2276         return 0;
2277 }
2278
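/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer can hold @size bytes
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: Required size in bytes.
 *
 * Grows the buffer by roughly 50% at a time, page aligned, to avoid a
 * reallocation on every submission. The previous contents are not
 * preserved; the caller copies the command stream in afterwards.
 */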
2279 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2280                                  uint32_t size)
2281 {
2282         if (likely(sw_context->cmd_bounce_size >= size))
2283                 return 0;
2284
2285         if (sw_context->cmd_bounce_size == 0)
2286                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2287
2288         while (sw_context->cmd_bounce_size < size) {
2289                 sw_context->cmd_bounce_size =
2290                         PAGE_ALIGN(sw_context->cmd_bounce_size +
2291                                    (sw_context->cmd_bounce_size >> 1));
2292         }
2293
2294         if (sw_context->cmd_bounce != NULL)
2295                 vfree(sw_context->cmd_bounce);
2296
2297         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2298
2299         if (sw_context->cmd_bounce == NULL) {
2300                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
2301                 sw_context->cmd_bounce_size = 0;
2302                 return -ENOMEM;
2303         }
2304
2305         return 0;
2306 }
2307
2308 /**
2309  * vmw_execbuf_fence_commands - create and submit a command stream fence
2310  *
2311  * Creates a fence object and submits a command stream marker.
2312  * If this fails for some reason, we sync the fifo and return NULL.
2313  * It is then safe to fence buffers with a NULL pointer.
2314  *
2315  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
2316  * user-space fence handle is created; otherwise no handle is created.
2317  */
2318
2319 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2320                                struct vmw_private *dev_priv,
2321                                struct vmw_fence_obj **p_fence,
2322                                uint32_t *p_handle)
2323 {
2324         uint32_t sequence;
2325         int ret;
2326         bool synced = false;
2327
2328         /* p_handle implies file_priv. */
2329         BUG_ON(p_handle != NULL && file_priv == NULL);
2330
2331         ret = vmw_fifo_send_fence(dev_priv, &sequence);
2332         if (unlikely(ret != 0)) {
2333                 DRM_ERROR("Fence submission error. Syncing.\n");
2334                 synced = true;
2335         }
2336
2337         if (p_handle != NULL)
2338                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2339                                             sequence, p_fence, p_handle);
2340         else
2341                 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
2342
2343         if (unlikely(ret != 0 && !synced)) {
2344                 (void) vmw_fallback_wait(dev_priv, false, false,
2345                                          sequence, false,
2346                                          VMW_FENCE_WAIT_TIMEOUT);
2347                 *p_fence = NULL;
2348         }
2349
2350         return 0;
2351 }
2352
2353 /**
2354  * vmw_execbuf_copy_fence_user - copy fence object information to
2355  * user-space.
2356  *
2357  * @dev_priv: Pointer to a vmw_private struct.
2358  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2359  * @ret: Return value from fence object creation.
2360  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2361  * which the information should be copied.
2362  * @fence: Pointer to the fence object.
2363  * @fence_handle: User-space fence handle.
2364  *
2365  * This function copies fence information to user-space. If copying fails,
2366  * the user-space struct drm_vmw_fence_rep::error member is hopefully
2367  * left untouched, and if user-space has preloaded it with -EFAULT, the
2368  * error will hopefully be detected that way.
2369  * If copying fails, user-space is also unable to signal the fence
2370  * object, so we wait for it immediately and then unreference the
2371  * user-space reference.
2372  */
2373 void
2374 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2375                             struct vmw_fpriv *vmw_fp,
2376                             int ret,
2377                             struct drm_vmw_fence_rep __user *user_fence_rep,
2378                             struct vmw_fence_obj *fence,
2379                             uint32_t fence_handle)
2380 {
2381         struct drm_vmw_fence_rep fence_rep;
2382
2383         if (user_fence_rep == NULL)
2384                 return;
2385
2386         memset(&fence_rep, 0, sizeof(fence_rep));
2387
2388         fence_rep.error = ret;
2389         if (ret == 0) {
2390                 BUG_ON(fence == NULL);
2391
2392                 fence_rep.handle = fence_handle;
2393                 fence_rep.seqno = fence->base.seqno;
2394                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
2395                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
2396         }
2397
2398         /*
2399          * copy_to_user errors will be detected by user space not
2400          * seeing fence_rep::error filled in. Typically
2401          * user-space would have pre-set that member to -EFAULT.
2402          */
2403         ret = copy_to_user(user_fence_rep, &fence_rep,
2404                            sizeof(fence_rep));
2405
2406         /*
2407          * User-space lost the fence object. We need to sync
2408          * and unreference the handle.
2409          */
2410         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2411                 ttm_ref_object_base_unref(vmw_fp->tfile,
2412                                           fence_handle, TTM_REF_USAGE);
2413                 DRM_ERROR("Fence copy error. Syncing.\n");
2414                 (void) vmw_fence_obj_wait(fence, false, false,
2415                                           VMW_FENCE_WAIT_TIMEOUT);
2416         }
2417 }
2418
2419
2420
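/**
 * vmw_execbuf_process - Validate and submit a user-space command stream
 *
 * @file_priv: Pointer to the calling file. May be NULL when no
 * user-space fence handle is requested.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command stream, copied to
 * the bounce buffer when @kernel_commands is NULL.
 * @kernel_commands: Optional kernel-space command stream. When non-NULL
 * the stream is trusted and kernel-only commands are allowed.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: Optional lag throttle in microseconds.
 * @user_fence_rep: Optional user-space address to copy fence info to.
 * @out_fence: If non-NULL, the created fence object is handed out here
 * instead of being unreferenced.
 *
 * Verifies the command stream, reserves and validates all referenced
 * resources and buffers, copies the stream to the fifo, emits a fence
 * and fences the validation list with it.
 */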
2421 int vmw_execbuf_process(struct drm_file *file_priv,
2422                         struct vmw_private *dev_priv,
2423                         void __user *user_commands,
2424                         void *kernel_commands,
2425                         uint32_t command_size,
2426                         uint64_t throttle_us,
2427                         struct drm_vmw_fence_rep __user *user_fence_rep,
2428                         struct vmw_fence_obj **out_fence)
2429 {
2430         struct vmw_sw_context *sw_context = &dev_priv->ctx;
2431         struct vmw_fence_obj *fence = NULL;
2432         struct vmw_resource *error_resource;
2433         struct list_head resource_list;
2434         struct ww_acquire_ctx ticket;
2435         uint32_t handle;
2436         void *cmd;
2437         int ret;
2438
2439         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2440         if (unlikely(ret != 0))
2441                 return -ERESTARTSYS;
2442
2443         if (kernel_commands == NULL) {
2444                 sw_context->kernel = false;
2445
2446                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2447                 if (unlikely(ret != 0))
2448                         goto out_unlock;
2449
2450
2451                 ret = copy_from_user(sw_context->cmd_bounce,
2452                                      user_commands, command_size);
2453
2454                 if (unlikely(ret != 0)) {
2455                         ret = -EFAULT;
2456                         DRM_ERROR("Failed copying commands.\n");
2457                         goto out_unlock;
2458                 }
2459                 kernel_commands = sw_context->cmd_bounce;
2460         } else
2461                 sw_context->kernel = true;
2462
2463         sw_context->fp = vmw_fpriv(file_priv);
2464         sw_context->cur_reloc = 0;
2465         sw_context->cur_val_buf = 0;
2466         INIT_LIST_HEAD(&sw_context->resource_list);
2467         sw_context->cur_query_bo = dev_priv->pinned_bo;
2468         sw_context->last_query_ctx = NULL;
2469         sw_context->needs_post_query_barrier = false;
2470         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2471         INIT_LIST_HEAD(&sw_context->validate_nodes);
2472         INIT_LIST_HEAD(&sw_context->res_relocations);
2473         if (!sw_context->res_ht_initialized) {
2474                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2475                 if (unlikely(ret != 0))
2476                         goto out_unlock;
2477                 sw_context->res_ht_initialized = true;
2478         }
2479         INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2480
2481         INIT_LIST_HEAD(&resource_list);
2482         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2483                                 command_size);
2484         if (unlikely(ret != 0))
2485                 goto out_err_nores;
2486
2487         ret = vmw_resources_reserve(sw_context);
2488         if (unlikely(ret != 0))
2489                 goto out_err_nores;
2490
2491         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
2492         if (unlikely(ret != 0))
2493                 goto out_err;
2494
2495         ret = vmw_validate_buffers(dev_priv, sw_context);
2496         if (unlikely(ret != 0))
2497                 goto out_err;
2498
2499         ret = vmw_resources_validate(sw_context);
2500         if (unlikely(ret != 0))
2501                 goto out_err;
2502
2503         if (throttle_us) {
2504                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2505                                    throttle_us);
2506
2507                 if (unlikely(ret != 0))
2508                         goto out_err;
2509         }
2510
2511         ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2512         if (unlikely(ret != 0)) {
2513                 ret = -ERESTARTSYS;
2514                 goto out_err;
2515         }
2516
2517         if (dev_priv->has_mob) {
2518                 ret = vmw_rebind_contexts(sw_context);
2519                 if (unlikely(ret != 0))
2520                         goto out_unlock_binding;
2521         }
2522
2523         cmd = vmw_fifo_reserve(dev_priv, command_size);
2524         if (unlikely(cmd == NULL)) {
2525                 DRM_ERROR("Failed reserving fifo space for commands.\n");
2526                 ret = -ENOMEM;
2527                 goto out_unlock_binding;
2528         }
2529
2530         vmw_apply_relocations(sw_context);
2531         memcpy(cmd, kernel_commands, command_size);
2532
2533         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2534         vmw_resource_relocations_free(&sw_context->res_relocations);
2535
2536         vmw_fifo_commit(dev_priv, command_size);
2537
2538         vmw_query_bo_switch_commit(dev_priv, sw_context);
2539         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2540                                          &fence,
2541                                          (user_fence_rep) ? &handle : NULL);
2542         /*
2543          * This error is harmless, because if fence submission fails,
2544          * vmw_fifo_send_fence will sync. The error will be propagated to
2545          * user-space in @fence_rep
2546          */
2547
2548         if (ret != 0)
2549                 DRM_ERROR("Fence submission error. Syncing.\n");
2550
2551         vmw_resource_list_unreserve(&sw_context->resource_list, false);
2552         mutex_unlock(&dev_priv->binding_mutex);
2553
2554         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2555                                     (void *) fence);
2556
2557         if (unlikely(dev_priv->pinned_bo != NULL &&
2558                      !dev_priv->query_cid_valid))
2559                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2560
2561         vmw_clear_validations(sw_context);
2562         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2563                                     user_fence_rep, fence, handle);
2564
2565         /* Don't unreference when handing fence out */
2566         if (unlikely(out_fence != NULL)) {
2567                 *out_fence = fence;
2568                 fence = NULL;
2569         } else if (likely(fence != NULL)) {
2570                 vmw_fence_obj_unreference(&fence);
2571         }
2572
2573         list_splice_init(&sw_context->resource_list, &resource_list);
2574         vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
2575         mutex_unlock(&dev_priv->cmdbuf_mutex);
2576
2577         /*
2578          * Unreference resources outside of the cmdbuf_mutex to
2579          * avoid deadlocks in resource destruction paths.
2580          */
2581         vmw_resource_list_unreference(&resource_list);
2582
2583         return 0;
2584
2585 out_unlock_binding:
2586         mutex_unlock(&dev_priv->binding_mutex);
2587 out_err:
2588         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2589 out_err_nores:
2590         vmw_resource_list_unreserve(&sw_context->resource_list, true);
2591         vmw_resource_relocations_free(&sw_context->res_relocations);
2592         vmw_free_relocations(sw_context);
2593         vmw_clear_validations(sw_context);
2594         if (unlikely(dev_priv->pinned_bo != NULL &&
2595                      !dev_priv->query_cid_valid))
2596                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2597 out_unlock:
2598         list_splice_init(&sw_context->resource_list, &resource_list);
2599         error_resource = sw_context->error_resource;
2600         sw_context->error_resource = NULL;
2601         vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
2602         mutex_unlock(&dev_priv->cmdbuf_mutex);
2603
2604         /*
2605          * Unreference resources outside of the cmdbuf_mutex to
2606          * avoid deadlocks in resource destruction paths.
2607          */
2608         vmw_resource_list_unreference(&resource_list);
2609         if (unlikely(error_resource != NULL))
2610                 vmw_resource_unreference(&error_resource);
2611
2612         return ret;
2613 }
2614
2615 /**
2616  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2617  *
2618  * @dev_priv: The device private structure.
2619  *
2620  * This function is called to idle the fifo and unpin the query buffer
2621  * if the normal way to do this hits an error, which should typically be
2622  * extremely rare.
2623  */
2624 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2625 {
2626         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2627
2628         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2629         vmw_bo_pin(dev_priv->pinned_bo, false);
2630         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2631         dev_priv->dummy_query_bo_pinned = false;
2632 }
2633
2634
2635 /**
2636  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2637  * query bo.
2638  *
2639  * @dev_priv: The device private structure.
2640  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2641  * _after_ a query barrier that flushes all queries touching the current
2642  * buffer pointed to by @dev_priv->pinned_bo
2643  *
2644  * This function should be used to unpin the pinned query bo, or
2645  * as a query barrier when we need to make sure that all queries have
2646  * finished before the next fifo command. (For example on hardware
2647  * context destructions where the hardware may otherwise leak unfinished
2648  * queries).
2649  *
2650  * This function does not return any failure codes, but makes attempts
2651  * to do safe unpinning in case of errors.
2652  *
2653  * The function will synchronize on the previous query barrier, and will
2654  * thus not finish until that barrier has executed.
2655  *
2656  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2657  * before calling this function.
2658  */
2659 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2660                                      struct vmw_fence_obj *fence)
2661 {
2662         int ret = 0;
2663         struct list_head validate_list;
2664         struct ttm_validate_buffer pinned_val, query_val;
2665         struct vmw_fence_obj *lfence = NULL;
2666         struct ww_acquire_ctx ticket;
2667
2668         if (dev_priv->pinned_bo == NULL)
2669                 goto out_unlock;
2670
2671         INIT_LIST_HEAD(&validate_list);
2672
2673         pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2674         pinned_val.shared = false;
2675         list_add_tail(&pinned_val.head, &validate_list);
2676
2677         query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2678         query_val.shared = false;
2679         list_add_tail(&query_val.head, &validate_list);
2680
2681         ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
2682         if (unlikely(ret != 0)) {
2683                 vmw_execbuf_unpin_panic(dev_priv);
2684                 goto out_no_reserve;
2685         }
2686
2687         if (dev_priv->query_cid_valid) {
2688                 BUG_ON(fence != NULL);
2689                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2690                 if (unlikely(ret != 0)) {
2691                         vmw_execbuf_unpin_panic(dev_priv);
2692                         goto out_no_emit;
2693                 }
2694                 dev_priv->query_cid_valid = false;
2695         }
2696
2697         vmw_bo_pin(dev_priv->pinned_bo, false);
2698         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2699         dev_priv->dummy_query_bo_pinned = false;
2700
2701         if (fence == NULL) {
2702                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2703                                                   NULL);
2704                 fence = lfence;
2705         }
2706         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2707         if (lfence != NULL)
2708                 vmw_fence_obj_unreference(&lfence);
2709
2710         ttm_bo_unref(&query_val.bo);
2711         ttm_bo_unref(&pinned_val.bo);
2712         ttm_bo_unref(&dev_priv->pinned_bo);
2713
2714 out_unlock:
2715         return;
2716
2717 out_no_emit:
2718         ttm_eu_backoff_reservation(&ticket, &validate_list);
2719 out_no_reserve:
2720         ttm_bo_unref(&query_val.bo);
2721         ttm_bo_unref(&pinned_val.bo);
2722         ttm_bo_unref(&dev_priv->pinned_bo);
2723 }
2724
2725 /**
2726  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2727  * query bo.
2728  *
2729  * @dev_priv: The device private structure.
2730  *
2731  * This function should be used to unpin the pinned query bo, or
2732  * as a query barrier when we need to make sure that all queries have
2733  * finished before the next fifo command. (For example on hardware
2734  * context destructions where the hardware may otherwise leak unfinished
2735  * queries).
2736  *
2737  * This function does not return any failure codes, but makes attempts
2738  * to do safe unpinning in case of errors.
2739  *
2740  * The function will synchronize on the previous query barrier, and will
2741  * thus not finish until that barrier has executed.
2742  */
2743 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2744 {
2745         mutex_lock(&dev_priv->cmdbuf_mutex);
2746         if (dev_priv->query_cid_valid)
2747                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2748         mutex_unlock(&dev_priv->cmdbuf_mutex);
2749 }
2750
2751
2752 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2753                       struct drm_file *file_priv)
2754 {
2755         struct vmw_private *dev_priv = vmw_priv(dev);
2756         struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2757         int ret;
2758
2759         /*
2760          * This will allow us to extend the ioctl argument while
2761          * maintaining backwards compatibility:
2762          * We take different code paths depending on the value of
2763          * arg->version.
2764          */
2765
2766         if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2767                 DRM_ERROR("Incorrect execbuf version.\n");
2768                 DRM_ERROR("You're running outdated experimental "
2769                           "vmwgfx user-space drivers.\n");
2770                 return -EINVAL;
2771         }
2772
2773         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2774         if (unlikely(ret != 0))
2775                 return ret;
2776
2777         ret = vmw_execbuf_process(file_priv, dev_priv,
2778                                   (void __user *)(unsigned long)arg->commands,
2779                                   NULL, arg->command_size, arg->throttle_us,
2780                                   (void __user *)(unsigned long)arg->fence_rep,
2781                                   NULL);
2782
2783         if (unlikely(ret != 0))
2784                 goto out_unlock;
2785
2786         vmw_kms_cursor_post_execbuf(dev_priv);
2787
2788 out_unlock:
2789         ttm_read_unlock(&dev_priv->reservation_sem);
2790         return ret;
2791 }