drm/vmwgfx: Extend the command verifier to handle guest-backed on / off
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1 /**************************************************************************
2  *
3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_reg.h"
30 #include <drm/ttm/ttm_bo_api.h>
31 #include <drm/ttm/ttm_placement.h>
32
33 #define VMW_RES_HT_ORDER 12
34
35 /**
36  * struct vmw_resource_relocation - Relocation info for resources
37  *
38  * @head: List head for the software context's relocation list.
39  * @res: Non-ref-counted pointer to the resource.
40  * @offset: Offset into the command buffer, in units of 4-byte entries,
41  * where the id that needs fixup is located.
42  */
43 struct vmw_resource_relocation {
44         struct list_head head;
45         const struct vmw_resource *res;
46         unsigned long offset;
47 };
48
49 /**
50  * struct vmw_resource_val_node - Validation info for resources
51  *
52  * @head: List head for the software context's resource list.
53  * @hash: Hash entry for quick resource to val_node lookup.
54  * @res: Ref-counted pointer to the resource.
56  * @new_backup: Refcounted pointer to the new backup buffer.
57  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
58  * @first_usage: Set to true the first time the resource is referenced in
59  * the command stream.
60  * @no_buffer_needed: Resources do not need to allocate buffer backup on
61  * reservation. The command stream will provide one.
62  */
63 struct vmw_resource_val_node {
64         struct list_head head;
65         struct drm_hash_item hash;
66         struct vmw_resource *res;
67         struct vmw_dma_buffer *new_backup;
68         unsigned long new_backup_offset;
69         bool first_usage;
70         bool no_buffer_needed;
71 };
72
73 /**
74  * struct vmw_cmd_entry - Describe a command for the verifier
75  *
 * @func: Call-back for processing the command.
76  * @user_allow: Whether allowed from the execbuf ioctl.
77  * @gb_disable: Whether disabled if guest-backed objects are available.
78  * @gb_enable: Whether enabled iff guest-backed objects are available.
79  */
80 struct vmw_cmd_entry {
81         int (*func) (struct vmw_private *, struct vmw_sw_context *,
82                      SVGA3dCmdHeader *);
83         bool user_allow;
84         bool gb_disable;
85         bool gb_enable;
86 };
87
88 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
89         [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
90                                        (_gb_disable), (_gb_enable)}
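
/*
 * Illustrative example: a verifier table entry declared with VMW_CMD_DEF
 * might look like
 *
 *   VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *               true, false, false),
 *
 * i.e. a command allowed from the execbuf ioctl, verified by
 * vmw_cmd_surface_copy_check(), and not toggled by guest-backed object
 * support.
 */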
91
92 /**
93  * vmw_resource_list_unreserve - Unreserve resources previously reserved for
94  * command submission.
95  *
96  * @list: List of resources to unreserve.
97  * @backoff: Whether command submission failed.
98  */
99 static void vmw_resource_list_unreserve(struct list_head *list,
100                                         bool backoff)
101 {
102         struct vmw_resource_val_node *val;
103
104         list_for_each_entry(val, list, head) {
105                 struct vmw_resource *res = val->res;
106                 struct vmw_dma_buffer *new_backup =
107                         backoff ? NULL : val->new_backup;
108
109                 vmw_resource_unreserve(res, new_backup,
110                         val->new_backup_offset);
111                 vmw_dmabuf_unreference(&val->new_backup);
112         }
113 }
114
115
116 /**
117  * vmw_resource_val_add - Add a resource to the software context's
118  * resource list if it's not already on it.
119  *
120  * @sw_context: Pointer to the software context.
121  * @res: Pointer to the resource.
122  * @p_node: On successful return, points to a valid pointer to a
123  * struct vmw_resource_val_node, if non-NULL on entry.
124  */
125 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
126                                 struct vmw_resource *res,
127                                 struct vmw_resource_val_node **p_node)
128 {
129         struct vmw_resource_val_node *node;
130         struct drm_hash_item *hash;
131         int ret;
132
133         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
134                                     &hash) == 0)) {
135                 node = container_of(hash, struct vmw_resource_val_node, hash);
136                 node->first_usage = false;
137                 if (unlikely(p_node != NULL))
138                         *p_node = node;
139                 return 0;
140         }
141
142         node = kzalloc(sizeof(*node), GFP_KERNEL);
143         if (unlikely(node == NULL)) {
144                 DRM_ERROR("Failed to allocate a resource validation "
145                           "entry.\n");
146                 return -ENOMEM;
147         }
148
149         node->hash.key = (unsigned long) res;
150         ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
151         if (unlikely(ret != 0)) {
152                 DRM_ERROR("Failed to initialize a resource validation "
153                           "entry.\n");
154                 kfree(node);
155                 return ret;
156         }
157         list_add_tail(&node->head, &sw_context->resource_list);
158         node->res = vmw_resource_reference(res);
159         node->first_usage = true;
160
161         if (unlikely(p_node != NULL))
162                 *p_node = node;
163
164         return 0;
165 }
166
167 /**
168  * vmw_resource_relocation_add - Add a relocation to the relocation list
169  *
170  * @list: Pointer to head of relocation list.
171  * @res: The resource.
172  * @offset: Offset into the command buffer currently being parsed where the
173  * id that needs fixup is located. Granularity is 4 bytes.
174  */
175 static int vmw_resource_relocation_add(struct list_head *list,
176                                        const struct vmw_resource *res,
177                                        unsigned long offset)
178 {
179         struct vmw_resource_relocation *rel;
180
181         rel = kmalloc(sizeof(*rel), GFP_KERNEL);
182         if (unlikely(rel == NULL)) {
183                 DRM_ERROR("Failed to allocate a resource relocation.\n");
184                 return -ENOMEM;
185         }
186
187         rel->res = res;
188         rel->offset = offset;
189         list_add_tail(&rel->head, list);
190
191         return 0;
192 }
193
194 /**
195  * vmw_resource_relocations_free - Free all relocations on a list
196  *
197  * @list: Pointer to the head of the relocation list.
198  */
199 static void vmw_resource_relocations_free(struct list_head *list)
200 {
201         struct vmw_resource_relocation *rel, *n;
202
203         list_for_each_entry_safe(rel, n, list, head) {
204                 list_del(&rel->head);
205                 kfree(rel);
206         }
207 }
208
209 /**
210  * vmw_resource_relocations_apply - Apply all relocations on a list
211  *
212  * @cb: Pointer to the start of the command buffer being patched. This need
213  * not be the same buffer as the one being parsed when the relocation
214  * list was built, but the contents must be the same modulo the
215  * resource ids.
216  * @list: Pointer to the head of the relocation list.
217  */
218 static void vmw_resource_relocations_apply(uint32_t *cb,
219                                            struct list_head *list)
220 {
221         struct vmw_resource_relocation *rel;
222
223         list_for_each_entry(rel, list, head)
224                 cb[rel->offset] = rel->res->id;
225 }
226
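/**
 * vmw_cmd_invalid - Handler for commands that must not appear in a
 * user-space command stream.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */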
227 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
228                            struct vmw_sw_context *sw_context,
229                            SVGA3dCmdHeader *header)
230 {
231         return capable(CAP_SYS_ADMIN) ? : -EINVAL;
232 }
233
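/**
 * vmw_cmd_ok - Handler for commands that need no further checking.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */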
234 static int vmw_cmd_ok(struct vmw_private *dev_priv,
235                       struct vmw_sw_context *sw_context,
236                       SVGA3dCmdHeader *header)
237 {
238         return 0;
239 }
240
241 /**
242  * vmw_bo_to_validate_list - add a bo to a validate list
243  *
244  * @sw_context: The software context used for this command submission batch.
245  * @bo: The buffer object to add.
246  * @validate_as_mob: Validate this buffer as a MOB.
247  * @p_val_node: If non-NULL, will be updated with the validate node number
248  * on return.
249  *
250  * Returns -EINVAL if the limit of number of buffer objects per command
251  * submission is reached.
252  */
253 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
254                                    struct ttm_buffer_object *bo,
255                                    bool validate_as_mob,
256                                    uint32_t *p_val_node)
257 {
258         uint32_t val_node;
259         struct vmw_validate_buffer *vval_buf;
260         struct ttm_validate_buffer *val_buf;
261         struct drm_hash_item *hash;
262         int ret;
263
264         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
265                                     &hash) == 0)) {
266                 vval_buf = container_of(hash, struct vmw_validate_buffer,
267                                         hash);
268                 if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
269                         DRM_ERROR("Inconsistent buffer usage.\n");
270                         return -EINVAL;
271                 }
272                 val_buf = &vval_buf->base;
273                 val_node = vval_buf - sw_context->val_bufs;
274         } else {
275                 val_node = sw_context->cur_val_buf;
276                 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
277                         DRM_ERROR("Max number of DMA buffers per submission "
278                                   "exceeded.\n");
279                         return -EINVAL;
280                 }
281                 vval_buf = &sw_context->val_bufs[val_node];
282                 vval_buf->hash.key = (unsigned long) bo;
283                 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
284                 if (unlikely(ret != 0)) {
285                         DRM_ERROR("Failed to initialize a buffer validation "
286                                   "entry.\n");
287                         return ret;
288                 }
289                 ++sw_context->cur_val_buf;
290                 val_buf = &vval_buf->base;
291                 val_buf->bo = ttm_bo_reference(bo);
292                 val_buf->reserved = false;
293                 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
294                 vval_buf->validate_as_mob = validate_as_mob;
295         }
296
297         sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
298
299         if (p_val_node)
300                 *p_val_node = val_node;
301
302         return 0;
303 }
304
305 /**
306  * vmw_resources_reserve - Reserve all resources on the sw_context's
307  * resource list.
308  *
309  * @sw_context: Pointer to the software context.
310  *
311  * Note that since VMware's command submission currently is protected by
312  * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
313  * since only a single thread at once will attempt this.
314  */
315 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
316 {
317         struct vmw_resource_val_node *val;
318         int ret;
319
320         list_for_each_entry(val, &sw_context->resource_list, head) {
321                 struct vmw_resource *res = val->res;
322
323                 ret = vmw_resource_reserve(res, val->no_buffer_needed);
324                 if (unlikely(ret != 0))
325                         return ret;
326
327                 if (res->backup) {
328                         struct ttm_buffer_object *bo = &res->backup->base;
329
330                         ret = vmw_bo_to_validate_list
331                                 (sw_context, bo,
332                                  vmw_resource_needs_backup(res), NULL);
333
334                         if (unlikely(ret != 0))
335                                 return ret;
336                 }
337         }
338         return 0;
339 }
340
341 /**
342  * vmw_resources_validate - Validate all resources on the sw_context's
343  * resource list.
344  *
345  * @sw_context: Pointer to the software context.
346  *
347  * Before this function is called, all resource backup buffers must have
348  * been validated.
349  */
350 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
351 {
352         struct vmw_resource_val_node *val;
353         int ret;
354
355         list_for_each_entry(val, &sw_context->resource_list, head) {
356                 struct vmw_resource *res = val->res;
357
358                 ret = vmw_resource_validate(res);
359                 if (unlikely(ret != 0)) {
360                         if (ret != -ERESTARTSYS)
361                                 DRM_ERROR("Failed to validate resource.\n");
362                         return ret;
363                 }
364         }
365         return 0;
366 }
367
368 /**
369  * vmw_cmd_res_check - Check that a resource is present and if so, put it
370  * on the resource validate list unless it's already there.
371  *
372  * @dev_priv: Pointer to a device private structure.
373  * @sw_context: Pointer to the software context.
374  * @res_type: Resource type.
375  * @converter: User-space visible, type-specific information.
376  * @id: Pointer to the location in the command buffer currently being
377  * parsed from where the user-space resource id handle is located.
 * @p_val: If non-NULL, points to the resource's validation node on
 * successful return.
378  */
379 static int vmw_cmd_res_check(struct vmw_private *dev_priv,
380                              struct vmw_sw_context *sw_context,
381                              enum vmw_res_type res_type,
382                              const struct vmw_user_resource_conv *converter,
383                              uint32_t *id,
384                              struct vmw_resource_val_node **p_val)
385 {
386         struct vmw_res_cache_entry *rcache =
387                 &sw_context->res_cache[res_type];
388         struct vmw_resource *res;
389         struct vmw_resource_val_node *node;
390         int ret;
391
392         if (*id == SVGA3D_INVALID_ID)
393                 return 0;
394
395         /*
396          * Fastpath in case of repeated commands referencing the same
397          * resource
398          */
399
400         if (likely(rcache->valid && *id == rcache->handle)) {
401                 const struct vmw_resource *res = rcache->res;
402
403                 rcache->node->first_usage = false;
404                 if (p_val)
405                         *p_val = rcache->node;
406
407                 return vmw_resource_relocation_add
408                         (&sw_context->res_relocations, res,
409                          id - sw_context->buf_start);
410         }
411
412         ret = vmw_user_resource_lookup_handle(dev_priv,
413                                               sw_context->tfile,
414                                               *id,
415                                               converter,
416                                               &res);
417         if (unlikely(ret != 0)) {
418                 DRM_ERROR("Could not find or use resource 0x%08x.\n",
419                           (unsigned) *id);
420                 dump_stack();
421                 return ret;
422         }
423
424         rcache->valid = true;
425         rcache->res = res;
426         rcache->handle = *id;
427
428         ret = vmw_resource_relocation_add(&sw_context->res_relocations,
429                                           res,
430                                           id - sw_context->buf_start);
431         if (unlikely(ret != 0))
432                 goto out_no_reloc;
433
434         ret = vmw_resource_val_add(sw_context, res, &node);
435         if (unlikely(ret != 0))
436                 goto out_no_reloc;
437
438         rcache->node = node;
439         if (p_val)
440                 *p_val = node;
441         vmw_resource_unreference(&res);
442         return 0;
443
444 out_no_reloc:
445         BUG_ON(sw_context->error_resource != NULL);
446         sw_context->error_resource = res;
447
448         return ret;
449 }
450
451 /**
452  * vmw_cmd_cid_check - Check a command header for valid context information.
453  *
454  * @dev_priv: Pointer to a device private structure.
455  * @sw_context: Pointer to the software context.
456  * @header: A command header with an embedded user-space context handle.
457  *
458  * Convenience function: Call vmw_cmd_res_check with the user-space context
459  * handle embedded in @header.
460  */
461 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
462                              struct vmw_sw_context *sw_context,
463                              SVGA3dCmdHeader *header)
464 {
465         struct vmw_cid_cmd {
466                 SVGA3dCmdHeader header;
467                 __le32 cid;
468         } *cmd;
469
470         cmd = container_of(header, struct vmw_cid_cmd, header);
471         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
472                                  user_context_converter, &cmd->cid, NULL);
473 }
474
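/**
 * vmw_cmd_set_render_target_check - Validate a set-render-target command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the embedded context id and adds the render target surface to
 * the resource validation list.
 */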
475 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
476                                            struct vmw_sw_context *sw_context,
477                                            SVGA3dCmdHeader *header)
478 {
479         struct vmw_sid_cmd {
480                 SVGA3dCmdHeader header;
481                 SVGA3dCmdSetRenderTarget body;
482         } *cmd;
483         int ret;
484
485         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
486         if (unlikely(ret != 0))
487                 return ret;
488
489         cmd = container_of(header, struct vmw_sid_cmd, header);
490         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
491                                 user_surface_converter,
492                                 &cmd->body.target.sid, NULL);
493         return ret;
494 }
495
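/**
 * vmw_cmd_surface_copy_check - Validate a surface-copy command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds both the source and the destination surface to the resource
 * validation list.
 */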
496 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
497                                       struct vmw_sw_context *sw_context,
498                                       SVGA3dCmdHeader *header)
499 {
500         struct vmw_sid_cmd {
501                 SVGA3dCmdHeader header;
502                 SVGA3dCmdSurfaceCopy body;
503         } *cmd;
504         int ret;
505
506         cmd = container_of(header, struct vmw_sid_cmd, header);
507         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
508                                 user_surface_converter,
509                                 &cmd->body.src.sid, NULL);
510         if (unlikely(ret != 0))
511                 return ret;
512         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
513                                  user_surface_converter,
514                                  &cmd->body.dest.sid, NULL);
515 }
516
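/**
 * vmw_cmd_stretch_blt_check - Validate a surface stretch-blit command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds both the source and the destination surface to the resource
 * validation list.
 */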
517 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
518                                      struct vmw_sw_context *sw_context,
519                                      SVGA3dCmdHeader *header)
520 {
521         struct vmw_sid_cmd {
522                 SVGA3dCmdHeader header;
523                 SVGA3dCmdSurfaceStretchBlt body;
524         } *cmd;
525         int ret;
526
527         cmd = container_of(header, struct vmw_sid_cmd, header);
528         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
529                                 user_surface_converter,
530                                 &cmd->body.src.sid, NULL);
531         if (unlikely(ret != 0))
532                 return ret;
533         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
534                                  user_surface_converter,
535                                  &cmd->body.dest.sid, NULL);
536 }
537
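/**
 * vmw_cmd_blt_surf_screen_check - Validate a blit-surface-to-screen command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds the source image surface to the resource validation list.
 */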
538 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
539                                          struct vmw_sw_context *sw_context,
540                                          SVGA3dCmdHeader *header)
541 {
542         struct vmw_sid_cmd {
543                 SVGA3dCmdHeader header;
544                 SVGA3dCmdBlitSurfaceToScreen body;
545         } *cmd;
546
547         cmd = container_of(header, struct vmw_sid_cmd, header);
548
549         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
550                                  user_surface_converter,
551                                  &cmd->body.srcImage.sid, NULL);
552 }
553
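/**
 * vmw_cmd_present_check - Validate a present command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds the presented surface to the resource validation list.
 */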
554 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
555                                  struct vmw_sw_context *sw_context,
556                                  SVGA3dCmdHeader *header)
557 {
558         struct vmw_sid_cmd {
559                 SVGA3dCmdHeader header;
560                 SVGA3dCmdPresent body;
561         } *cmd;
562
563
564         cmd = container_of(header, struct vmw_sid_cmd, header);
565
566         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
567                                  user_surface_converter, &cmd->body.sid,
568                                  NULL);
569 }
570
571 /**
572  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
573  *
574  * @dev_priv: The device private structure.
575  * @new_query_bo: The new buffer holding query results.
576  * @sw_context: The software context used for this command submission.
577  *
578  * This function checks whether @new_query_bo is suitable for holding
579  * query results, and if another buffer currently is pinned for query
580  * results. If so, the function prepares the state of @sw_context for
581  * switching pinned buffers after successful submission of the current
582  * command batch.
583  */
584 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
585                                        struct ttm_buffer_object *new_query_bo,
586                                        struct vmw_sw_context *sw_context)
587 {
588         struct vmw_res_cache_entry *ctx_entry =
589                 &sw_context->res_cache[vmw_res_context];
590         int ret;
591
592         BUG_ON(!ctx_entry->valid);
593         sw_context->last_query_ctx = ctx_entry->res;
594
595         if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
596
597                 if (unlikely(new_query_bo->num_pages > 4)) {
598                         DRM_ERROR("Query buffer too large.\n");
599                         return -EINVAL;
600                 }
601
602                 if (unlikely(sw_context->cur_query_bo != NULL)) {
603                         sw_context->needs_post_query_barrier = true;
604                         ret = vmw_bo_to_validate_list(sw_context,
605                                                       sw_context->cur_query_bo,
606                                                       dev_priv->has_mob, NULL);
607                         if (unlikely(ret != 0))
608                                 return ret;
609                 }
610                 sw_context->cur_query_bo = new_query_bo;
611
612                 ret = vmw_bo_to_validate_list(sw_context,
613                                               dev_priv->dummy_query_bo,
614                                               dev_priv->has_mob, NULL);
615                 if (unlikely(ret != 0))
616                         return ret;
617
618         }
619
620         return 0;
621 }
622
623
624 /**
625  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
626  *
627  * @dev_priv: The device private structure.
628  * @sw_context: The software context used for this command submission batch.
629  *
630  * This function will check if we're switching query buffers, and will then,
631  * issue a dummy occlusion query wait used as a query barrier. When the fence
632  * object following that query wait has signaled, we are sure that all
633  * preceding queries have finished, and the old query buffer can be unpinned.
634  * However, since both the new query buffer and the old one are fenced with
635  * that fence, we can do an asynchronous unpin now, and be sure that the
636  * old query buffer won't be moved until the fence has signaled.
637  *
638  * As mentioned above, both the new and old query buffers need to be fenced
639  * using a sequence emitted *after* calling this function.
640  */
641 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
642                                      struct vmw_sw_context *sw_context)
643 {
644         /*
645          * The validate list should still hold references to all
646          * contexts here.
647          */
648
649         if (sw_context->needs_post_query_barrier) {
650                 struct vmw_res_cache_entry *ctx_entry =
651                         &sw_context->res_cache[vmw_res_context];
652                 struct vmw_resource *ctx;
653                 int ret;
654
655                 BUG_ON(!ctx_entry->valid);
656                 ctx = ctx_entry->res;
657
658                 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
659
660                 if (unlikely(ret != 0))
661                         DRM_ERROR("Out of fifo space for dummy query.\n");
662         }
663
664         if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
665                 if (dev_priv->pinned_bo) {
666                         vmw_bo_pin(dev_priv->pinned_bo, false);
667                         ttm_bo_unref(&dev_priv->pinned_bo);
668                 }
669
670                 if (!sw_context->needs_post_query_barrier) {
671                         vmw_bo_pin(sw_context->cur_query_bo, true);
672
673                         /*
674                          * We pin also the dummy_query_bo buffer so that we
675                          * don't need to validate it when emitting
676                          * dummy queries in context destroy paths.
677                          */
678
679                         vmw_bo_pin(dev_priv->dummy_query_bo, true);
680                         dev_priv->dummy_query_bo_pinned = true;
681
682                         BUG_ON(sw_context->last_query_ctx == NULL);
683                         dev_priv->query_cid = sw_context->last_query_ctx->id;
684                         dev_priv->query_cid_valid = true;
685                         dev_priv->pinned_bo =
686                                 ttm_bo_reference(sw_context->cur_query_bo);
687                 }
688         }
689 }
690
691 /**
692  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
693  * handle to a MOB id.
694  *
695  * @dev_priv: Pointer to a device private structure.
696  * @sw_context: The software context used for this command batch validation.
697  * @id: Pointer to the user-space handle to be translated.
698  * @vmw_bo_p: Points to a location that, on successful return will carry
699  * a reference-counted pointer to the DMA buffer identified by the
700  * user-space handle in @id.
701  *
702  * This function saves information needed to translate a user-space buffer
703  * handle to a MOB id. The translation does not take place immediately, but
704  * during a call to vmw_apply_relocations(). This function builds a relocation
705  * list and a list of buffers to validate. The former needs to be freed using
706  * either vmw_apply_relocations() or vmw_free_relocations(). The latter
707  * needs to be freed using vmw_clear_validations.
708  */
709 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
710                                  struct vmw_sw_context *sw_context,
711                                  SVGAMobId *id,
712                                  struct vmw_dma_buffer **vmw_bo_p)
713 {
714         struct vmw_dma_buffer *vmw_bo = NULL;
715         struct ttm_buffer_object *bo;
716         uint32_t handle = *id;
717         struct vmw_relocation *reloc;
718         int ret;
719
720         ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
721         if (unlikely(ret != 0)) {
722                 DRM_ERROR("Could not find or use MOB buffer.\n");
723                 return -EINVAL;
724         }
725         bo = &vmw_bo->base;
726
727         if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
728                 DRM_ERROR("Max number relocations per submission"
729                           " exceeded\n");
730                 ret = -EINVAL;
731                 goto out_no_reloc;
732         }
733
734         reloc = &sw_context->relocs[sw_context->cur_reloc++];
735         reloc->mob_loc = id;
736         reloc->location = NULL;
737
738         ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
739         if (unlikely(ret != 0))
740                 goto out_no_reloc;
741
742         *vmw_bo_p = vmw_bo;
743         return 0;
744
745 out_no_reloc:
746         vmw_dmabuf_unreference(&vmw_bo);
747         *vmw_bo_p = NULL;
748         return ret;
749 }
750
751 /**
752  * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
753  * handle to a valid SVGAGuestPtr
754  *
755  * @dev_priv: Pointer to a device private structure.
756  * @sw_context: The software context used for this command batch validation.
757  * @ptr: Pointer to the user-space handle to be translated.
758  * @vmw_bo_p: Points to a location that, on successful return will carry
759  * a reference-counted pointer to the DMA buffer identified by the
760  * user-space handle in @ptr.
761  *
762  * This function saves information needed to translate a user-space buffer
763  * handle to a valid SVGAGuestPtr. The translation does not take place
764  * immediately, but during a call to vmw_apply_relocations().
765  * This function builds a relocation list and a list of buffers to validate.
766  * The former needs to be freed using either vmw_apply_relocations() or
767  * vmw_free_relocations(). The latter needs to be freed using
768  * vmw_clear_validations.
769  */
770 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
771                                    struct vmw_sw_context *sw_context,
772                                    SVGAGuestPtr *ptr,
773                                    struct vmw_dma_buffer **vmw_bo_p)
774 {
775         struct vmw_dma_buffer *vmw_bo = NULL;
776         struct ttm_buffer_object *bo;
777         uint32_t handle = ptr->gmrId;
778         struct vmw_relocation *reloc;
779         int ret;
780
781         ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
782         if (unlikely(ret != 0)) {
783                 DRM_ERROR("Could not find or use GMR region.\n");
784                 return -EINVAL;
785         }
786         bo = &vmw_bo->base;
787
788         if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
789                 DRM_ERROR("Max number relocations per submission"
790                           " exceeded\n");
791                 ret = -EINVAL;
792                 goto out_no_reloc;
793         }
794
795         reloc = &sw_context->relocs[sw_context->cur_reloc++];
796         reloc->location = ptr;
797
798         ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
799         if (unlikely(ret != 0))
800                 goto out_no_reloc;
801
802         *vmw_bo_p = vmw_bo;
803         return 0;
804
805 out_no_reloc:
806         vmw_dmabuf_unreference(&vmw_bo);
807         *vmw_bo_p = NULL;
808         return ret;
809 }
810
811 /**
812  * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
813  *
814  * @dev_priv: Pointer to a device private struct.
815  * @sw_context: The software context used for this command submission.
816  * @header: Pointer to the command header in the command stream.
817  */
818 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
819                                   struct vmw_sw_context *sw_context,
820                                   SVGA3dCmdHeader *header)
821 {
822         struct vmw_begin_gb_query_cmd {
823                 SVGA3dCmdHeader header;
824                 SVGA3dCmdBeginGBQuery q;
825         } *cmd;
826
827         cmd = container_of(header, struct vmw_begin_gb_query_cmd,
828                            header);
829
830         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
831                                  user_context_converter, &cmd->q.cid,
832                                  NULL);
833 }
834
835 /**
836  * vmw_cmd_begin_query - validate a  SVGA_3D_CMD_BEGIN_QUERY command.
837  *
838  * @dev_priv: Pointer to a device private struct.
839  * @sw_context: The software context used for this command submission.
840  * @header: Pointer to the command header in the command stream.
841  */
842 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
843                                struct vmw_sw_context *sw_context,
844                                SVGA3dCmdHeader *header)
845 {
846         struct vmw_begin_query_cmd {
847                 SVGA3dCmdHeader header;
848                 SVGA3dCmdBeginQuery q;
849         } *cmd;
850
851         cmd = container_of(header, struct vmw_begin_query_cmd,
852                            header);
853
854         if (unlikely(dev_priv->has_mob)) {
855                 struct {
856                         SVGA3dCmdHeader header;
857                         SVGA3dCmdBeginGBQuery q;
858                 } gb_cmd;
859
860                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
861
862                 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
863                 gb_cmd.header.size = cmd->header.size;
864                 gb_cmd.q.cid = cmd->q.cid;
865                 gb_cmd.q.type = cmd->q.type;
866
867                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
868                 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
869         }
870
871         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
872                                  user_context_converter, &cmd->q.cid,
873                                  NULL);
874 }
875
876 /**
877  * vmw_cmd_end_gb_query - validate a  SVGA_3D_CMD_END_GB_QUERY command.
878  *
879  * @dev_priv: Pointer to a device private struct.
880  * @sw_context: The software context used for this command submission.
881  * @header: Pointer to the command header in the command stream.
882  */
883 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
884                                 struct vmw_sw_context *sw_context,
885                                 SVGA3dCmdHeader *header)
886 {
887         struct vmw_dma_buffer *vmw_bo;
888         struct vmw_query_cmd {
889                 SVGA3dCmdHeader header;
890                 SVGA3dCmdEndGBQuery q;
891         } *cmd;
892         int ret;
893
894         cmd = container_of(header, struct vmw_query_cmd, header);
895         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
896         if (unlikely(ret != 0))
897                 return ret;
898
899         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
900                                     &cmd->q.mobid,
901                                     &vmw_bo);
902         if (unlikely(ret != 0))
903                 return ret;
904
905         ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
906
907         vmw_dmabuf_unreference(&vmw_bo);
908         return ret;
909 }
910
911 /**
912  * vmw_cmd_end_query - validate a  SVGA_3D_CMD_END_QUERY command.
913  *
914  * @dev_priv: Pointer to a device private struct.
915  * @sw_context: The software context used for this command submission.
916  * @header: Pointer to the command header in the command stream.
917  */
918 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
919                              struct vmw_sw_context *sw_context,
920                              SVGA3dCmdHeader *header)
921 {
922         struct vmw_dma_buffer *vmw_bo;
923         struct vmw_query_cmd {
924                 SVGA3dCmdHeader header;
925                 SVGA3dCmdEndQuery q;
926         } *cmd;
927         int ret;
928
929         cmd = container_of(header, struct vmw_query_cmd, header);
930         if (dev_priv->has_mob) {
931                 struct {
932                         SVGA3dCmdHeader header;
933                         SVGA3dCmdEndGBQuery q;
934                 } gb_cmd;
935
936                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
937
938                 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
939                 gb_cmd.header.size = cmd->header.size;
940                 gb_cmd.q.cid = cmd->q.cid;
941                 gb_cmd.q.type = cmd->q.type;
942                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
943                 gb_cmd.q.offset = cmd->q.guestResult.offset;
944
945                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
946                 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
947         }
948
949         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
950         if (unlikely(ret != 0))
951                 return ret;
952
953         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
954                                       &cmd->q.guestResult,
955                                       &vmw_bo);
956         if (unlikely(ret != 0))
957                 return ret;
958
959         ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
960
961         vmw_dmabuf_unreference(&vmw_bo);
962         return ret;
963 }
964
965 /**
966  * vmw_cmd_wait_gb_query - validate a  SVGA_3D_CMD_WAIT_GB_QUERY command.
967  *
968  * @dev_priv: Pointer to a device private struct.
969  * @sw_context: The software context used for this command submission.
970  * @header: Pointer to the command header in the command stream.
971  */
972 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
973                                  struct vmw_sw_context *sw_context,
974                                  SVGA3dCmdHeader *header)
975 {
976         struct vmw_dma_buffer *vmw_bo;
977         struct vmw_query_cmd {
978                 SVGA3dCmdHeader header;
979                 SVGA3dCmdWaitForGBQuery q;
980         } *cmd;
981         int ret;
982
983         cmd = container_of(header, struct vmw_query_cmd, header);
984         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
985         if (unlikely(ret != 0))
986                 return ret;
987
988         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
989                                     &cmd->q.mobid,
990                                     &vmw_bo);
991         if (unlikely(ret != 0))
992                 return ret;
993
994         vmw_dmabuf_unreference(&vmw_bo);
995         return 0;
996 }
997
998 /**
999  * vmw_cmd_wait_query - validate a  SVGA_3D_CMD_WAIT_QUERY command.
1000  *
1001  * @dev_priv: Pointer to a device private struct.
1002  * @sw_context: The software context used for this command submission.
1003  * @header: Pointer to the command header in the command stream.
1004  */
1005 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1006                               struct vmw_sw_context *sw_context,
1007                               SVGA3dCmdHeader *header)
1008 {
1009         struct vmw_dma_buffer *vmw_bo;
1010         struct vmw_query_cmd {
1011                 SVGA3dCmdHeader header;
1012                 SVGA3dCmdWaitForQuery q;
1013         } *cmd;
1014         int ret;
1015
1016         cmd = container_of(header, struct vmw_query_cmd, header);
1017         if (dev_priv->has_mob) {
1018                 struct {
1019                         SVGA3dCmdHeader header;
1020                         SVGA3dCmdWaitForGBQuery q;
1021                 } gb_cmd;
1022
1023                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1024
1025                 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1026                 gb_cmd.header.size = cmd->header.size;
1027                 gb_cmd.q.cid = cmd->q.cid;
1028                 gb_cmd.q.type = cmd->q.type;
1029                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1030                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1031
1032                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1033                 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1034         }
1035
1036         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1037         if (unlikely(ret != 0))
1038                 return ret;
1039
1040         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1041                                       &cmd->q.guestResult,
1042                                       &vmw_bo);
1043         if (unlikely(ret != 0))
1044                 return ret;
1045
1046         vmw_dmabuf_unreference(&vmw_bo);
1047         return 0;
1048 }
1049
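/**
 * vmw_cmd_dma - Validate a surface-DMA command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Translates the guest pointer to a validated buffer object, looks up the
 * host surface and, if both are found, lets the kms code snoop the command
 * for cursor updates.
 */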
1050 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1051                        struct vmw_sw_context *sw_context,
1052                        SVGA3dCmdHeader *header)
1053 {
1054         struct vmw_dma_buffer *vmw_bo = NULL;
1055         struct vmw_surface *srf = NULL;
1056         struct vmw_dma_cmd {
1057                 SVGA3dCmdHeader header;
1058                 SVGA3dCmdSurfaceDMA dma;
1059         } *cmd;
1060         int ret;
1061
1062         cmd = container_of(header, struct vmw_dma_cmd, header);
1063         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1064                                       &cmd->dma.guest.ptr,
1065                                       &vmw_bo);
1066         if (unlikely(ret != 0))
1067                 return ret;
1068
1069         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1070                                 user_surface_converter, &cmd->dma.host.sid,
1071                                 NULL);
1072         if (unlikely(ret != 0)) {
1073                 if (unlikely(ret != -ERESTARTSYS))
1074                         DRM_ERROR("could not find surface for DMA.\n");
1075                 goto out_no_surface;
1076         }
1077
1078         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1079
1080         vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
1081
1082 out_no_surface:
1083         vmw_dmabuf_unreference(&vmw_bo);
1084         return ret;
1085 }
1086
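/**
 * vmw_cmd_draw - Validate a draw-primitives command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the embedded context id, bounds-checks the declared number of
 * vertex declarations and index ranges against the command size, and adds
 * every referenced vertex and index surface to the resource validation
 * list.
 */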
1087 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1088                         struct vmw_sw_context *sw_context,
1089                         SVGA3dCmdHeader *header)
1090 {
1091         struct vmw_draw_cmd {
1092                 SVGA3dCmdHeader header;
1093                 SVGA3dCmdDrawPrimitives body;
1094         } *cmd;
1095         SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1096                 (unsigned long)header + sizeof(*cmd));
1097         SVGA3dPrimitiveRange *range;
1098         uint32_t i;
1099         uint32_t maxnum;
1100         int ret;
1101
1102         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1103         if (unlikely(ret != 0))
1104                 return ret;
1105
1106         cmd = container_of(header, struct vmw_draw_cmd, header);
1107         maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1108
1109         if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1110                 DRM_ERROR("Illegal number of vertex declarations.\n");
1111                 return -EINVAL;
1112         }
1113
1114         for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1115                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1116                                         user_surface_converter,
1117                                         &decl->array.surfaceId, NULL);
1118                 if (unlikely(ret != 0))
1119                         return ret;
1120         }
1121
1122         maxnum = (header->size - sizeof(cmd->body) -
1123                   cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1124         if (unlikely(cmd->body.numRanges > maxnum)) {
1125                 DRM_ERROR("Illegal number of index ranges.\n");
1126                 return -EINVAL;
1127         }
1128
1129         range = (SVGA3dPrimitiveRange *) decl;
1130         for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1131                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1132                                         user_surface_converter,
1133                                         &range->indexArray.surfaceId, NULL);
1134                 if (unlikely(ret != 0))
1135                         return ret;
1136         }
1137         return 0;
1138 }
1139
1140
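/**
 * vmw_cmd_tex_state - Validate a set-texture-state command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the embedded context id and adds the surface of every
 * SVGA3D_TS_BIND_TEXTURE state in the command payload to the resource
 * validation list.
 */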
1141 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1142                              struct vmw_sw_context *sw_context,
1143                              SVGA3dCmdHeader *header)
1144 {
1145         struct vmw_tex_state_cmd {
1146                 SVGA3dCmdHeader header;
1147                 SVGA3dCmdSetTextureState state;
1148         };
1149
1150         SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1151           ((unsigned long) header + header->size + sizeof(header));
1152         SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1153                 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1154         int ret;
1155
1156         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1157         if (unlikely(ret != 0))
1158                 return ret;
1159
1160         for (; cur_state < last_state; ++cur_state) {
1161                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1162                         continue;
1163
1164                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1165                                         user_surface_converter,
1166                                         &cur_state->value, NULL);
1167                 if (unlikely(ret != 0))
1168                         return ret;
1169         }
1170
1171         return 0;
1172 }
1173
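/**
 * vmw_cmd_check_define_gmrfb - Validate a define-GMRFB command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 *
 * Translates the guest pointer of the GMRFB definition and thereby adds
 * the backing buffer to the validation list.
 */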
1174 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1175                                       struct vmw_sw_context *sw_context,
1176                                       void *buf)
1177 {
1178         struct vmw_dma_buffer *vmw_bo;
1179         int ret;
1180
1181         struct {
1182                 uint32_t header;
1183                 SVGAFifoCmdDefineGMRFB body;
1184         } *cmd = buf;
1185
1186         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1187                                       &cmd->body.ptr,
1188                                       &vmw_bo);
1189         if (unlikely(ret != 0))
1190                 return ret;
1191
1192         vmw_dmabuf_unreference(&vmw_bo);
1193
1194         return ret;
1195 }
1196
1197 /**
1198  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1199  *
1200  * @dev_priv: Pointer to a device private struct.
1201  * @sw_context: The software context being used for this batch.
1202  * @res_type: The resource type.
1203  * @converter: Information about user-space binding for this resource type.
1204  * @res_id: Pointer to the user-space resource handle in the command stream.
1205  * @buf_id: Pointer to the user-space backup buffer handle in the command
1206  * stream.
1207  * @backup_offset: Offset of backup into MOB.
1208  *
1209  * This function prepares for registering a switch of backup buffers
1210  * in the resource metadata just prior to unreserving.
1211  */
1212 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1213                                  struct vmw_sw_context *sw_context,
1214                                  enum vmw_res_type res_type,
1215                                  const struct vmw_user_resource_conv
1216                                  *converter,
1217                                  uint32_t *res_id,
1218                                  uint32_t *buf_id,
1219                                  unsigned long backup_offset)
1220 {
1221         int ret;
1222         struct vmw_dma_buffer *dma_buf;
1223         struct vmw_resource_val_node *val_node;
1224
1225         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1226                                 converter, res_id, &val_node);
1227         if (unlikely(ret != 0))
1228                 return ret;
1229
1230         ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1231         if (unlikely(ret != 0))
1232                 return ret;
1233
1234         if (val_node->first_usage)
1235                 val_node->no_buffer_needed = true;
1236
1237         vmw_dmabuf_unreference(&val_node->new_backup);
1238         val_node->new_backup = dma_buf;
1239         val_node->new_backup_offset = backup_offset;
1240
1241         return 0;
1242 }
1243
1244 /**
1245  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1246  * command
1247  *
1248  * @dev_priv: Pointer to a device private struct.
1249  * @sw_context: The software context being used for this batch.
1250  * @header: Pointer to the command header in the command stream.
1251  */
1252 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1253                                    struct vmw_sw_context *sw_context,
1254                                    SVGA3dCmdHeader *header)
1255 {
1256         struct vmw_bind_gb_surface_cmd {
1257                 SVGA3dCmdHeader header;
1258                 SVGA3dCmdBindGBSurface body;
1259         } *cmd;
1260
1261         cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1262
1263         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1264                                      user_surface_converter,
1265                                      &cmd->body.sid, &cmd->body.mobid,
1266                                      0);
1267 }
1268
1269 /**
1270  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1271  * command
1272  *
1273  * @dev_priv: Pointer to a device private struct.
1274  * @sw_context: The software context being used for this batch.
1275  * @header: Pointer to the command header in the command stream.
1276  */
1277 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1278                                    struct vmw_sw_context *sw_context,
1279                                    SVGA3dCmdHeader *header)
1280 {
1281         struct vmw_gb_surface_cmd {
1282                 SVGA3dCmdHeader header;
1283                 SVGA3dCmdUpdateGBImage body;
1284         } *cmd;
1285
1286         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1287
1288         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1289                                  user_surface_converter,
1290                                  &cmd->body.image.sid, NULL);
1291 }
1292
1293 /**
1294  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1295  * command
1296  *
1297  * @dev_priv: Pointer to a device private struct.
1298  * @sw_context: The software context being used for this batch.
1299  * @header: Pointer to the command header in the command stream.
1300  */
1301 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1302                                      struct vmw_sw_context *sw_context,
1303                                      SVGA3dCmdHeader *header)
1304 {
1305         struct vmw_gb_surface_cmd {
1306                 SVGA3dCmdHeader header;
1307                 SVGA3dCmdUpdateGBSurface body;
1308         } *cmd;
1309
1310         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1311
1312         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1313                                  user_surface_converter,
1314                                  &cmd->body.sid, NULL);
1315 }
1316
1317 /**
1318  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1319  * command
1320  *
1321  * @dev_priv: Pointer to a device private struct.
1322  * @sw_context: The software context being used for this batch.
1323  * @header: Pointer to the command header in the command stream.
1324  */
1325 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1326                                      struct vmw_sw_context *sw_context,
1327                                      SVGA3dCmdHeader *header)
1328 {
1329         struct vmw_gb_surface_cmd {
1330                 SVGA3dCmdHeader header;
1331                 SVGA3dCmdReadbackGBImage body;
1332         } *cmd;
1333
1334         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1335
1336         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1337                                  user_surface_converter,
1338                                  &cmd->body.image.sid, NULL);
1339 }
1340
1341 /**
1342  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1343  * command
1344  *
1345  * @dev_priv: Pointer to a device private struct.
1346  * @sw_context: The software context being used for this batch.
1347  * @header: Pointer to the command header in the command stream.
1348  */
1349 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1350                                        struct vmw_sw_context *sw_context,
1351                                        SVGA3dCmdHeader *header)
1352 {
1353         struct vmw_gb_surface_cmd {
1354                 SVGA3dCmdHeader header;
1355                 SVGA3dCmdReadbackGBSurface body;
1356         } *cmd;
1357
1358         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1359
1360         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1361                                  user_surface_converter,
1362                                  &cmd->body.sid, NULL);
1363 }
1364
1365 /**
1366  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1367  * command
1368  *
1369  * @dev_priv: Pointer to a device private struct.
1370  * @sw_context: The software context being used for this batch.
1371  * @header: Pointer to the command header in the command stream.
1372  */
1373 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1374                                        struct vmw_sw_context *sw_context,
1375                                        SVGA3dCmdHeader *header)
1376 {
1377         struct vmw_gb_surface_cmd {
1378                 SVGA3dCmdHeader header;
1379                 SVGA3dCmdInvalidateGBImage body;
1380         } *cmd;
1381
1382         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1383
1384         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1385                                  user_surface_converter,
1386                                  &cmd->body.image.sid, NULL);
1387 }
1388
1389 /**
1390  * vmw_cmd_invalidate_gb_surface - Validate an
1391  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1392  *
1393  * @dev_priv: Pointer to a device private struct.
1394  * @sw_context: The software context being used for this batch.
1395  * @header: Pointer to the command header in the command stream.
1396  */
1397 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1398                                          struct vmw_sw_context *sw_context,
1399                                          SVGA3dCmdHeader *header)
1400 {
1401         struct vmw_gb_surface_cmd {
1402                 SVGA3dCmdHeader header;
1403                 SVGA3dCmdInvalidateGBSurface body;
1404         } *cmd;
1405
1406         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1407
1408         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1409                                  user_surface_converter,
1410                                  &cmd->body.sid, NULL);
1411 }
1412
1413 /**
1414  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1415  * command
1416  *
1417  * @dev_priv: Pointer to a device private struct.
1418  * @sw_context: The software context being used for this batch.
1419  * @header: Pointer to the command header in the command stream.
1420  */
1421 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1422                               struct vmw_sw_context *sw_context,
1423                               SVGA3dCmdHeader *header)
1424 {
1425         struct vmw_set_shader_cmd {
1426                 SVGA3dCmdHeader header;
1427                 SVGA3dCmdSetShader body;
1428         } *cmd;
1429         int ret;
1430
1431         cmd = container_of(header, struct vmw_set_shader_cmd,
1432                            header);
1433
1434         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1435         if (unlikely(ret != 0))
1436                 return ret;
1437
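             /*
              * Without guest-backed objects the device tracks shaders per
              * context, so on legacy devices there is no separate shader
              * resource to look up here.
              */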
1439         if (dev_priv->has_mob)
1440                 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
1441                                          user_shader_converter,
1442                                          &cmd->body.shid, NULL);
1443
1444         return 0;
1445 }
1446
1447 /**
1448  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1449  * command
1450  *
1451  * @dev_priv: Pointer to a device private struct.
1452  * @sw_context: The software context being used for this batch.
1453  * @header: Pointer to the command header in the command stream.
1454  */
1455 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1456                                   struct vmw_sw_context *sw_context,
1457                                   SVGA3dCmdHeader *header)
1458 {
1459         struct vmw_bind_gb_shader_cmd {
1460                 SVGA3dCmdHeader header;
1461                 SVGA3dCmdBindGBShader body;
1462         } *cmd;
1463
1464         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1465                            header);
1466
1467         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1468                                      user_shader_converter,
1469                                      &cmd->body.shid, &cmd->body.mobid,
1470                                      cmd->body.offsetInBytes);
1471 }
1472
1473 /**
1474  * vmw_cmd_bind_gb_shader_consts - Validate an SVGA_3D_CMD_BIND_SHADERCONSTS
1475  * command
1476  *
1477  * @dev_priv: Pointer to a device private struct.
1478  * @sw_context: The software context being used for this batch.
1479  * @header: Pointer to the command header in the command stream.
1480  */
1481 static int vmw_cmd_bind_gb_shader_consts(struct vmw_private *dev_priv,
1482                                          struct vmw_sw_context *sw_context,
1483                                          SVGA3dCmdHeader *header)
1484 {
1485         struct vmw_bind_gb_sc_cmd {
1486                 SVGA3dCmdHeader header;
1487                 SVGA3dCmdBindGBShaderConsts body;
1488         } *cmd;
1489         int ret;
1490
1491         cmd = container_of(header, struct vmw_bind_gb_sc_cmd,
1492                            header);
1493
1494         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1495                                 user_context_converter,
1496                                 &cmd->body.cid, NULL);
1497         if (unlikely(ret != 0))
1498                 return ret;
1499
1500         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1501                                  user_surface_converter,
1502                                  &cmd->body.sid, NULL);
1503 }
1504
1505 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1506                                 struct vmw_sw_context *sw_context,
1507                                 void *buf, uint32_t *size)
1508 {
1509         uint32_t size_remaining = *size;
1510         uint32_t cmd_id;
1511
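             /*
              * Legacy (non-3D) fifo commands carry no size field, so derive
              * the command size from the command id and patch *size so the
              * caller can advance to the next command.
              */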
1512         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1513         switch (cmd_id) {
1514         case SVGA_CMD_UPDATE:
1515                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1516                 break;
1517         case SVGA_CMD_DEFINE_GMRFB:
1518                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1519                 break;
1520         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1521                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1522                 break;
1523         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1524                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
1525                 break;
1526         default:
1527                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1528                 return -EINVAL;
1529         }
1530
1531         if (*size > size_remaining) {
1532                 DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n", cmd_id);
1534                 return -EINVAL;
1535         }
1536
1537         if (unlikely(!sw_context->kernel)) {
1538                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1539                 return -EPERM;
1540         }
1541
1542         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1543                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1544
1545         return 0;
1546 }
1547
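     /*
      * Dispatch table for SVGA3D commands, indexed by command id relative to
      * SVGA_3D_CMD_BASE. vmw_cmd_check() consults this table to pick the
      * per-command validation function and to decide whether the command is
      * allowed in the current configuration.
      */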
1548 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1549         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1550                     false, false, false),
1551         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1552                     false, false, false),
1553         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1554                     true, false, false),
1555         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1556                     true, false, false),
1557         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1558                     true, false, false),
1559         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1560                     false, false, false),
1561         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1562                     false, false, false),
1563         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1564                     true, false, false),
1565         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1566                     true, false, false),
1567         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1568                     true, false, false),
1569         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1570                     &vmw_cmd_set_render_target_check, true, false, false),
1571         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1572                     true, false, false),
1573         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1574                     true, false, false),
1575         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1576                     true, false, false),
1577         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1578                     true, false, false),
1579         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1580                     true, false, false),
1581         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1582                     true, false, false),
1583         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1584                     true, false, false),
1585         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1586                     false, false, false),
1587         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
1588                     true, true, false),
1589         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
1590                     true, true, false),
1591         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1592                     true, false, false),
1593         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
1594                     true, true, false),
1595         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1596                     true, false, false),
1597         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1598                     true, false, false),
1599         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1600                     true, false, false),
1601         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1602                     true, false, false),
1603         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1604                     true, false, false),
1605         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1606                     true, false, false),
1607         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1608                     &vmw_cmd_blt_surf_screen_check, false, false, false),
1609         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1610                     false, false, false),
1611         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1612                     false, false, false),
1613         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1614                     false, false, false),
1615         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1616                     false, false, false),
1617         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1618                     false, false, false),
1619         VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1620                     false, false, false),
1621         VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1622                     false, false, false),
1623         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1624                     false, false, false),
1625         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1626                     false, false, false),
1627         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1628                     false, false, false),
1629         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1630                     false, false, false),
1631         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1632                     false, false, false),
1633         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1634                     false, false, false),
1635         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1636                     false, false, true),
1637         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1638                     false, false, true),
1639         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1640                     false, false, true),
1641         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1642                     false, false, true),
1643         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1644                     false, false, true),
1645         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1646                     false, false, true),
1647         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1648                     false, false, true),
1649         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1650                     false, false, true),
1651         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1652                     true, false, true),
1653         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1654                     false, false, true),
1655         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1656                     true, false, true),
1657         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1658                     &vmw_cmd_update_gb_surface, true, false, true),
1659         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
1660                     &vmw_cmd_readback_gb_image, true, false, true),
1661         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
1662                     &vmw_cmd_readback_gb_surface, true, false, true),
1663         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
1664                     &vmw_cmd_invalidate_gb_image, true, false, true),
1665         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
1666                     &vmw_cmd_invalidate_gb_surface, true, false, true),
1667         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
1668                     false, false, true),
1669         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
1670                     false, false, true),
1671         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
1672                     false, false, true),
1673         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
1674                     false, false, true),
1675         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
1676                     false, false, true),
1677         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
1678                     false, false, true),
1679         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
1680                     true, false, true),
1681         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
1682                     false, false, true),
1683         VMW_CMD_DEF(SVGA_3D_CMD_BIND_SHADERCONSTS,
1684                     &vmw_cmd_bind_gb_shader_consts, true, false, true),
1685         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
1686                     true, false, true),
1687         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
1688                     true, false, true),
1689         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
1690                     true, false, true),
1691         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
1692                     true, false, true),
1693         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
1694                     false, false, true),
1695         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
1696                     false, false, true),
1697         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
1698                     false, false, true),
1699         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
1700                     false, false, true),
1701         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
1702                     false, false, true),
1703         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
1704                     false, false, true),
1705         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
1706                     false, false, true),
1707         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
1708                     false, false, true),
1709         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
1710                     false, false, true),
1711         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
1712                     false, false, true),
1713         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
1714                     true, false, true)
1715 };
1716
1717 static int vmw_cmd_check(struct vmw_private *dev_priv,
1718                          struct vmw_sw_context *sw_context,
1719                          void *buf, uint32_t *size)
1720 {
1721         uint32_t cmd_id;
1722         uint32_t size_remaining = *size;
1723         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
1724         int ret;
1725         const struct vmw_cmd_entry *entry;
1726         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
1727
1728         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1729         /* Handle any non-3D commands. */
1730         if (unlikely(cmd_id < SVGA_CMD_MAX))
1731                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
1732
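             /*
              * 3D commands start with an SVGA3dCmdHeader; header->size does
              * not include the header itself, so add it back here.
              */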
1734         cmd_id = le32_to_cpu(header->id);
1735         *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
1736
1737         cmd_id -= SVGA_3D_CMD_BASE;
1738         if (unlikely(*size > size_remaining))
1739                 goto out_invalid;
1740
1741         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
1742                 goto out_invalid;
1743
1744         entry = &vmw_cmd_entries[cmd_id];
1745         if (unlikely(!entry->user_allow && !sw_context->kernel))
1746                 goto out_privileged;
1747
1748         if (unlikely(entry->gb_disable && gb))
1749                 goto out_old;
1750
1751         if (unlikely(entry->gb_enable && !gb))
1752                 goto out_new;
1753
1754         ret = entry->func(dev_priv, sw_context, header);
1755         if (unlikely(ret != 0))
1756                 goto out_invalid;
1757
1758         return 0;
1759 out_invalid:
1760         DRM_ERROR("Invalid SVGA3D command: %d\n",
1761                   cmd_id + SVGA_3D_CMD_BASE);
1762         return -EINVAL;
1763 out_privileged:
1764         DRM_ERROR("Privileged SVGA3D command: %d\n",
1765                   cmd_id + SVGA_3D_CMD_BASE);
1766         return -EPERM;
1767 out_old:
1768         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
1769                   cmd_id + SVGA_3D_CMD_BASE);
1770         return -EINVAL;
1771 out_new:
1772         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
1773                   cmd_id + SVGA_3D_CMD_BASE);
1774         return -EINVAL;
1775 }
1776
1777 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
1778                              struct vmw_sw_context *sw_context,
1779                              void *buf,
1780                              uint32_t size)
1781 {
1782         int32_t cur_size = size;
1783         int ret;
1784
1785         sw_context->buf_start = buf;
1786
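             /*
              * Walk the stream one command at a time; vmw_cmd_check()
              * adjusts size to the length of the command it just validated
              * so we can advance to the next one.
              */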
1787         while (cur_size > 0) {
1788                 size = cur_size;
1789                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
1790                 if (unlikely(ret != 0))
1791                         return ret;
1792                 buf = (void *)((unsigned long) buf + size);
1793                 cur_size -= size;
1794         }
1795
1796         if (unlikely(cur_size != 0)) {
1797                 DRM_ERROR("Command verifier out of sync.\n");
1798                 return -EINVAL;
1799         }
1800
1801         return 0;
1802 }
1803
1804 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
1805 {
1806         sw_context->cur_reloc = 0;
1807 }
1808
1809 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
1810 {
1811         uint32_t i;
1812         struct vmw_relocation *reloc;
1813         struct ttm_validate_buffer *validate;
1814         struct ttm_buffer_object *bo;
1815
1816         for (i = 0; i < sw_context->cur_reloc; ++i) {
1817                 reloc = &sw_context->relocs[i];
1818                 validate = &sw_context->val_bufs[reloc->index].base;
1819                 bo = validate->bo;
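                     /*
                      * Patch the command stream with the buffer's final
                      * placement: a VRAM offset, a GMR id or a MOB id.
                      */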
1820                 switch (bo->mem.mem_type) {
1821                 case TTM_PL_VRAM:
1822                         reloc->location->offset += bo->offset;
1823                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
1824                         break;
1825                 case VMW_PL_GMR:
1826                         reloc->location->gmrId = bo->mem.start;
1827                         break;
1828                 case VMW_PL_MOB:
1829                         *reloc->mob_loc = bo->mem.start;
1830                         break;
1831                 default:
1832                         BUG();
1833                 }
1834         }
1835         vmw_free_relocations(sw_context);
1836 }
1837
1838 /**
1839  * vmw_resource_list_unreference - Free up a resource list and unreference
1840  * all resources referenced by it.
1841  *
1842  * @list: The resource list.
1843  */
1844 static void vmw_resource_list_unreference(struct list_head *list)
1845 {
1846         struct vmw_resource_val_node *val, *val_next;
1847
1848         /*
1849          * Drop references to resources held during command submission.
1850          */
1851
1852         list_for_each_entry_safe(val, val_next, list, head) {
1853                 list_del_init(&val->head);
1854                 vmw_resource_unreference(&val->res);
1855                 kfree(val);
1856         }
1857 }
1858
1859 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
1860 {
1861         struct vmw_validate_buffer *entry, *next;
1862         struct vmw_resource_val_node *val;
1863
1864         /*
1865          * Drop references to DMA buffers held during command submission.
1866          */
1867         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
1868                                  base.head) {
1869                 list_del(&entry->base.head);
1870                 ttm_bo_unref(&entry->base.bo);
1871                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
1872                 sw_context->cur_val_buf--;
1873         }
1874         BUG_ON(sw_context->cur_val_buf != 0);
1875
1876         list_for_each_entry(val, &sw_context->resource_list, head)
1877                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
1878 }
1879
1880 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
1881                                       struct ttm_buffer_object *bo,
1882                                       bool validate_as_mob)
1883 {
1884         int ret;
1885
1887         /*
1888          * Don't validate pinned buffers.
1889          */
1890
1891         if (bo == dev_priv->pinned_bo ||
1892             (bo == dev_priv->dummy_query_bo &&
1893              dev_priv->dummy_query_bo_pinned))
1894                 return 0;
1895
1896         if (validate_as_mob)
1897                 return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
1898
1899         /*
1900          * Put BO in VRAM if there is space, otherwise as a GMR.
1901          * If there is no space in VRAM and GMR ids are all used up,
1902          * start evicting GMRs to make room. If the DMA buffer can't be
1903          * used as a GMR, this will return -ENOMEM.
1904          */
1905
1906         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
1907         if (likely(ret == 0 || ret == -ERESTARTSYS))
1908                 return ret;
1909
1910         /*
1911          * If that failed, try VRAM again, this time evicting
1912          * previous contents.
1913          */
1914
1915         DRM_INFO("Falling through to VRAM.\n");
1916         ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
1917         return ret;
1918 }
1919
1920 static int vmw_validate_buffers(struct vmw_private *dev_priv,
1921                                 struct vmw_sw_context *sw_context)
1922 {
1923         struct vmw_validate_buffer *entry;
1924         int ret;
1925
1926         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
1927                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
1928                                                  entry->validate_as_mob);
1929                 if (unlikely(ret != 0))
1930                         return ret;
1931         }
1932         return 0;
1933 }
1934
1935 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
1936                                  uint32_t size)
1937 {
1938         if (likely(sw_context->cmd_bounce_size >= size))
1939                 return 0;
1940
1941         if (sw_context->cmd_bounce_size == 0)
1942                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
1943
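             /* Grow by 50%, page-aligned, until the requested size fits. */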
1944         while (sw_context->cmd_bounce_size < size) {
1945                 sw_context->cmd_bounce_size =
1946                         PAGE_ALIGN(sw_context->cmd_bounce_size +
1947                                    (sw_context->cmd_bounce_size >> 1));
1948         }
1949
1950         if (sw_context->cmd_bounce != NULL)
1951                 vfree(sw_context->cmd_bounce);
1952
1953         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
1954
1955         if (sw_context->cmd_bounce == NULL) {
1956                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
1957                 sw_context->cmd_bounce_size = 0;
1958                 return -ENOMEM;
1959         }
1960
1961         return 0;
1962 }
1963
1964 /**
1965  * vmw_execbuf_fence_commands - create and submit a command stream fence
1966  *
1967  * Creates a fence object and submits a command stream marker.
1968  * If this fails for some reason, we sync the fifo and return NULL.
1969  * It is then safe to fence buffers with a NULL pointer.
1970  *
1971  * If @p_handle is non-NULL, @file_priv must also be non-NULL, and a
1972  * user-space fence handle is created; otherwise no handle is created.
1973  */
1974
1975 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
1976                                struct vmw_private *dev_priv,
1977                                struct vmw_fence_obj **p_fence,
1978                                uint32_t *p_handle)
1979 {
1980         uint32_t sequence;
1981         int ret;
1982         bool synced = false;
1983
1984         /* p_handle implies file_priv. */
1985         BUG_ON(p_handle != NULL && file_priv == NULL);
1986
1987         ret = vmw_fifo_send_fence(dev_priv, &sequence);
1988         if (unlikely(ret != 0)) {
1989                 DRM_ERROR("Fence submission error. Syncing.\n");
1990                 synced = true;
1991         }
1992
1993         if (p_handle != NULL)
1994                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
1995                                             sequence,
1996                                             DRM_VMW_FENCE_FLAG_EXEC,
1997                                             p_fence, p_handle);
1998         else
1999                 ret = vmw_fence_create(dev_priv->fman, sequence,
2000                                        DRM_VMW_FENCE_FLAG_EXEC,
2001                                        p_fence);
2002
2003         if (unlikely(ret != 0 && !synced)) {
2004                 (void) vmw_fallback_wait(dev_priv, false, false,
2005                                          sequence, false,
2006                                          VMW_FENCE_WAIT_TIMEOUT);
2007                 *p_fence = NULL;
2008         }
2009
2010         return 0;
2011 }
2012
2013 /**
2014  * vmw_execbuf_copy_fence_user - copy fence object information to
2015  * user-space.
2016  *
2017  * @dev_priv: Pointer to a vmw_private struct.
2018  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2019  * @ret: Return value from fence object creation.
2020  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2021  * which the information should be copied.
2022  * @fence: Pointer to the fence object.
2023  * @fence_handle: User-space fence handle.
2024  *
2025  * This function copies fence information to user-space. If copying fails,
2026  * the user-space struct drm_vmw_fence_rep::error member is hopefully
2027  * left untouched, and if user-space has preloaded it with -EFAULT, the
2028  * error will hopefully be detected.
2029  * Also, if copying fails, user-space will be unable to signal the fence
2030  * object, so we wait for it immediately and then drop the user-space
2031  * reference.
2032  */
2033 void
2034 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2035                             struct vmw_fpriv *vmw_fp,
2036                             int ret,
2037                             struct drm_vmw_fence_rep __user *user_fence_rep,
2038                             struct vmw_fence_obj *fence,
2039                             uint32_t fence_handle)
2040 {
2041         struct drm_vmw_fence_rep fence_rep;
2042
2043         if (user_fence_rep == NULL)
2044                 return;
2045
2046         memset(&fence_rep, 0, sizeof(fence_rep));
2047
2048         fence_rep.error = ret;
2049         if (ret == 0) {
2050                 BUG_ON(fence == NULL);
2051
2052                 fence_rep.handle = fence_handle;
2053                 fence_rep.seqno = fence->seqno;
2054                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
2055                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
2056         }
2057
2058         /*
2059          * copy_to_user errors will be detected by user space not
2060          * seeing fence_rep::error filled in. Typically
2061          * user-space would have pre-set that member to -EFAULT.
2062          */
2063         ret = copy_to_user(user_fence_rep, &fence_rep,
2064                            sizeof(fence_rep));
2065
2066         /*
2067          * User-space lost the fence object. We need to sync
2068          * and unreference the handle.
2069          */
2070         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2071                 ttm_ref_object_base_unref(vmw_fp->tfile,
2072                                           fence_handle, TTM_REF_USAGE);
2073                 DRM_ERROR("Fence copy error. Syncing.\n");
2074                 (void) vmw_fence_obj_wait(fence, fence->signal_mask,
2075                                           false, false,
2076                                           VMW_FENCE_WAIT_TIMEOUT);
2077         }
2078 }
2079
2080 int vmw_execbuf_process(struct drm_file *file_priv,
2081                         struct vmw_private *dev_priv,
2082                         void __user *user_commands,
2083                         void *kernel_commands,
2084                         uint32_t command_size,
2085                         uint64_t throttle_us,
2086                         struct drm_vmw_fence_rep __user *user_fence_rep,
2087                         struct vmw_fence_obj **out_fence)
2088 {
2089         struct vmw_sw_context *sw_context = &dev_priv->ctx;
2090         struct vmw_fence_obj *fence = NULL;
2091         struct vmw_resource *error_resource;
2092         struct list_head resource_list;
2093         struct ww_acquire_ctx ticket;
2094         uint32_t handle;
2095         void *cmd;
2096         int ret;
2097
2098         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2099         if (unlikely(ret != 0))
2100                 return -ERESTARTSYS;
2101
2102         if (kernel_commands == NULL) {
2103                 sw_context->kernel = false;
2104
2105                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2106                 if (unlikely(ret != 0))
2107                         goto out_unlock;
2108
2110                 ret = copy_from_user(sw_context->cmd_bounce,
2111                                      user_commands, command_size);
2112
2113                 if (unlikely(ret != 0)) {
2114                         ret = -EFAULT;
2115                         DRM_ERROR("Failed copying commands.\n");
2116                         goto out_unlock;
2117                 }
2118                 kernel_commands = sw_context->cmd_bounce;
2119         } else
2120                 sw_context->kernel = true;
2121
2122         sw_context->tfile = vmw_fpriv(file_priv)->tfile;
2123         sw_context->cur_reloc = 0;
2124         sw_context->cur_val_buf = 0;
2125         sw_context->fence_flags = 0;
2126         INIT_LIST_HEAD(&sw_context->resource_list);
2127         sw_context->cur_query_bo = dev_priv->pinned_bo;
2128         sw_context->last_query_ctx = NULL;
2129         sw_context->needs_post_query_barrier = false;
2130         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2131         INIT_LIST_HEAD(&sw_context->validate_nodes);
2132         INIT_LIST_HEAD(&sw_context->res_relocations);
2133         if (!sw_context->res_ht_initialized) {
2134                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2135                 if (unlikely(ret != 0))
2136                         goto out_unlock;
2137                 sw_context->res_ht_initialized = true;
2138         }
2139
2140         INIT_LIST_HEAD(&resource_list);
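             /*
              * Verify the command stream, then reserve and validate all
              * resources and buffers it references.
              */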
2141         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2142                                 command_size);
2143         if (unlikely(ret != 0))
2144                 goto out_err;
2145
2146         ret = vmw_resources_reserve(sw_context);
2147         if (unlikely(ret != 0))
2148                 goto out_err;
2149
2150         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
2151         if (unlikely(ret != 0))
2152                 goto out_err;
2153
2154         ret = vmw_validate_buffers(dev_priv, sw_context);
2155         if (unlikely(ret != 0))
2156                 goto out_err;
2157
2158         ret = vmw_resources_validate(sw_context);
2159         if (unlikely(ret != 0))
2160                 goto out_err;
2161
2162         if (throttle_us) {
2163                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2164                                    throttle_us);
2165
2166                 if (unlikely(ret != 0))
2167                         goto out_err;
2168         }
2169
2170         cmd = vmw_fifo_reserve(dev_priv, command_size);
2171         if (unlikely(cmd == NULL)) {
2172                 DRM_ERROR("Failed reserving fifo space for commands.\n");
2173                 ret = -ENOMEM;
2174                 goto out_err;
2175         }
2176
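             /*
              * Patch buffer placements into the command buffer, copy it to
              * the fifo and then fix up resource ids in the fifo copy.
              */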
2177         vmw_apply_relocations(sw_context);
2178         memcpy(cmd, kernel_commands, command_size);
2179
2180         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2181         vmw_resource_relocations_free(&sw_context->res_relocations);
2182
2183         vmw_fifo_commit(dev_priv, command_size);
2184
2185         vmw_query_bo_switch_commit(dev_priv, sw_context);
2186         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2187                                          &fence,
2188                                          (user_fence_rep) ? &handle : NULL);
2189         /*
2190          * This error is harmless, because if fence submission fails,
2191          * vmw_fifo_send_fence will sync. The error will be propagated to
2192          * user-space in @fence_rep
2193          */
2194
2195         if (ret != 0)
2196                 DRM_ERROR("Fence submission error. Syncing.\n");
2197
2198         vmw_resource_list_unreserve(&sw_context->resource_list, false);
2199         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2200                                     (void *) fence);
2201
2202         if (unlikely(dev_priv->pinned_bo != NULL &&
2203                      !dev_priv->query_cid_valid))
2204                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2205
2206         vmw_clear_validations(sw_context);
2207         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2208                                     user_fence_rep, fence, handle);
2209
2210         /* Don't unreference when handing fence out */
2211         if (unlikely(out_fence != NULL)) {
2212                 *out_fence = fence;
2213                 fence = NULL;
2214         } else if (likely(fence != NULL)) {
2215                 vmw_fence_obj_unreference(&fence);
2216         }
2217
2218         list_splice_init(&sw_context->resource_list, &resource_list);
2219         mutex_unlock(&dev_priv->cmdbuf_mutex);
2220
2221         /*
2222          * Unreference resources outside of the cmdbuf_mutex to
2223          * avoid deadlocks in resource destruction paths.
2224          */
2225         vmw_resource_list_unreference(&resource_list);
2226
2227         return 0;
2228
2229 out_err:
2230         vmw_resource_relocations_free(&sw_context->res_relocations);
2231         vmw_free_relocations(sw_context);
2232         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2233         vmw_resource_list_unreserve(&sw_context->resource_list, true);
2234         vmw_clear_validations(sw_context);
2235         if (unlikely(dev_priv->pinned_bo != NULL &&
2236                      !dev_priv->query_cid_valid))
2237                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2238 out_unlock:
2239         list_splice_init(&sw_context->resource_list, &resource_list);
2240         error_resource = sw_context->error_resource;
2241         sw_context->error_resource = NULL;
2242         mutex_unlock(&dev_priv->cmdbuf_mutex);
2243
2244         /*
2245          * Unreference resources outside of the cmdbuf_mutex to
2246          * avoid deadlocks in resource destruction paths.
2247          */
2248         vmw_resource_list_unreference(&resource_list);
2249         if (unlikely(error_resource != NULL))
2250                 vmw_resource_unreference(&error_resource);
2251
2252         return ret;
2253 }
2254
2255 /**
2256  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2257  *
2258  * @dev_priv: The device private structure.
2259  *
2260  * This function is called to idle the fifo and unpin the query buffer
2261  * if the normal way to do this hits an error, which should typically be
2262  * extremely rare.
2263  */
2264 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2265 {
2266         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2267
2268         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2269         vmw_bo_pin(dev_priv->pinned_bo, false);
2270         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2271         dev_priv->dummy_query_bo_pinned = false;
2272 }
2273
2274
2275 /**
2276  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2277  * query bo.
2278  *
2279  * @dev_priv: The device private structure.
2280  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2281  * _after_ a query barrier that flushes all queries touching the current
2282  * buffer pointed to by @dev_priv->pinned_bo
2283  *
2284  * This function should be used to unpin the pinned query bo, or
2285  * as a query barrier when we need to make sure that all queries have
2286  * finished before the next fifo command. (For example on hardware
2287  * context destructions where the hardware may otherwise leak unfinished
2288  * queries).
2289  *
2290  * This function does not return any failure codes, but attempts to do
2291  * safe unpinning in case of errors.
2292  *
2293  * The function will synchronize on the previous query barrier, and will
2294  * thus not finish until that barrier has executed.
2295  *
2296  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2297  * before calling this function.
2298  */
2299 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2300                                      struct vmw_fence_obj *fence)
2301 {
2302         int ret = 0;
2303         struct list_head validate_list;
2304         struct ttm_validate_buffer pinned_val, query_val;
2305         struct vmw_fence_obj *lfence = NULL;
2306         struct ww_acquire_ctx ticket;
2307
2308         if (dev_priv->pinned_bo == NULL)
2309                 goto out_unlock;
2310
2311         INIT_LIST_HEAD(&validate_list);
2312
2313         pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2314         list_add_tail(&pinned_val.head, &validate_list);
2315
2316         query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2317         list_add_tail(&query_val.head, &validate_list);
2318
2319         do {
2320                 ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2321         } while (ret == -ERESTARTSYS);
2322
2323         if (unlikely(ret != 0)) {
2324                 vmw_execbuf_unpin_panic(dev_priv);
2325                 goto out_no_reserve;
2326         }
2327
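             /*
              * If a query context is still active, emit a dummy query to
              * flush outstanding queries before unpinning the buffer.
              */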
2328         if (dev_priv->query_cid_valid) {
2329                 BUG_ON(fence != NULL);
2330                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2331                 if (unlikely(ret != 0)) {
2332                         vmw_execbuf_unpin_panic(dev_priv);
2333                         goto out_no_emit;
2334                 }
2335                 dev_priv->query_cid_valid = false;
2336         }
2337
2338         vmw_bo_pin(dev_priv->pinned_bo, false);
2339         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2340         dev_priv->dummy_query_bo_pinned = false;
2341
2342         if (fence == NULL) {
2343                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2344                                                   NULL);
2345                 fence = lfence;
2346         }
2347         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2348         if (lfence != NULL)
2349                 vmw_fence_obj_unreference(&lfence);
2350
2351         ttm_bo_unref(&query_val.bo);
2352         ttm_bo_unref(&pinned_val.bo);
2353         ttm_bo_unref(&dev_priv->pinned_bo);
2354
2355 out_unlock:
2356         return;
2357
2358 out_no_emit:
2359         ttm_eu_backoff_reservation(&ticket, &validate_list);
2360 out_no_reserve:
2361         ttm_bo_unref(&query_val.bo);
2362         ttm_bo_unref(&pinned_val.bo);
2363         ttm_bo_unref(&dev_priv->pinned_bo);
2364 }
2365
2366 /**
2367  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2368  * query bo.
2369  *
2370  * @dev_priv: The device private structure.
2371  *
2372  * This function should be used to unpin the pinned query bo, or
2373  * as a query barrier when we need to make sure that all queries have
2374  * finished before the next fifo command. (For example on hardware
2375  * context destructions where the hardware may otherwise leak unfinished
2376  * queries).
2377  *
2378  * This function does not return any failure codes, but attempts to do
2379  * safe unpinning in case of errors.
2380  *
2381  * The function will synchronize on the previous query barrier, and will
2382  * thus not finish until that barrier has executed.
2383  */
2384 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2385 {
2386         mutex_lock(&dev_priv->cmdbuf_mutex);
2387         if (dev_priv->query_cid_valid)
2388                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2389         mutex_unlock(&dev_priv->cmdbuf_mutex);
2390 }
2391
2392
2393 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2394                       struct drm_file *file_priv)
2395 {
2396         struct vmw_private *dev_priv = vmw_priv(dev);
2397         struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2398         struct vmw_master *vmaster = vmw_master(file_priv->master);
2399         int ret;
2400
2401         /*
2402          * This will allow us to extend the ioctl argument while
2403          * maintaining backwards compatibility:
2404          * We take different code paths depending on the value of
2405          * arg->version.
2406          */
2407
2408         if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2409                 DRM_ERROR("Incorrect execbuf version.\n");
2410                 DRM_ERROR("You're running outdated experimental "
2411                           "vmwgfx user-space drivers.\n");
2412                 return -EINVAL;
2413         }
2414
2415         ret = ttm_read_lock(&vmaster->lock, true);
2416         if (unlikely(ret != 0))
2417                 return ret;
2418
2419         ret = vmw_execbuf_process(file_priv, dev_priv,
2420                                   (void __user *)(unsigned long)arg->commands,
2421                                   NULL, arg->command_size, arg->throttle_us,
2422                                   (void __user *)(unsigned long)arg->fence_rep,
2423                                   NULL);
2424
2425         if (unlikely(ret != 0))
2426                 goto out_unlock;
2427
2428         vmw_kms_cursor_post_execbuf(dev_priv);
2429
2430 out_unlock:
2431         ttm_read_unlock(&vmaster->lock);
2432         return ret;
2433 }