vmwgfx: Clean up pending event references to struct drm_file objects on close
firefly-linux-kernel-4.4.55.git: drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM                                 \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,          \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF                              \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,       \
                union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,        \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,       \
                 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,      \
                 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM                              \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,        \
                 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,        \
                 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT                            \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,      \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,       \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,     \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,       \
                 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE                               \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,        \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,             \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP                                \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,          \
                 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,     \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,         \
                 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,         \
                 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,             \
                 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK                          \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,    \
                 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
                 struct drm_vmw_update_layout_arg)

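/*
 * Userspace reaches these through the generic DRM ioctl path. A minimal
 * sketch of a caller (illustrative only; assumes the argument layouts
 * from vmw_drm.h and the usual device node name):
 *
 *        int fd = open("/dev/dri/card0", O_RDWR);
 *        struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *
 *        if (fd >= 0 && ioctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg) == 0)
 *                printf("3D: %llu\n", (unsigned long long) arg.value);
 */
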
/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
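
/*
 * For example, VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags)
 * places its descriptor at index DRM_VMW_GET_PARAM of vmw_ioctls[] and
 * records the fully encoded DRM_IOCTL_VMW_GET_PARAM number, which
 * vmw_unlocked_ioctl() below checks against the incoming cmd.
 */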

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT,
                      vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),

        /* These allow direct access to the framebuffers, so mark them as master only */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
}


/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
        int ret;
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

        ttm_bo_reserve(bo, false, false, false, 0);
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
        if (unlikely(ret != 0))
                (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
                                         10*HZ);

        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        } else
                DRM_ERROR("Dummy query buffer map failed.\n");
        ttm_bo_unreserve(bo);
}


/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        return ttm_bo_create(&dev_priv->bdev,
                             PAGE_SIZE,
                             ttm_bo_type_device,
                             &vmw_vram_sys_placement,
                             0, 0, false, NULL,
                             &dev_priv->dummy_query_bo);
}


static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;
        vmw_dummy_query_bo_prepare(dev_priv);

        return 0;

out_no_query_bo:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        ttm_bo_unref(&dev_priv->dummy_query_bo);
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
        int ret = 0;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(dev_priv->num_3d_resources++ == 0)) {
                ret = vmw_request_device(dev_priv);
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        mutex_unlock(&dev_priv->release_mutex);
        return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
        int32_t n3d;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
        else if (hide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);

        BUG_ON(n3d < 0);
}
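
/*
 * Note on pairing: every successful vmw_3d_resource_inc() is expected to
 * be balanced by a vmw_3d_resource_dec(); see for instance the fbdev
 * reference that is dropped in vmw_pm_prepare() and re-taken in
 * vmw_pm_complete() below.
 */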

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }
        memset(dev_priv, 0, sizeof(*dev_priv));

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        idr_init(&dev_priv->stream_idr);
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
        INIT_LIST_HEAD(&dev_priv->surface_lru);
        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->enable_fb = enable_fbdev;

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                dev_priv->max_gmr_descriptors =
                        vmw_read(dev_priv,
                                 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }

        mutex_unlock(&dev_priv->hw_mutex);

        vmw_print_capabilities(dev_priv->capabilities);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max GMR descriptors is %u\n",
                         (unsigned)dev_priv->max_gmr_descriptors);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;


        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;


        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_err1;
        }

        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_err2;
        }

        dev_priv->has_gmr = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                           dev_priv->max_gmr_ids) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size, DRM_MTRR_WC);

        dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                         dev_priv->mmio_size);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
            (dev_priv->mem_global_ref.object, 12);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /**
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("It appears like vesafb is loaded. "
                         "Ignore above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL))
                goto out_no_fman;

        /* Need to start the fifo to check if we can do screen objects */
        ret = vmw_3d_resource_inc(dev_priv, true);
        if (unlikely(ret != 0))
                goto out_no_fifo;
        vmw_kms_save_vga(dev_priv);

        /* Start kms and overlay systems, needs fifo. */
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        /* 3D Depends on Screen Objects being used. */
        DRM_INFO("Detected %sdevice 3D availability.\n",
                 vmw_fifo_have_3d(dev_priv) ?
                 "" : "no ");

        /* We might be done with the fifo now */
        if (dev_priv->enable_fb) {
                vmw_fb_init(dev_priv);
        } else {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_irq:
        if (dev_priv->enable_fb)
                vmw_fb_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        /* We still have a 3D resource reference held */
        if (dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
out_no_fifo:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        iounmap(dev_priv->mmio_virt);
out_err3:
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
        vmw_ttm_global_release(dev_priv);
out_err0:
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
        kfree(dev_priv);
        return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.cmd_bounce)
                vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);

        kfree(dev_priv);

        return 0;
}

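/*
 * vmw_preclose runs before the DRM core starts tearing down @file_priv,
 * which is the point where pending event references to the struct
 * drm_file must be dropped: vmw_event_fence_fpriv_gone() removes any
 * fence events still queued for this file.
 */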
static void vmw_preclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_private *dev_priv = vmw_priv(dev);

        vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);
        ttm_object_file_release(&vmw_fp->tfile);
        if (vmw_fp->locked_master)
                drm_master_put(&vmw_fp->locked_master);
        kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

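        /*
         * Pending fence events queued against this file are tracked on
         * this list; vmw_preclose() above hands it to
         * vmw_event_fence_fpriv_gone() so no event is left referencing
         * the file after close.
         */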
        INIT_LIST_HEAD(&vmw_fp->fence_events);
        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        if (unlikely(dev_priv->bdev.dev_mapping == NULL))
                dev_priv->bdev.dev_mapping =
                        file_priv->filp->f_path.dentry->d_inode->i_mapping;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
                }
        }

        return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        dev_priv->is_opened = true;

        return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;

        /**
         * Do nothing on the lastclose call from drm_unload.
         */

        if (!dev_priv->is_opened)
                return;

        dev_priv->is_opened = false;
        set.x = 0;
        set.y = 0;
        set.fb = NULL;
        set.mode = NULL;
        set.connectors = NULL;
        set.num_connectors = 0;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                set.crtc = crtc;
                ret = crtc->funcs->set_config(&set);
                WARN_ON(ret != 0);
        }

}

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
        INIT_LIST_HEAD(&vmaster->fb_surf);
        mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (!dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        goto out_no_active_lock;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to clean VRAM on "
                                  "master drop.\n");
                }

                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;

out_no_active_lock:
        if (!dev_priv->enable_fb) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }
        return ret;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /**
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}


static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);
        struct vmw_master *vmaster = dev_priv->active_master;

        switch (val) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ttm_suspend_lock(&vmaster->lock);

                /**
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
                ttm_bo_swapout_all(&dev_priv->bdev);

                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
                ttm_suspend_unlock(&vmaster->lock);

                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");
                return -EBUSY;
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Release 3d reference held by fbdev and potentially
         * stop fifo.
         */
        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_3d_resource_dec(dev_priv, true);

        if (dev_priv->num_3d_resources != 0) {

                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");

                if (dev_priv->enable_fb)
                        vmw_3d_resource_inc(dev_priv, true);
                dev_priv->suspended = false;
                return -EBUSY;
        }

        return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Reclaim 3d reference held by fbdev and potentially
         * start fifo.
         */
        if (dev_priv->enable_fb)
                vmw_3d_resource_inc(dev_priv, false);

        dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .prepare = vmw_pm_prepare,
        .complete = vmw_pm_complete,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = vmw_unlocked_ioctl,
        .mmap = vmw_mmap,
        .poll = vmw_fops_poll,
        .read = vmw_fops_read,
        .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
        .compat_ioctl = drm_compat_ioctl,
#endif
        .llseek = noop_llseek,
};

static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .firstopen = vmw_firstopen,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .enable_vblank = vmw_enable_vblank,
        .disable_vblank = vmw_disable_vblank,
        .reclaim_buffers_locked = NULL,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .preclose = vmw_preclose,
        .postclose = vmw_postclose,
        .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
        int ret;
        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");