d1562281e60745cb9d3782931b3c0a6f910f6346
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / i915 / i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/async.h>
32 #include <drm/drmP.h>
33 #include <drm/drm_crtc_helper.h>
34 #include <drm/drm_fb_helper.h>
35 #include <drm/drm_legacy.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include <linux/pci.h>
41 #include <linux/console.h>
42 #include <linux/vt.h>
43 #include <linux/vgaarb.h>
44 #include <linux/acpi.h>
45 #include <linux/pnp.h>
46 #include <linux/vga_switcheroo.h>
47 #include <linux/slab.h>
48 #include <acpi/video.h>
49 #include <linux/pm.h>
50 #include <linux/pm_runtime.h>
51 #include <linux/oom.h>
52
/* Legacy (UMS) DMA-init ioctl: support was removed, always fails. */
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	return -ENODEV;
}
58
/* Legacy (UMS) flush ioctl: support was removed, always fails. */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	return -ENODEV;
}
64
/* Legacy (UMS) batchbuffer-submit ioctl: support was removed, always fails. */
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	return -ENODEV;
}
70
/* Legacy (UMS) command-buffer ioctl: support was removed, always fails. */
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	return -ENODEV;
}
76
/* Legacy (UMS) irq-emit ioctl: support was removed, always fails. */
static int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	return -ENODEV;
}
82
/* Legacy (UMS) irq-wait ioctl: support was removed, always fails. */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	return -ENODEV;
}
88
/* Legacy (UMS) vblank-pipe query ioctl: support was removed, always fails. */
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	return -ENODEV;
}
94
/* Legacy (UMS) vblank-swap ioctl: support was removed, always fails. */
static int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	return -ENODEV;
}
100
/* Legacy (UMS) buffer-flip ioctl: support was removed, always fails. */
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	return -ENODEV;
}
106
/*
 * GETPARAM ioctl: report a single driver/hardware capability to userspace.
 *
 * @data is a drm_i915_getparam_t carrying the parameter id and a user-space
 * pointer the integer result is copied to.  Returns 0 on success, -ENODEV
 * for removed legacy (UMS) parameters, -EINVAL for an unknown parameter,
 * -EFAULT if the copy-out fails.
 */
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	/* Legacy UMS-era parameters: no longer supported. */
	case I915_PARAM_IRQ_ACTIVE:
		return -ENODEV;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		return -ENODEV;
	case I915_PARAM_LAST_DISPATCH:
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		/* Fence registers minus those reserved via SETPARAM below. */
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	/* Per-engine availability depends on ring init having succeeded. */
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	/* Secure batches require privilege; check the caller, not the hw. */
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	/* Copy the result out to the user-supplied pointer. */
	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}
217
218 static int i915_setparam(struct drm_device *dev, void *data,
219                          struct drm_file *file_priv)
220 {
221         struct drm_i915_private *dev_priv = dev->dev_private;
222         drm_i915_setparam_t *param = data;
223
224         if (!dev_priv) {
225                 DRM_ERROR("called with no initialization\n");
226                 return -EINVAL;
227         }
228
229         switch (param->param) {
230         case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
231         case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
232         case I915_SETPARAM_ALLOW_BATCHBUFFER:
233                 return -ENODEV;
234
235         case I915_SETPARAM_NUM_USED_FENCES:
236                 if (param->value > dev_priv->num_fence_regs ||
237                     param->value < 0)
238                         return -EINVAL;
239                 /* Userspace can use first N regs */
240                 dev_priv->fence_reg_start = param->value;
241                 break;
242         default:
243                 DRM_DEBUG_DRIVER("unknown parameter %d\n",
244                                         param->param);
245                 return -EINVAL;
246         }
247
248         return 0;
249 }
250
/* Legacy (UMS) status-page ioctl: support was removed, always fails. */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	return -ENODEV;
}
256
257 static int i915_get_bridge_dev(struct drm_device *dev)
258 {
259         struct drm_i915_private *dev_priv = dev->dev_private;
260
261         dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
262         if (!dev_priv->bridge_dev) {
263                 DRM_ERROR("bridge device not found\n");
264                 return -1;
265         }
266         return 0;
267 }
268
269 #define MCHBAR_I915 0x44
270 #define MCHBAR_I965 0x48
271 #define MCHBAR_SIZE (4*4096)
272
273 #define DEVEN_REG 0x54
274 #define   DEVEN_MCHBAR_EN (1 << 28)
275
276 /* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	/* MCHBAR register lives at a different config offset pre-gen4. */
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	/* Gen4+ has a 64-bit MCHBAR; read the high dword as well. */
	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		/* start == 0 tells teardown there is nothing to release. */
		dev_priv->mch_res.start = 0;
		return ret;
	}

	/* Program the freshly allocated address back into MCHBAR. */
	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
321
/*
 * Setup MCHBAR if possible.  If we had to allocate and enable it ourselves,
 * dev_priv->mchbar_need_disable is set so intel_teardown_mchbar() can undo
 * the enable again.  (The old comment claimed a boolean return; the function
 * is void and communicates via that flag instead.)
 */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	/* VLV has no MCHBAR in this sense; nothing to do. */
	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	/* i915G/GM use an enable bit in DEVEN; others use MCHBAR bit 0. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		/* Re-read: the alloc path wrote the BAR address meanwhile. */
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
362
363 static void
364 intel_teardown_mchbar(struct drm_device *dev)
365 {
366         struct drm_i915_private *dev_priv = dev->dev_private;
367         int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
368         u32 temp;
369
370         if (dev_priv->mchbar_need_disable) {
371                 if (IS_I915G(dev) || IS_I915GM(dev)) {
372                         pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
373                         temp &= ~DEVEN_MCHBAR_EN;
374                         pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
375                 } else {
376                         pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
377                         temp &= ~1;
378                         pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
379                 }
380         }
381
382         if (dev_priv->mch_res.start)
383                 release_resource(&dev_priv->mch_res);
384 }
385
386 /* true = enable decode, false = disable decoder */
387 static unsigned int i915_vga_set_decode(void *cookie, bool state)
388 {
389         struct drm_device *dev = cookie;
390
391         intel_modeset_vga_set_state(dev, state);
392         if (state)
393                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
394                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
395         else
396                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
397 }
398
/*
 * vga_switcheroo callback: power the GPU on or off by driving the legacy
 * suspend/resume paths, updating switch_power_state around the transition.
 */
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_legacy(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_legacy(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
418
419 static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
420 {
421         struct drm_device *dev = pci_get_drvdata(pdev);
422
423         /*
424          * FIXME: open_count is protected by drm_global_mutex but that would lead to
425          * locking inversion with the driver load path. And the access here is
426          * completely racy anyway. So don't bother with locking for now.
427          */
428         return dev->open_count == 0;
429 }
430
/* vga_switcheroo client hooks: power state changes and switch gating. */
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL, /* no re-probe needed after a switch */
	.can_switch = i915_switcheroo_can_switch,
};
436
/*
 * Bring up the KMS side of the driver: VBIOS parsing, VGA arbitration,
 * switcheroo, stolen memory, interrupts, modeset/GEM init, fbdev and
 * hotplug.  The ordering below is load-bearing; failures unwind through
 * the goto-cleanup chain in reverse order of setup.
 */
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* A missing VBIOS is non-fatal; defaults are used instead. */
	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	/* With no display pipes there is no fbdev/hotplug work to do. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	async_schedule(intel_fbdev_initial_config, dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
531
#if IS_ENABLED(CONFIG_FB)
/*
 * Evict firmware framebuffer drivers (e.g. vesafb/efifb) that claim the
 * GTT aperture we are about to take over.  Returns 0 on success or a
 * negative errno.
 */
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	/* The single conflicting range is the whole mappable aperture. */
	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	/* Shadowed ROM means we are the primary (boot) display device. */
	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
/* Without CONFIG_FB there is no firmware fb driver to kick out. */
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif
562
#if !defined(CONFIG_VGA_CONSOLE)
/* No VGA console compiled in: nothing to replace. */
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
/* VGA console present but no dummy console to hand over to: cannot evict. */
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
/*
 * Replace the VGA console with the dummy console so the modesetting
 * driver owns the VGA hardware.  Returns 0 on success or a negative errno.
 */
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	/* Console driver changes must happen under the console lock. */
	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif
595
/*
 * Log a one-line summary of the device info: gen, PCI id/revision and every
 * set feature flag.  DEV_INFO_FOR_EACH_FLAG is expanded twice with different
 * helper macros: once to build the "%s%s..." format string, once to build
 * the matching argument list.
 */
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
615
616 /*
617  * Determine various intel_device_info fields at runtime.
618  *
619  * Use it when either:
620  *   - it's judged too laborious to fill n static structures with the limit
621  *     when a simple if statement does the job,
622  *   - run-time checks (eg read fuse/strap registers) are needed.
623  *
624  * This function needs to be called:
625  *   - after the MMIO has been setup as we are reading registers,
626  *   - after the PCH has been detected,
627  *   - before the first usage of the fields it can tweak.
628  */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	/* Cast away the const-ness: this is the one place allowed to tweak
	 * the otherwise write-once device info. */
	info = (struct intel_device_info *)&dev_priv->info;

	/* VLV and gen9 have two sprite planes per pipe, everything else one. */
	if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   !IS_VALLEYVIEW(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		}
	}
}
671
672 /**
673  * i915_driver_load - setup chip and create an initial config
674  * @dev: DRM device
675  * @flags: startup flags
676  *
677  * The driver load routine has to do several things:
678  *   - drive output discovery via intel_modeset_init()
679  *   - initialize the memory manager
680  *   - allocate initial config memory
681  *   - setup the DRM framebuffer with the allocated memory
682  */
683 int i915_driver_load(struct drm_device *dev, unsigned long flags)
684 {
685         struct drm_i915_private *dev_priv;
686         struct intel_device_info *info, *device_info;
687         int ret = 0, mmio_bar, mmio_size;
688         uint32_t aperture_size;
689
690         info = (struct intel_device_info *) flags;
691
692         /* Refuse to load on gen6+ without kms enabled. */
693         if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
694                 DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
695                 DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
696                 return -ENODEV;
697         }
698
699         /* UMS needs agp support. */
700         if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
701                 return -EINVAL;
702
703         dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
704         if (dev_priv == NULL)
705                 return -ENOMEM;
706
707         dev->dev_private = dev_priv;
708         dev_priv->dev = dev;
709
710         /* Setup the write-once "constant" device info */
711         device_info = (struct intel_device_info *)&dev_priv->info;
712         memcpy(device_info, info, sizeof(dev_priv->info));
713         device_info->device_id = dev->pdev->device;
714
715         spin_lock_init(&dev_priv->irq_lock);
716         spin_lock_init(&dev_priv->gpu_error.lock);
717         mutex_init(&dev_priv->backlight_lock);
718         spin_lock_init(&dev_priv->uncore.lock);
719         spin_lock_init(&dev_priv->mm.object_stat_lock);
720         spin_lock_init(&dev_priv->mmio_flip_lock);
721         mutex_init(&dev_priv->dpio_lock);
722         mutex_init(&dev_priv->modeset_restore_lock);
723
724         intel_pm_setup(dev);
725
726         intel_display_crc_init(dev);
727
728         i915_dump_device_info(dev_priv);
729
730         /* Not all pre-production machines fall into this category, only the
731          * very first ones. Almost everything should work, except for maybe
732          * suspend/resume. And we don't implement workarounds that affect only
733          * pre-production machines. */
734         if (IS_HSW_EARLY_SDV(dev))
735                 DRM_INFO("This is an early pre-production Haswell machine. "
736                          "It may not be fully functional.\n");
737
738         if (i915_get_bridge_dev(dev)) {
739                 ret = -EIO;
740                 goto free_priv;
741         }
742
743         mmio_bar = IS_GEN2(dev) ? 1 : 0;
744         /* Before gen4, the registers and the GTT are behind different BARs.
745          * However, from gen4 onwards, the registers and the GTT are shared
746          * in the same BAR, so we want to restrict this ioremap from
747          * clobbering the GTT which we want ioremap_wc instead. Fortunately,
748          * the register BAR remains the same size for all the earlier
749          * generations up to Ironlake.
750          */
751         if (info->gen < 5)
752                 mmio_size = 512*1024;
753         else
754                 mmio_size = 2*1024*1024;
755
756         dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
757         if (!dev_priv->regs) {
758                 DRM_ERROR("failed to map registers\n");
759                 ret = -EIO;
760                 goto put_bridge;
761         }
762
763         /* This must be called before any calls to HAS_PCH_* */
764         intel_detect_pch(dev);
765
766         intel_uncore_init(dev);
767
768         ret = i915_gem_gtt_init(dev);
769         if (ret)
770                 goto out_regs;
771
772         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
773                 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
774                  * otherwise the vga fbdev driver falls over. */
775                 ret = i915_kick_out_firmware_fb(dev_priv);
776                 if (ret) {
777                         DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
778                         goto out_gtt;
779                 }
780
781                 ret = i915_kick_out_vgacon(dev_priv);
782                 if (ret) {
783                         DRM_ERROR("failed to remove conflicting VGA console\n");
784                         goto out_gtt;
785                 }
786         }
787
788         pci_set_master(dev->pdev);
789
790         /* overlay on gen2 is broken and can't address above 1G */
791         if (IS_GEN2(dev))
792                 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
793
794         /* 965GM sometimes incorrectly writes to hardware status page (HWS)
795          * using 32bit addressing, overwriting memory if HWS is located
796          * above 4GB.
797          *
798          * The documentation also mentions an issue with undefined
799          * behaviour if any general state is accessed within a page above 4GB,
800          * which also needs to be handled carefully.
801          */
802         if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
803                 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
804
805         aperture_size = dev_priv->gtt.mappable_end;
806
807         dev_priv->gtt.mappable =
808                 io_mapping_create_wc(dev_priv->gtt.mappable_base,
809                                      aperture_size);
810         if (dev_priv->gtt.mappable == NULL) {
811                 ret = -EIO;
812                 goto out_gtt;
813         }
814
815         dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
816                                               aperture_size);
817
818         /* The i915 workqueue is primarily used for batched retirement of
819          * requests (and thus managing bo) once the task has been completed
820          * by the GPU. i915_gem_retire_requests() is called directly when we
821          * need high-priority retirement, such as waiting for an explicit
822          * bo.
823          *
824          * It is also used for periodic low-priority events, such as
825          * idle-timers and recording error state.
826          *
827          * All tasks on the workqueue are expected to acquire the dev mutex
828          * so there is no point in running more than one instance of the
829          * workqueue at any time.  Use an ordered one.
830          */
831         dev_priv->wq = alloc_ordered_workqueue("i915", 0);
832         if (dev_priv->wq == NULL) {
833                 DRM_ERROR("Failed to create our workqueue.\n");
834                 ret = -ENOMEM;
835                 goto out_mtrrfree;
836         }
837
838         dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
839         if (dev_priv->dp_wq == NULL) {
840                 DRM_ERROR("Failed to create our dp workqueue.\n");
841                 ret = -ENOMEM;
842                 goto out_freewq;
843         }
844
845         intel_irq_init(dev_priv);
846         intel_uncore_sanitize(dev);
847
848         /* Try to make sure MCHBAR is enabled before poking at it */
849         intel_setup_mchbar(dev);
850         intel_setup_gmbus(dev);
851         intel_opregion_setup(dev);
852
853         intel_setup_bios(dev);
854
855         i915_gem_load(dev);
856
857         /* On the 945G/GM, the chipset reports the MSI capability on the
858          * integrated graphics even though the support isn't actually there
859          * according to the published specs.  It doesn't appear to function
860          * correctly in testing on 945G.
861          * This may be a side effect of MSI having been made available for PEG
862          * and the registers being closely associated.
863          *
864          * According to chipset errata, on the 965GM, MSI interrupts may
865          * be lost or delayed, but we use them anyways to avoid
866          * stuck interrupts on some machines.
867          */
868         if (!IS_I945G(dev) && !IS_I945GM(dev))
869                 pci_enable_msi(dev->pdev);
870
871         intel_device_info_runtime_init(dev);
872
873         if (INTEL_INFO(dev)->num_pipes) {
874                 ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
875                 if (ret)
876                         goto out_gem_unload;
877         }
878
879         intel_power_domains_init(dev_priv);
880
881         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
882                 ret = i915_load_modeset_init(dev);
883                 if (ret < 0) {
884                         DRM_ERROR("failed to init modeset\n");
885                         goto out_power_well;
886                 }
887         } else {
888                 /* Start out suspended in ums mode. */
889                 dev_priv->ums.mm_suspended = 1;
890         }
891
892         i915_setup_sysfs(dev);
893
894         if (INTEL_INFO(dev)->num_pipes) {
895                 /* Must be done after probing outputs */
896                 intel_opregion_init(dev);
897                 acpi_video_register();
898         }
899
900         if (IS_GEN5(dev))
901                 intel_gpu_ips_init(dev_priv);
902
903         intel_runtime_pm_enable(dev_priv);
904
905         return 0;
906
907 out_power_well:
908         intel_power_domains_fini(dev_priv);
909         drm_vblank_cleanup(dev);
910 out_gem_unload:
911         WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
912         unregister_shrinker(&dev_priv->mm.shrinker);
913
914         if (dev->pdev->msi_enabled)
915                 pci_disable_msi(dev->pdev);
916
917         intel_teardown_gmbus(dev);
918         intel_teardown_mchbar(dev);
919         pm_qos_remove_request(&dev_priv->pm_qos);
920         destroy_workqueue(dev_priv->dp_wq);
921 out_freewq:
922         destroy_workqueue(dev_priv->wq);
923 out_mtrrfree:
924         arch_phys_wc_del(dev_priv->gtt.mtrr);
925         io_mapping_free(dev_priv->gtt.mappable);
926 out_gtt:
927         i915_global_gtt_cleanup(dev);
928 out_regs:
929         intel_uncore_fini(dev);
930         pci_iounmap(dev->pdev, dev_priv->regs);
931 put_bridge:
932         pci_dev_put(dev_priv->bridge_dev);
933 free_priv:
934         if (dev_priv->slab)
935                 kmem_cache_destroy(dev_priv->slab);
936         kfree(dev_priv);
937         return ret;
938 }
939
/**
 * i915_driver_unload - tear down driver state; inverse of the load path
 * @dev: DRM device
 *
 * Teardown is strictly ordered: the GPU is idled first, then power
 * domains, sysfs, modeset/fbdev, interrupt-error state, GEM, and
 * finally the low-level mappings and allocations made during load.
 * Do not reorder calls without checking the matching setup step.
 *
 * Returns 0 on success, or the error from i915_gem_suspend() if the
 * hardware could not be idled (in which case the driver stays loaded).
 */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Idle the GPU before touching anything else; abort on failure. */
	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	/* Unregister the OOM/shrinker hooks before the GEM state they
	 * would inspect is dismantled below. */
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	/* Drop the WC mapping of the aperture and its MTRR (reverse of
	 * the io_mapping_create_wc/arch_phys_wc_add done at load). */
	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		/* Ringbuffer/context teardown requires struct_mutex;
		 * stolen-memory cleanup does not. */
		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_stolen(dev);
	}

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	/* No new work may be queued past this point. */
	destroy_workqueue(dev_priv->dp_wq);
	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	/* regs can be NULL if load failed part-way; guard the unmap. */
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}
1029
/**
 * i915_driver_open - called on each open() of the device node
 * @dev: DRM device
 * @file: DRM file private for the new client
 *
 * Per-client setup is delegated entirely to GEM.
 *
 * Returns 0 on success or the error from i915_gem_open().
 */
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	return i915_gem_open(dev, file);
}
1040
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* KMS: restore the fbdev console mode and let vga_switcheroo
		 * finish any deferred GPU switch; GEM state stays intact. */
		intel_fbdev_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	/* UMS: no clients remain, so drop GEM state. */
	i915_gem_lastclose(dev);
}
1071
/**
 * i915_driver_preclose - per-client teardown run before the file closes
 * @dev: DRM device
 * @file: DRM file private of the departing client
 *
 * Closes the client's GEM contexts and releases its remaining per-file
 * GEM state under struct_mutex, then gives modeset code a chance to
 * drop per-file state when KMS is enabled.
 */
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	/* Contexts must go before the generic per-file GEM release. */
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_modeset_preclose(dev, file);
}
1082
1083 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1084 {
1085         struct drm_i915_file_private *file_priv = file->driver_priv;
1086
1087         if (file_priv && file_priv->bsd_ring)
1088                 file_priv->bsd_ring = NULL;
1089         kfree(file_priv);
1090 }
1091
/*
 * i915 ioctl dispatch table: one entry per DRM_I915_* ioctl, pairing
 * the handler with its access flags (DRM_AUTH, DRM_MASTER,
 * DRM_ROOT_ONLY, DRM_UNLOCKED, DRM_RENDER_ALLOW, DRM_CONTROL_ALLOW).
 * Several legacy DMA-era ioctls are wired to drm_noop and do nothing.
 */
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

/* Number of entries in the table above. */
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
1146
/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  For
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device *dev)
{
	/* Unconditionally report AGP; @dev is deliberately ignored. */
	return 1;
}