1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
33 #include <uapi/drm/i915_drm.h>
36 #include "intel_bios.h"
37 #include "intel_ringbuffer.h"
38 #include <linux/io-mapping.h>
39 #include <linux/i2c.h>
40 #include <linux/i2c-algo-bit.h>
41 #include <drm/intel-gtt.h>
42 #include <linux/backlight.h>
43 #include <linux/intel-iommu.h>
44 #include <linux/kref.h>
45 #include <linux/pm_qos.h>
47 /* General customization:
50 #define DRIVER_AUTHOR "Tungsten Graphics, Inc."
52 #define DRIVER_NAME "i915"
53 #define DRIVER_DESC "Intel Graphics"
54 #define DRIVER_DATE "20080730"
62 #define pipe_name(p) ((p) + 'A')
70 #define transcoder_name(t) ((t) + 'A')
77 #define plane_name(p) ((p) + 'A')
79 #define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
89 #define port_name(p) ((p) + 'A')
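/*
 * Example (illustrative): the *_name() helpers map the zero-based enums
 * onto the letters used throughout debug output. Note that sprite_name()
 * implicitly relies on a dev_priv being in scope at the call site.
 *
 *	DRM_DEBUG_KMS("enabling pipe %c on port %c\n",
 *		      pipe_name(pipe), port_name(port));
 */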
91 enum intel_display_power_domain {
95 POWER_DOMAIN_PIPE_A_PANEL_FITTER,
96 POWER_DOMAIN_PIPE_B_PANEL_FITTER,
97 POWER_DOMAIN_PIPE_C_PANEL_FITTER,
98 POWER_DOMAIN_TRANSCODER_A,
99 POWER_DOMAIN_TRANSCODER_B,
100 POWER_DOMAIN_TRANSCODER_C,
101 POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
105 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
106 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
107 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
108 #define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
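/*
 * These macros rely on the power domain enum values above being laid out
 * contiguously, so translating an enum pipe/transcoder is simple addition,
 * e.g. POWER_DOMAIN_TRANSCODER(TRANSCODER_B) is POWER_DOMAIN_TRANSCODER_B.
 */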
112 HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
113 HPD_TV = HPD_NONE, /* TV is known to be unreliable */
123 #define I915_GEM_GPU_DOMAINS \
124 (I915_GEM_DOMAIN_RENDER | \
125 I915_GEM_DOMAIN_SAMPLER | \
126 I915_GEM_DOMAIN_COMMAND | \
127 I915_GEM_DOMAIN_INSTRUCTION | \
128 I915_GEM_DOMAIN_VERTEX)
130 #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
132 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
133 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
134 if ((intel_encoder)->base.crtc == (__crtc))
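/*
 * Example (illustrative sketch): both iterators are plain for/if wrappers;
 * for_each_pipe() implicitly uses a dev in scope. Counting the encoders
 * currently attached to a CRTC:
 *
 *	struct intel_encoder *encoder;
 *	int n = 0;
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		n++;
 */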
136 struct drm_i915_private;
139 DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
140 /* real shared dpll ids must be >= 0 */
144 #define I915_NUM_PLLS 2
146 struct intel_dpll_hw_state {
153 struct intel_shared_dpll {
154 int refcount; /* number of CRTCs sharing this PLL */
155 int active; /* number of active CRTCs (i.e. DPMS on) */
156 bool on; /* is the PLL actually active? Disabled during modeset */
158 /* should match the index in the dev_priv->shared_dplls array */
159 enum intel_dpll_id id;
160 struct intel_dpll_hw_state hw_state;
161 void (*mode_set)(struct drm_i915_private *dev_priv,
162 struct intel_shared_dpll *pll);
163 void (*enable)(struct drm_i915_private *dev_priv,
164 struct intel_shared_dpll *pll);
165 void (*disable)(struct drm_i915_private *dev_priv,
166 struct intel_shared_dpll *pll);
167 bool (*get_hw_state)(struct drm_i915_private *dev_priv,
168 struct intel_shared_dpll *pll,
169 struct intel_dpll_hw_state *hw_state);
172 /* Used by DP and FDI links */
173 struct intel_link_m_n {
181 void intel_link_compute_m_n(int bpp, int nlanes,
182 int pixel_clock, int link_clock,
183 struct intel_link_m_n *m_n);
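/*
 * Example (illustrative, values assumed): computing the link M/N values
 * for a 24bpp mode driven over a 4-lane DP link at 270000 kHz:
 *
 *	struct intel_link_m_n m_n;
 *
 *	intel_link_compute_m_n(24, 4, adjusted_mode->clock, 270000, &m_n);
 */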
185 struct intel_ddi_plls {
191 /* Interface history:
194 * 1.2: Add Power Management
195 * 1.3: Add vblank support
196 * 1.4: Fix cmdbuffer path, add heap destroy
197 * 1.5: Add vblank pipe configuration
198 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
199 * - Support vertical blank on secondary display pipe
201 #define DRIVER_MAJOR 1
202 #define DRIVER_MINOR 6
203 #define DRIVER_PATCHLEVEL 0
205 #define WATCH_LISTS 0
208 #define I915_GEM_PHYS_CURSOR_0 1
209 #define I915_GEM_PHYS_CURSOR_1 2
210 #define I915_GEM_PHYS_OVERLAY_REGS 3
211 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
213 struct drm_i915_gem_phys_object {
215 struct page **page_list;
216 drm_dma_handle_t *handle;
217 struct drm_i915_gem_object *cur_obj;
220 struct opregion_header;
221 struct opregion_acpi;
222 struct opregion_swsci;
223 struct opregion_asle;
225 struct intel_opregion {
226 struct opregion_header __iomem *header;
227 struct opregion_acpi __iomem *acpi;
228 struct opregion_swsci __iomem *swsci;
229 u32 swsci_gbda_sub_functions;
230 u32 swsci_sbcb_sub_functions;
231 struct opregion_asle __iomem *asle;
233 u32 __iomem *lid_state;
235 #define OPREGION_SIZE (8*1024)
237 struct intel_overlay;
238 struct intel_overlay_error_state;
240 struct drm_i915_master_private {
241 drm_local_map_t *sarea;
242 struct _drm_i915_sarea *sarea_priv;
244 #define I915_FENCE_REG_NONE -1
245 #define I915_MAX_NUM_FENCES 32
246 /* 32 fences + sign bit for FENCE_REG_NONE */
247 #define I915_MAX_NUM_FENCE_BITS 6
249 struct drm_i915_fence_reg {
250 struct list_head lru_list;
251 struct drm_i915_gem_object *obj;
255 struct sdvo_device_mapping {
264 struct intel_display_error_state;
266 struct drm_i915_error_state {
274 bool waiting[I915_NUM_RINGS];
275 u32 pipestat[I915_MAX_PIPES];
276 u32 tail[I915_NUM_RINGS];
277 u32 head[I915_NUM_RINGS];
278 u32 ctl[I915_NUM_RINGS];
279 u32 ipeir[I915_NUM_RINGS];
280 u32 ipehr[I915_NUM_RINGS];
281 u32 instdone[I915_NUM_RINGS];
282 u32 acthd[I915_NUM_RINGS];
283 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
284 u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
285 u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
286 /* our own tracking of ring head and tail */
287 u32 cpu_ring_head[I915_NUM_RINGS];
288 u32 cpu_ring_tail[I915_NUM_RINGS];
289 u32 error; /* gen6+ */
290 u32 err_int; /* gen7 */
291 u32 instpm[I915_NUM_RINGS];
292 u32 instps[I915_NUM_RINGS];
293 u32 extra_instdone[I915_NUM_INSTDONE_REG];
294 u32 seqno[I915_NUM_RINGS];
296 u32 fault_reg[I915_NUM_RINGS];
298 u32 faddr[I915_NUM_RINGS];
299 u64 fence[I915_MAX_NUM_FENCES];
301 struct drm_i915_error_ring {
302 struct drm_i915_error_object {
306 } *ringbuffer, *batchbuffer, *ctx;
307 struct drm_i915_error_request {
313 } ring[I915_NUM_RINGS];
314 struct drm_i915_error_buffer {
321 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
328 } **active_bo, **pinned_bo;
329 u32 *active_bo_count, *pinned_bo_count;
330 struct intel_overlay_error_state *overlay;
331 struct intel_display_error_state *display;
332 int hangcheck_score[I915_NUM_RINGS];
333 enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
336 struct intel_crtc_config;
341 struct drm_i915_display_funcs {
342 bool (*fbc_enabled)(struct drm_device *dev);
343 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
344 void (*disable_fbc)(struct drm_device *dev);
345 int (*get_display_clock_speed)(struct drm_device *dev);
346 int (*get_fifo_size)(struct drm_device *dev, int plane);
348 * find_dpll() - Find the best values for the PLL
349 * @limit: limits for the PLL
350 * @crtc: current CRTC
351 * @target: target frequency in kHz
352 * @refclk: reference clock frequency in kHz
353 * @match_clock: if provided, @best_clock P divider must
354 * match the P divider from @match_clock
355 * used for LVDS downclocking
356 * @best_clock: best PLL values found
358 * Returns true on success, false on failure.
360 bool (*find_dpll)(const struct intel_limit *limit,
361 struct drm_crtc *crtc,
362 int target, int refclk,
363 struct dpll *match_clock,
364 struct dpll *best_clock);
365 void (*update_wm)(struct drm_crtc *crtc);
366 void (*update_sprite_wm)(struct drm_plane *plane,
367 struct drm_crtc *crtc,
368 uint32_t sprite_width, int pixel_size,
369 bool enable, bool scaled);
370 void (*modeset_global_resources)(struct drm_device *dev);
371 /* Returns the active state of the crtc, and if the crtc is active,
372 * fills out the pipe-config with the hw state. */
373 bool (*get_pipe_config)(struct intel_crtc *,
374 struct intel_crtc_config *);
375 int (*crtc_mode_set)(struct drm_crtc *crtc,
377 struct drm_framebuffer *old_fb);
378 void (*crtc_enable)(struct drm_crtc *crtc);
379 void (*crtc_disable)(struct drm_crtc *crtc);
380 void (*off)(struct drm_crtc *crtc);
381 void (*write_eld)(struct drm_connector *connector,
382 struct drm_crtc *crtc);
383 void (*fdi_link_train)(struct drm_crtc *crtc);
384 void (*init_clock_gating)(struct drm_device *dev);
385 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
386 struct drm_framebuffer *fb,
387 struct drm_i915_gem_object *obj,
389 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
391 void (*hpd_irq_setup)(struct drm_device *dev);
392 /* clock updates for mode set */
394 /* render clock increase/decrease */
395 /* display clock increase/decrease */
396 /* pll clock increase/decrease */
399 struct intel_uncore_funcs {
400 void (*force_wake_get)(struct drm_i915_private *dev_priv);
401 void (*force_wake_put)(struct drm_i915_private *dev_priv);
404 struct intel_uncore {
405 spinlock_t lock; /** lock is also taken in irq contexts. */
407 struct intel_uncore_funcs funcs;
410 unsigned forcewake_count;
413 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
414 func(is_mobile) sep \
417 func(is_i945gm) sep \
419 func(need_gfx_hws) sep \
421 func(is_pineview) sep \
422 func(is_broadwater) sep \
423 func(is_crestline) sep \
424 func(is_ivybridge) sep \
425 func(is_valleyview) sep \
426 func(is_haswell) sep \
427 func(is_preliminary) sep \
428 func(has_force_wake) sep \
430 func(has_pipe_cxsr) sep \
431 func(has_hotplug) sep \
432 func(cursor_needs_physical) sep \
433 func(has_overlay) sep \
434 func(overlay_needs_physical) sep \
435 func(supports_tv) sep \
436 func(has_bsd_ring) sep \
437 func(has_blt_ring) sep \
438 func(has_vebox_ring) sep \
443 #define DEFINE_FLAG(name) u8 name:1
444 #define SEP_SEMICOLON ;
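/*
 * DEV_INFO_FOR_EACH_FLAG is an X-macro: the single flag list expands to
 * different code depending on the func/sep pair passed in. With DEFINE_FLAG
 * and SEP_SEMICOLON it emits one single-bit field per flag inside
 * struct intel_device_info below, roughly:
 *
 *	u8 is_mobile:1; u8 is_i945gm:1; ... u8 has_vebox_ring:1;
 *
 * Other users can pass a different pair to generate, for example, debug
 * strings from the same canonical list.
 */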
446 struct intel_device_info {
447 u32 display_mmio_offset;
450 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
456 enum i915_cache_level {
458 I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
459 I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain-specific
460 caches, e.g. sampler/render caches, and the
461 large Last-Level-Cache. LLC is coherent with
462 the CPU, but L3 is only visible to the GPU. */
463 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
466 typedef uint32_t gen6_gtt_pte_t;
468 struct i915_address_space {
470 struct drm_device *dev;
471 struct list_head global_link;
472 unsigned long start; /* Start offset always 0 for dri2 */
473 size_t total; /* size addr space maps (e.g. 2GB for ggtt) */
481 * List of objects currently involved in rendering.
483 * Includes buffers having the contents of their GPU caches
484 * flushed, not necessarily primitives. last_rendering_seqno
485 * represents when the rendering involved will be completed.
487 * A reference is held on the buffer while on this list.
489 struct list_head active_list;
492 * LRU list of objects which are not in the ringbuffer and
493 * are ready to unbind, but are still in the GTT.
495 * last_rendering_seqno is 0 while an object is in this list.
497 * A reference is not held on the buffer while on this list,
498 * as merely being GTT-bound shouldn't prevent its being
499 * freed, and we'll pull it off the list in the free path.
501 struct list_head inactive_list;
503 /* FIXME: Need a more generic return type */
504 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
505 enum i915_cache_level level);
506 void (*clear_range)(struct i915_address_space *vm,
507 unsigned int first_entry,
508 unsigned int num_entries);
509 void (*insert_entries)(struct i915_address_space *vm,
511 unsigned int first_entry,
512 enum i915_cache_level cache_level);
513 void (*cleanup)(struct i915_address_space *vm);
516 /* The Graphics Translation Table is the way in which GEN hardware translates a
517 * Graphics Virtual Address into a Physical Address. In addition to the normal
518 * collateral associated with any va->pa translations, GEN hardware also has a
519 * portion of the GTT which can be mapped by the CPU and remain both coherent
520 * and correct (in cases like swizzling). That region is referred to as GMADR in
524 struct i915_address_space base;
525 size_t stolen_size; /* Total size of stolen memory */
527 unsigned long mappable_end; /* End offset that we can CPU map */
528 struct io_mapping *mappable; /* Mapping to our CPU mappable region */
529 phys_addr_t mappable_base; /* PA of our GMADR */
531 /** "Graphics Stolen Memory" holds the global PTEs */
539 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
540 size_t *stolen, phys_addr_t *mappable_base,
541 unsigned long *mappable_end);
543 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
545 struct i915_hw_ppgtt {
546 struct i915_address_space base;
547 unsigned num_pd_entries;
548 struct page **pt_pages;
550 dma_addr_t *pt_dma_addr;
552 int (*enable)(struct drm_device *dev);
556 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
557 * VMA's presence cannot be guaranteed before binding, or after unbinding the
558 * object into/from the address space.
560 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
561 * will always be <= an object's lifetime. So object refcounting should cover us.
564 struct drm_mm_node node;
565 struct drm_i915_gem_object *obj;
566 struct i915_address_space *vm;
568 /** This object's place on the active/inactive lists */
569 struct list_head mm_list;
571 struct list_head vma_link; /* Link in the object's VMA list */
573 /** This vma's place in the batchbuffer or on the eviction list */
574 struct list_head exec_list;
577 * Used for performing relocations during execbuffer insertion.
579 struct hlist_node exec_node;
580 unsigned long exec_handle;
581 struct drm_i915_gem_exec_object2 *exec_entry;
585 struct i915_ctx_hang_stats {
586 /* This context had batch pending when hang was declared */
587 unsigned batch_pending;
589 /* This context had batch active when hang was declared */
590 unsigned batch_active;
592 /* Time when this context was last blamed for a GPU reset */
593 unsigned long guilty_ts;
595 /* This context is banned from submitting more work */
599 /* This must match up with the value previously used for execbuf2.rsvd1. */
600 #define DEFAULT_CONTEXT_ID 0
601 struct i915_hw_context {
606 struct drm_i915_file_private *file_priv;
607 struct intel_ring_buffer *ring;
608 struct drm_i915_gem_object *obj;
609 struct i915_ctx_hang_stats hang_stats;
611 struct list_head link;
620 struct drm_mm_node *compressed_fb;
621 struct drm_mm_node *compressed_llb;
623 struct intel_fbc_work {
624 struct delayed_work work;
625 struct drm_crtc *crtc;
626 struct drm_framebuffer *fb;
631 FBC_OK, /* FBC is enabled */
632 FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
633 FBC_NO_OUTPUT, /* no outputs enabled to compress */
634 FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
635 FBC_UNSUPPORTED_MODE, /* interlaced or doublescanned mode */
636 FBC_MODE_TOO_LARGE, /* mode too large for compression */
637 FBC_BAD_PLANE, /* fbc not supported on plane */
638 FBC_NOT_TILED, /* buffer not tiled */
639 FBC_MULTIPLE_PIPES, /* more than one pipe active */
641 FBC_CHIP_DEFAULT, /* disabled by default on this chip */
646 PSR_NO_SOURCE, /* Not supported on platform */
647 PSR_NO_SINK, /* Not supported by panel */
650 PSR_PWR_WELL_ENABLED,
654 PSR_INTERLACED_ENABLED,
659 PCH_NONE = 0, /* No PCH present */
660 PCH_IBX, /* Ibexpeak PCH */
661 PCH_CPT, /* Cougarpoint PCH */
662 PCH_LPT, /* Lynxpoint PCH */
666 enum intel_sbi_destination {
671 #define QUIRK_PIPEA_FORCE (1<<0)
672 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
673 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
674 #define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
677 struct intel_fbc_work;
680 struct i2c_adapter adapter;
684 struct i2c_algo_bit_data bit_algo;
685 struct drm_i915_private *dev_priv;
688 struct i915_suspend_saved_registers {
709 u32 saveTRANS_HTOTAL_A;
710 u32 saveTRANS_HBLANK_A;
711 u32 saveTRANS_HSYNC_A;
712 u32 saveTRANS_VTOTAL_A;
713 u32 saveTRANS_VBLANK_A;
714 u32 saveTRANS_VSYNC_A;
722 u32 savePFIT_PGM_RATIOS;
723 u32 saveBLC_HIST_CTL;
725 u32 saveBLC_PWM_CTL2;
726 u32 saveBLC_CPU_PWM_CTL;
727 u32 saveBLC_CPU_PWM_CTL2;
740 u32 saveTRANS_HTOTAL_B;
741 u32 saveTRANS_HBLANK_B;
742 u32 saveTRANS_HSYNC_B;
743 u32 saveTRANS_VTOTAL_B;
744 u32 saveTRANS_VBLANK_B;
745 u32 saveTRANS_VSYNC_B;
759 u32 savePP_ON_DELAYS;
760 u32 savePP_OFF_DELAYS;
768 u32 savePFIT_CONTROL;
769 u32 save_palette_a[256];
770 u32 save_palette_b[256];
771 u32 saveDPFC_CB_BASE;
772 u32 saveFBC_CFB_BASE;
775 u32 saveFBC_CONTROL2;
785 u32 saveCACHE_MODE_0;
786 u32 saveMI_ARB_STATE;
797 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
808 u32 savePIPEA_GMCH_DATA_M;
809 u32 savePIPEB_GMCH_DATA_M;
810 u32 savePIPEA_GMCH_DATA_N;
811 u32 savePIPEB_GMCH_DATA_N;
812 u32 savePIPEA_DP_LINK_M;
813 u32 savePIPEB_DP_LINK_M;
814 u32 savePIPEA_DP_LINK_N;
815 u32 savePIPEB_DP_LINK_N;
826 u32 savePCH_DREF_CONTROL;
827 u32 saveDISP_ARB_CTL;
828 u32 savePIPEA_DATA_M1;
829 u32 savePIPEA_DATA_N1;
830 u32 savePIPEA_LINK_M1;
831 u32 savePIPEA_LINK_N1;
832 u32 savePIPEB_DATA_M1;
833 u32 savePIPEB_DATA_N1;
834 u32 savePIPEB_LINK_M1;
835 u32 savePIPEB_LINK_N1;
836 u32 saveMCHBAR_RENDER_STANDBY;
837 u32 savePCH_PORT_HOTPLUG;
840 struct intel_gen6_power_mgmt {
841 /* work and pm_iir are protected by dev_priv->irq_lock */
842 struct work_struct work;
845 /* On vlv we need to manually drop to Vmin with a delayed work. */
846 struct delayed_work vlv_work;
848 /* The below variables and all the RPS hw state are protected by
849 * dev->struct_mutex. */
856 struct delayed_work delayed_resume_work;
859 * Protects RPS/RC6 register access and PCU communication.
860 * Must be taken after struct_mutex if nested.
862 struct mutex hw_lock;
865 /* defined in intel_pm.c */
866 extern spinlock_t mchdev_lock;
868 struct intel_ilk_power_mgmt {
876 unsigned long last_time1;
877 unsigned long chipset_power;
879 struct timespec last_time2;
880 unsigned long gfx_power;
886 struct drm_i915_gem_object *pwrctx;
887 struct drm_i915_gem_object *renderctx;
890 /* Power well structure for haswell */
891 struct i915_power_well {
892 struct drm_device *device;
894 /* power well enable/disable usage count */
899 struct i915_dri1_state {
900 unsigned allow_batchbuffer : 1;
901 u32 __iomem *gfx_hws_cpu_addr;
912 struct i915_ums_state {
914 * Flag if the X Server, and thus DRM, is not currently in
915 * control of the device.
917 * This is set between LeaveVT and EnterVT. It needs to be
918 * replaced with a semaphore. It also needs to be
919 * transitioned away from for kernel modesetting.
924 #define MAX_L3_SLICES 2
925 struct intel_l3_parity {
926 u32 *remap_info[MAX_L3_SLICES];
927 struct work_struct error_work;
932 /** Memory allocator for GTT stolen memory */
933 struct drm_mm stolen;
934 /** List of all objects in gtt_space. Used to restore gtt
935 * mappings on resume */
936 struct list_head bound_list;
938 * List of objects which are not bound to the GTT (thus
939 * are idle and not used by the GPU) but still have
940 * (presumably uncached) pages attached.
942 struct list_head unbound_list;
944 /** Usable portion of the GTT for GEM */
945 unsigned long stolen_base; /* limited to low memory (32-bit) */
947 /** PPGTT used for aliasing the PPGTT with the GTT */
948 struct i915_hw_ppgtt *aliasing_ppgtt;
950 struct shrinker inactive_shrinker;
951 bool shrinker_no_lock_stealing;
953 /** LRU list of objects with fence regs on them. */
954 struct list_head fence_list;
957 * We leave the user IRQ off as much as possible,
958 * but this means that requests will finish and never
959 * be retired once the system goes idle. Set a timer to
960 * fire periodically while the ring is running. When it
961 * fires, go retire requests.
963 struct delayed_work retire_work;
966 * Are we in a non-interruptible section of code like
971 /** Bit 6 swizzling required for X tiling */
972 uint32_t bit_6_swizzle_x;
973 /** Bit 6 swizzling required for Y tiling */
974 uint32_t bit_6_swizzle_y;
976 /* storage for physical objects */
977 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
979 /* accounting, useful for userland debugging */
980 spinlock_t object_stat_lock;
981 size_t object_memory;
985 struct drm_i915_error_state_buf {
994 struct i915_error_state_file_priv {
995 struct drm_device *dev;
996 struct drm_i915_error_state *error;
999 struct i915_gpu_error {
1000 /* For hangcheck timer */
1001 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
1002 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
1003 /* Hang the GPU twice in this window (8 hangcheck periods = 12 seconds) and your context gets banned */
1004 #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
1006 struct timer_list hangcheck_timer;
1008 /* For reset and error_state handling. */
1010 /* Protected by the above dev->gpu_error.lock. */
1011 struct drm_i915_error_state *first_error;
1012 struct work_struct work;
1015 * State variable and reset counter controlling the reset flow
1017 * Upper bits are for the reset counter. This counter is used by the
1018 * wait_seqno code to notice, in a race-free manner, that a reset event
1019 * happened and that it needs to restart the entire ioctl (since most
1020 * likely the seqno it waited for won't ever signal anytime soon).
1022 * This is important for lock-free wait paths, where no contended lock
1023 * naturally enforces the correct ordering between the bail-out of the
1024 * waiter and the gpu reset work code.
1026 * Lowest bit controls the reset state machine: Set means a reset is in
1027 * progress. This state will (presuming we don't have any bugs) decay
1028 * into either unset (successful reset) or the special WEDGED value (hw
1029 * terminally sour). All waiters on the reset_queue will be woken when
1032 atomic_t reset_counter;
1035 * Special values/flags for reset_counter
1037 * Note that the code relies on
1038 * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
1041 #define I915_RESET_IN_PROGRESS_FLAG 1
1042 #define I915_WEDGED 0xffffffff
1045 * Waitqueue to signal when the reset has completed. Used by clients
1046 * that wait for dev_priv->mm.wedged to settle.
1048 wait_queue_head_t reset_queue;
1050 /* For gpu hang simulation. */
1051 unsigned int stop_rings;
1054 enum modeset_restore {
1055 MODESET_ON_LID_OPEN,
1060 struct intel_vbt_data {
1061 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1062 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1065 unsigned int int_tv_support:1;
1066 unsigned int lvds_dither:1;
1067 unsigned int lvds_vbt:1;
1068 unsigned int int_crt_support:1;
1069 unsigned int lvds_use_ssc:1;
1070 unsigned int display_clock_mode:1;
1071 unsigned int fdi_rx_polarity_inverted:1;
1073 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1078 int edp_preemphasis;
1080 bool edp_initialized;
1083 struct edp_power_seq edp_pps;
1093 struct child_device_config *child_dev;
1096 enum intel_ddb_partitioning {
1098 INTEL_DDB_PART_5_6, /* IVB+ */
1101 struct intel_wm_level {
1110 * This struct tracks the state needed for the Package C8+ feature.
1112 * Package states C8 and deeper are really deep PC states that can only be
1113 * reached when all the devices on the system allow it, so even if the graphics
1114 * device allows PC8+, it doesn't mean the system will actually get to these
1117 * Our driver only allows PC8+ when all the outputs are disabled, the power well
1118 * is disabled and the GPU is idle. When these conditions are met, we manually
1119 * take care of the remaining steps: disable the interrupts, clocks and switch LCPLL
1122 * When we really reach PC8 or deeper states (not just when we allow it) we lose
1123 * the state of some registers, so when we come back from PC8+ we need to
1124 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1125 * need to take care of the registers kept by RC6.
1127 * The interrupt disabling is part of the requirements. We can only leave the
1128 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
1129 * can lock the machine.
1131 * Ideally every piece of our code that needs PC8+ disabled would call
1132 * hsw_disable_package_c8, which would increment disable_count and prevent the
1133 * system from reaching PC8+. But we don't have a symmetric way to do this for
1134 * everything, so we have the requirements_met and gpu_idle variables. When we
1135 * switch requirements_met or gpu_idle to true we decrease disable_count, and
1136 * increase it in the opposite case. The requirements_met variable is true when
1137 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
1138 * variable is true when the GPU is idle.
1140 * In addition to everything, we only actually enable PC8+ if disable_count
1141 * stays at zero for at least some seconds. This is implemented with the
1142 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
1143 * consecutive times when all screens are disabled and some background app
1144 * queries the state of our connectors, or we have some application constantly
1145 * waking up to use the GPU. Only after the enable_work function actually
1146 * enables PC8+ does the "enable" variable become true, which means that it can
1147 * be false even if disable_count is 0.
1149 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
1150 * goes back to false exactly before we reenable the IRQs. We use this variable
1151 * to check if someone is trying to enable/disable IRQs while they're supposed
1152 * to be disabled. This shouldn't happen and we'll print some error messages in
1153 * case it happens, but if it actually happens we'll also update the variables
1154 * inside struct regsave so when we restore the IRQs they will contain the
1155 * latest expected values.
1157 * For more, read "Display Sequences for Package C8" on our documentation.
1159 struct i915_package_c8 {
1160 bool requirements_met;
1163 /* Only true after the delayed work task actually enables it. */
1167 struct delayed_work enable_work;
1174 uint32_t gen6_pmimr;
1178 typedef struct drm_i915_private {
1179 struct drm_device *dev;
1180 struct kmem_cache *slab;
1182 const struct intel_device_info *info;
1184 int relative_constants_mode;
1188 struct intel_uncore uncore;
1190 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
1193 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
1194 * controller on different i2c buses. */
1195 struct mutex gmbus_mutex;
1198 * Base address of the gmbus and gpio block.
1200 uint32_t gpio_mmio_base;
1202 wait_queue_head_t gmbus_wait_queue;
1204 struct pci_dev *bridge_dev;
1205 struct intel_ring_buffer ring[I915_NUM_RINGS];
1206 uint32_t last_seqno, next_seqno;
1208 drm_dma_handle_t *status_page_dmah;
1209 struct resource mch_res;
1211 atomic_t irq_received;
1213 /* protects the irq masks */
1214 spinlock_t irq_lock;
1216 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1217 struct pm_qos_request pm_qos;
1219 /* DPIO indirect register protection */
1220 struct mutex dpio_lock;
1222 /** Cached value of IMR to avoid reads in updating the bitfield */
1227 struct work_struct hotplug_work;
1228 bool enable_hotplug_processing;
1230 unsigned long hpd_last_jiffies;
1235 HPD_MARK_DISABLED = 2
1237 } hpd_stats[HPD_NUM_PINS];
1239 struct timer_list hotplug_reenable_timer;
1243 struct i915_fbc fbc;
1244 struct intel_opregion opregion;
1245 struct intel_vbt_data vbt;
1248 struct intel_overlay *overlay;
1249 unsigned int sprite_scaling_enabled;
1255 spinlock_t lock; /* bl registers and the above bl fields */
1256 struct backlight_device *device;
1260 bool no_aux_handshake;
1262 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1263 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1264 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1266 unsigned int fsb_freq, mem_freq, is_ddr3;
1269 * wq - Driver workqueue for GEM.
1271 * NOTE: Work items scheduled here are not allowed to grab any modeset
1272 * locks, for otherwise the flushing done in the pageflip code will
1273 * result in deadlocks.
1275 struct workqueue_struct *wq;
1277 /* Display functions */
1278 struct drm_i915_display_funcs display;
1280 /* PCH chipset type */
1281 enum intel_pch pch_type;
1282 unsigned short pch_id;
1284 unsigned long quirks;
1286 enum modeset_restore modeset_restore;
1287 struct mutex modeset_restore_lock;
1289 struct list_head vm_list; /* Global list of all address spaces */
1290 struct i915_gtt gtt; /* VMA representing the global address space */
1292 struct i915_gem_mm mm;
1294 /* Kernel Modesetting */
1296 struct sdvo_device_mapping sdvo_mappings[2];
1298 struct drm_crtc *plane_to_crtc_mapping[3];
1299 struct drm_crtc *pipe_to_crtc_mapping[3];
1300 wait_queue_head_t pending_flip_queue;
1302 int num_shared_dpll;
1303 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1304 struct intel_ddi_plls ddi_plls;
1306 /* Reclocking support */
1307 bool render_reclock_avail;
1308 bool lvds_downclock_avail;
1309 /* indicates the reduced downclock for LVDS */
1313 bool mchbar_need_disable;
1315 struct intel_l3_parity l3_parity;
1317 /* Cannot be determined by PCIID. You must always read a register. */
1320 /* gen6+ rps state */
1321 struct intel_gen6_power_mgmt rps;
1323 /* ilk-only ips/rps state. Everything in here is protected by the global
1324 * mchdev_lock in intel_pm.c */
1325 struct intel_ilk_power_mgmt ips;
1327 /* Haswell power well */
1328 struct i915_power_well power_well;
1330 enum no_psr_reason no_psr_reason;
1332 struct i915_gpu_error gpu_error;
1334 struct drm_i915_gem_object *vlv_pctx;
1336 /* list of fbdev register on this device */
1337 struct intel_fbdev *fbdev;
1340 * The console may be contended at resume, but we don't
1341 * want to block on it.
1343 struct work_struct console_resume_work;
1345 struct drm_property *broadcast_rgb_property;
1346 struct drm_property *force_audio_property;
1348 bool hw_contexts_disabled;
1349 uint32_t hw_context_size;
1350 struct list_head context_list;
1354 struct i915_suspend_saved_registers regfile;
1358 * Raw watermark latency values:
1359 * in 0.1us units for WM0,
1360 * in 0.5us units for WM1+.
1363 uint16_t pri_latency[5];
1365 uint16_t spr_latency[5];
1367 uint16_t cur_latency[5];
1370 struct i915_package_c8 pc8;
1372 /* Old dri1 support infrastructure, beware the dragons ya fools entering
1374 struct i915_dri1_state dri1;
1375 /* Old ums support infrastructure, same warning applies. */
1376 struct i915_ums_state ums;
1377 } drm_i915_private_t;
1379 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
1381 return dev->dev_private;
1384 /* Iterate over initialised rings */
1385 #define for_each_ring(ring__, dev_priv__, i__) \
1386 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
1387 if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
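/*
 * Example (illustrative sketch): snapshotting the current seqno of every
 * initialised ring, assuming a valid dev_priv and a seqno[] array
 * dimensioned [I915_NUM_RINGS]:
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		seqno[i] = ring->get_seqno(ring, false);
 */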
1389 enum hdmi_force_audio {
1390 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
1391 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
1392 HDMI_AUDIO_AUTO, /* trust EDID */
1393 HDMI_AUDIO_ON, /* force turn on HDMI audio */
1396 #define I915_GTT_OFFSET_NONE ((u32)-1)
1398 struct drm_i915_gem_object_ops {
1399 /* Interface between the GEM object and its backing storage.
1400 * get_pages() is called once prior to binding the associated set
1401 * of pages into the GTT, and put_pages() is
1402 * called after we no longer need them. As we expect there to be
1403 * associated cost with migrating pages between the backing storage
1404 * and making them available for the GPU (e.g. clflush), we may hold
1405 * onto the pages after they are no longer referenced by the GPU
1406 * in case they may be used again shortly (for example migrating the
1407 * pages to a different memory domain within the GTT). put_pages()
1408 * will therefore most likely be called when the object itself is
1409 * being released or under memory pressure (where we attempt to
1410 * reap pages for the shrinker).
1412 int (*get_pages)(struct drm_i915_gem_object *);
1413 void (*put_pages)(struct drm_i915_gem_object *);
1416 struct drm_i915_gem_object {
1417 struct drm_gem_object base;
1419 const struct drm_i915_gem_object_ops *ops;
1421 /** List of VMAs backed by this object */
1422 struct list_head vma_list;
1424 /** Stolen memory for this object, instead of being backed by shmem. */
1425 struct drm_mm_node *stolen;
1426 struct list_head global_list;
1428 struct list_head ring_list;
1429 /** Used in execbuf to temporarily hold a ref */
1430 struct list_head obj_exec_link;
1433 * This is set if the object is on the active lists (has pending
1434 * rendering and so a non-zero seqno), and is not set if it is on the
1435 * inactive (ready to be unbound) list.
1437 unsigned int active:1;
1440 * This is set if the object has been written to since last bound
1443 unsigned int dirty:1;
1446 * Fence register bits (if any) for this object. Will be set
1447 * as needed when mapped into the GTT.
1448 * Protected by dev->struct_mutex.
1450 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
1453 * Advice: are the backing pages purgeable?
1455 unsigned int madv:2;
1458 * Current tiling mode for the object.
1460 unsigned int tiling_mode:2;
1462 * Whether the tiling parameters for the currently associated fence
1463 * register have changed. Note that for the purposes of tracking
1464 * tiling changes we also treat the unfenced register, the register
1465 * slot that the object occupies whilst it executes a fenced
1466 * command (such as BLT on gen2/3), as a "fence".
1468 unsigned int fence_dirty:1;
1470 /** How many users have pinned this object in GTT space. The following
1471 * users can each hold at most one reference: pwrite/pread, pin_ioctl
1472 * (via user_pin_count), execbuffer (objects are not allowed multiple
1473 * times for the same batchbuffer), and the framebuffer code. When
1474 * switching/pageflipping, the framebuffer code has at most two buffers
1477 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
1478 * bits with absolutely no headroom. So use 4 bits. */
1479 unsigned int pin_count:4;
1480 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
1483 * Is the object at the current location in the gtt mappable and
1484 * fenceable? Used to avoid costly recalculations.
1486 unsigned int map_and_fenceable:1;
1489 * Whether the current gtt mapping needs to be mappable (and isn't just
1490 * mappable by accident). Track pin and fault separately for a more
1491 * accurate mappable working set.
1493 unsigned int fault_mappable:1;
1494 unsigned int pin_mappable:1;
1495 unsigned int pin_display:1;
1498 * Is the GPU currently using a fence to access this buffer,
1500 unsigned int pending_fenced_gpu_access:1;
1501 unsigned int fenced_gpu_access:1;
1503 unsigned int cache_level:3;
1505 unsigned int has_aliasing_ppgtt_mapping:1;
1506 unsigned int has_global_gtt_mapping:1;
1507 unsigned int has_dma_mapping:1;
1509 struct sg_table *pages;
1510 int pages_pin_count;
1512 /* prime dma-buf support */
1513 void *dma_buf_vmapping;
1516 struct intel_ring_buffer *ring;
1518 /** Breadcrumb of last rendering to the buffer. */
1519 uint32_t last_read_seqno;
1520 uint32_t last_write_seqno;
1521 /** Breadcrumb of last fenced GPU access to the buffer. */
1522 uint32_t last_fenced_seqno;
1524 /** Current tiling stride for the object, if it's tiled. */
1527 /** Record of address bit 17 of each page at last unbind. */
1528 unsigned long *bit_17;
1530 /** User space pin count and filp owning the pin */
1531 uint32_t user_pin_count;
1532 struct drm_file *pin_filp;
1534 /** for phy allocated objects */
1535 struct drm_i915_gem_phys_object *phys_obj;
1537 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
1539 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
1542 * Request queue structure.
1544 * The request queue allows us to note sequence numbers that have been emitted
1545 * and may be associated with active buffers to be retired.
1547 * By keeping this list, we can avoid having to do questionable
1548 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
1549 * an emission time with seqnos for tracking how far ahead of the GPU we are.
1551 struct drm_i915_gem_request {
1552 /** On which ring this request was generated */
1553 struct intel_ring_buffer *ring;
1555 /** GEM sequence number associated with this request. */
1558 /** Position in the ringbuffer of the start of the request */
1561 /** Position in the ringbuffer of the end of the request */
1564 /** Context related to this request */
1565 struct i915_hw_context *ctx;
1567 /** Batch buffer related to this request if any */
1568 struct drm_i915_gem_object *batch_obj;
1570 /** Time at which this request was emitted, in jiffies. */
1571 unsigned long emitted_jiffies;
1573 /** global list entry for this request */
1574 struct list_head list;
1576 struct drm_i915_file_private *file_priv;
1577 /** file_priv list entry for this request */
1578 struct list_head client_list;
1581 struct drm_i915_file_private {
1584 struct list_head request_list;
1586 struct idr context_idr;
1588 struct i915_ctx_hang_stats hang_stats;
1591 #define INTEL_INFO(dev) (to_i915(dev)->info)
1593 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
1594 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
1595 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1596 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1597 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1598 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1599 #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
1600 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1601 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1602 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1603 #define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1604 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1605 #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1606 #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1607 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1608 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1609 #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1610 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
1611 #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
1612 (dev)->pci_device == 0x0152 || \
1613 (dev)->pci_device == 0x015a)
1614 #define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
1615 (dev)->pci_device == 0x0106 || \
1616 (dev)->pci_device == 0x010A)
1617 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1618 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1619 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1620 #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
1621 ((dev)->pci_device & 0xFF00) == 0x0C00)
1622 #define IS_ULT(dev) (IS_HASWELL(dev) && \
1623 ((dev)->pci_device & 0xFF00) == 0x0A00)
1624 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
1625 ((dev)->pci_device & 0x00F0) == 0x0020)
1626 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
1629 * The genX designation typically refers to the render engine, so render
1630 * capability related checks should use IS_GEN, while display and other checks
1631 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
1634 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1635 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1636 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1637 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1638 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
1639 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
1641 #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
1642 #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
1643 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
1644 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
1645 #define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
1646 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1648 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
1649 #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
1651 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1652 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1654 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
1655 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1657 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1658 * rows, which changed the alignment requirements and fence programming.
1660 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
1662 #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1663 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1664 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
1665 #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1666 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1667 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1669 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1670 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1671 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1673 #define HAS_IPS(dev) (IS_ULT(dev))
1675 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
1676 #define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
1677 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
1679 #define INTEL_PCH_DEVICE_ID_MASK 0xff00
1680 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1681 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
1682 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
1683 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
1684 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
1686 #define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
1687 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
1688 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1689 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1690 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
1691 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
1693 #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1695 /* DPF == dynamic parity feature */
1696 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1697 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
1699 #define GT_FREQUENCY_MULTIPLIER 50
1701 #include "i915_trace.h"
1704 * RC6 is a special power stage which allows the GPU to enter a very
1705 * low-voltage mode when idle, using down to 0V while at this stage. This
1706 * stage is entered automatically when the GPU is idle, provided RC6 support
1707 * is enabled, and as soon as a new workload arises the GPU wakes up automatically as well.
1709 * There are different RC6 modes available in Intel GPUs, which differ in the
1710 * latency required to enter and leave RC6 and in the
1711 * voltage consumed by the GPU in different states.
1713 * The combination of the following flags defines which states the GPU is allowed
1714 * to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
1715 * RC6pp is the deepest RC6. Their support by hardware varies according to the
1716 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
1717 * which brings the most power savings; deeper states save more power, but
1718 * require higher latency to switch to and wake up.
1720 #define INTEL_RC6_ENABLE (1<<0)
1721 #define INTEL_RC6p_ENABLE (1<<1)
1722 #define INTEL_RC6pp_ENABLE (1<<2)
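/*
 * Example (illustrative): the i915_enable_rc6 module parameter is
 * interpreted as a mask of these flags, so e.g. i915.enable_rc6=3
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) allows normal and deep RC6
 * but not RC6pp.
 */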
1724 extern const struct drm_ioctl_desc i915_ioctls[];
1725 extern int i915_max_ioctl;
1726 extern unsigned int i915_fbpercrtc __always_unused;
1727 extern int i915_panel_ignore_lid __read_mostly;
1728 extern unsigned int i915_powersave __read_mostly;
1729 extern int i915_semaphores __read_mostly;
1730 extern unsigned int i915_lvds_downclock __read_mostly;
1731 extern int i915_lvds_channel_mode __read_mostly;
1732 extern int i915_panel_use_ssc __read_mostly;
1733 extern int i915_vbt_sdvo_panel_type __read_mostly;
1734 extern int i915_enable_rc6 __read_mostly;
1735 extern int i915_enable_fbc __read_mostly;
1736 extern bool i915_enable_hangcheck __read_mostly;
1737 extern int i915_enable_ppgtt __read_mostly;
1738 extern int i915_enable_psr __read_mostly;
1739 extern unsigned int i915_preliminary_hw_support __read_mostly;
1740 extern int i915_disable_power_well __read_mostly;
1741 extern int i915_enable_ips __read_mostly;
1742 extern bool i915_fastboot __read_mostly;
1743 extern int i915_enable_pc8 __read_mostly;
1744 extern int i915_pc8_timeout __read_mostly;
1745 extern bool i915_prefault_disable __read_mostly;
1747 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1748 extern int i915_resume(struct drm_device *dev);
1749 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1750 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1753 void i915_update_dri1_breadcrumb(struct drm_device *dev);
1754 extern void i915_kernel_lost_context(struct drm_device * dev);
1755 extern int i915_driver_load(struct drm_device *, unsigned long flags);
1756 extern int i915_driver_unload(struct drm_device *);
1757 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
1758 extern void i915_driver_lastclose(struct drm_device * dev);
1759 extern void i915_driver_preclose(struct drm_device *dev,
1760 struct drm_file *file_priv);
1761 extern void i915_driver_postclose(struct drm_device *dev,
1762 struct drm_file *file_priv);
1763 extern int i915_driver_device_is_agp(struct drm_device * dev);
1764 #ifdef CONFIG_COMPAT
1765 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1768 extern int i915_emit_box(struct drm_device *dev,
1769 struct drm_clip_rect *box,
1771 extern int intel_gpu_reset(struct drm_device *dev);
1772 extern int i915_reset(struct drm_device *dev);
1773 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1774 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1775 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1776 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1778 extern void intel_console_resume(struct work_struct *work);
1781 void i915_queue_hangcheck(struct drm_device *dev);
1782 void i915_handle_error(struct drm_device *dev, bool wedged);
1784 extern void intel_irq_init(struct drm_device *dev);
1785 extern void intel_pm_init(struct drm_device *dev);
1786 extern void intel_hpd_init(struct drm_device *dev);
1789 extern void intel_uncore_sanitize(struct drm_device *dev);
1790 extern void intel_uncore_early_sanitize(struct drm_device *dev);
1791 extern void intel_uncore_init(struct drm_device *dev);
1792 extern void intel_uncore_clear_errors(struct drm_device *dev);
1793 extern void intel_uncore_check_errors(struct drm_device *dev);
1796 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1799 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1802 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1803 struct drm_file *file_priv);
1804 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
1805 struct drm_file *file_priv);
1806 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1807 struct drm_file *file_priv);
1808 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1809 struct drm_file *file_priv);
1810 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1811 struct drm_file *file_priv);
1812 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1813 struct drm_file *file_priv);
1814 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1815 struct drm_file *file_priv);
1816 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1817 struct drm_file *file_priv);
1818 int i915_gem_execbuffer(struct drm_device *dev, void *data,
1819 struct drm_file *file_priv);
1820 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
1821 struct drm_file *file_priv);
1822 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1823 struct drm_file *file_priv);
1824 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1825 struct drm_file *file_priv);
1826 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1827 struct drm_file *file_priv);
1828 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
1829 struct drm_file *file);
1830 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
1831 struct drm_file *file);
1832 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1833 struct drm_file *file_priv);
1834 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1835 struct drm_file *file_priv);
1836 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
1837 struct drm_file *file_priv);
1838 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
1839 struct drm_file *file_priv);
1840 int i915_gem_set_tiling(struct drm_device *dev, void *data,
1841 struct drm_file *file_priv);
1842 int i915_gem_get_tiling(struct drm_device *dev, void *data,
1843 struct drm_file *file_priv);
1844 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1845 struct drm_file *file_priv);
1846 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1847 struct drm_file *file_priv);
1848 void i915_gem_load(struct drm_device *dev);
1849 void *i915_gem_object_alloc(struct drm_device *dev);
1850 void i915_gem_object_free(struct drm_i915_gem_object *obj);
1851 int i915_gem_init_object(struct drm_gem_object *obj);
1852 void i915_gem_object_init(struct drm_i915_gem_object *obj,
1853 const struct drm_i915_gem_object_ops *ops);
1854 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1856 void i915_gem_free_object(struct drm_gem_object *obj);
1857 void i915_gem_vma_destroy(struct i915_vma *vma);
1859 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1860 struct i915_address_space *vm,
1862 bool map_and_fenceable,
1864 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1865 int __must_check i915_vma_unbind(struct i915_vma *vma);
1866 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
1867 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
1868 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1869 void i915_gem_lastclose(struct drm_device *dev);
1871 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
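/*
 * Returns the backing page at index n of obj by walking its sg table;
 * the object's pages are assumed to be pinned by the caller.
 */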
1872 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
1874 struct sg_page_iter sg_iter;
1876 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
1877 return sg_page_iter_page(&sg_iter);
1881 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
1883 BUG_ON(obj->pages == NULL);
1884 obj->pages_pin_count++;
1886 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1888 BUG_ON(obj->pages_pin_count == 0);
1889 obj->pages_pin_count--;
1892 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1893 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1894 struct intel_ring_buffer *to);
1895 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1896 struct intel_ring_buffer *ring);
1898 int i915_gem_dumb_create(struct drm_file *file_priv,
1899 struct drm_device *dev,
1900 struct drm_mode_create_dumb *args);
1901 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
1902 uint32_t handle, uint64_t *offset);
1904 * Returns true if seq1 is later than seq2.
1907 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1909 return (int32_t)(seq1 - seq2) >= 0;
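/*
 * The signed subtraction makes this robust against seqno wrap-around,
 * e.g. i915_seqno_passed(1, 0xfffffff0) is true because
 * (int32_t)(1 - 0xfffffff0) == 17 >= 0.
 */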
1912 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
1913 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
1914 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
1915 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1918 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
1920 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1921 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1922 dev_priv->fence_regs[obj->fence_reg].pin_count++;
1929 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1931 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1932 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1933 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
1934 dev_priv->fence_regs[obj->fence_reg].pin_count--;
1938 void i915_gem_retire_requests(struct drm_device *dev);
1939 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
1940 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
1941 bool interruptible);
1942 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
1944 return unlikely(atomic_read(&error->reset_counter)
1945 & I915_RESET_IN_PROGRESS_FLAG);
1948 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
1950 return atomic_read(&error->reset_counter) == I915_WEDGED;
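/*
 * Example (illustrative sketch): the lock-free wait paths described for
 * i915_gpu_error.reset_counter sample the counter up front and compare it
 * again after sleeping; a changed value means a reset ran in between and
 * the whole ioctl needs to be restarted:
 *
 *	unsigned reset_counter = atomic_read(&error->reset_counter);
 *
 *	(sleep until the seqno signals or a reset wakes us up)
 *
 *	if (reset_counter != atomic_read(&error->reset_counter))
 *		return -EAGAIN;
 */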
1953 void i915_gem_reset(struct drm_device *dev);
1954 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
1955 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1956 int __must_check i915_gem_init(struct drm_device *dev);
1957 int __must_check i915_gem_init_hw(struct drm_device *dev);
1958 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
1959 void i915_gem_init_swizzling(struct drm_device *dev);
1960 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1961 int __must_check i915_gpu_idle(struct drm_device *dev);
1962 int __must_check i915_gem_idle(struct drm_device *dev);
1963 int __i915_add_request(struct intel_ring_buffer *ring,
1964 struct drm_file *file,
1965 struct drm_i915_gem_object *batch_obj,
1967 #define i915_add_request(ring, seqno) \
1968 __i915_add_request(ring, NULL, NULL, seqno)

int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				int id,
				int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)

static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      bool map_and_fenceable,
		      bool nonblocking)
{
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
}
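
/*
 * Illustrative sketch (not part of the driver): pinning an object into the
 * global GTT and retrieving its offset, assuming the i915_gem_object_pin()/
 * i915_gem_object_unpin() pair declared earlier in this header, e.g.:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 *	if (ret == 0) {
 *		u32 offset = i915_gem_obj_ggtt_offset(obj);
 *		... program offset into the hardware ...
 *		i915_gem_object_unpin(obj);
 *	}
 */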

/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);

static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}
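
/*
 * Illustrative sketch (not part of the driver): the kref pair above keeps a
 * context alive across a sleep outside struct_mutex, e.g.:
 *
 *	i915_gem_context_reference(ctx);
 *	mutex_unlock(&dev->struct_mutex);
 *	... block without holding struct_mutex ...
 *	mutex_lock(&dev->struct_mutex);
 *	i915_gem_context_unreference(ctx);
 */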

struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
				struct drm_file *file,
				u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);

/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
			       unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);

static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}
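
/*
 * Illustrative sketch (not part of the driver): on pre-gen6 parts the
 * chipset write buffers must be flushed after CPU writes before the GPU
 * reads through the GTT, e.g.:
 *
 *	... CPU writes to the object's backing pages ...
 *	i915_gem_chipset_flush(dev);
 *	... submit GPU work that reads the object ...
 */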

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}
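
/*
 * Illustrative sketch (not part of the driver): pread/pwrite style paths
 * consult this helper to decide whether bit-17 swizzle fixups are needed
 * around CPU access to the backing pages, e.g.:
 *
 *	if (i915_gem_object_needs_bit17_swizzle(obj))
 *		i915_gem_object_do_bit_17_swizzle(obj);
 */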

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev);
void i915_teardown_sysfs(struct drm_device *dev);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);

static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);

static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}

extern void intel_i2c_reset(struct drm_device *dev);
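
/*
 * Illustrative sketch (not part of the driver): fetching the GMBUS i2c
 * adapter for a port and issuing a transfer through the core i2c layer,
 * e.g.:
 *
 *	struct i2c_adapter *adapter;
 *
 *	if (intel_gmbus_is_port_valid(port)) {
 *		adapter = intel_gmbus_get_adapter(dev_priv, port);
 *		ret = i2c_transfer(adapter, msgs, num_msgs);
 *	}
 */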

/* intel_opregion.c */
struct intel_encoder;
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif /* CONFIG_ACPI */

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

/* On the SNB platform the forcewake bit must be set before reading ring
 * registers, to keep the GT core from powering down and returning stale
 * values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
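
/*
 * Illustrative sketch (not part of the driver): reads of GT registers are
 * bracketed by a forcewake reference, e.g.:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(GEN6_RP_STATE_CAP);
 *	gen6_gt_force_wake_put(dev_priv);
 */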

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
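
/*
 * Illustrative sketch (not part of the driver): sideband accesses are
 * serialized by the caller; the RPS code, for instance, reads the punit
 * while holding its hardware lock, e.g.:
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */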

#define __i915_read(x) \
	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read

#define __i915_write(x) \
	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write

#define I915_READ8(reg)		i915_read8(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	i915_read16(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	i915_read16(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	i915_write16(dev_priv, (reg), (val), false)

#define I915_READ(reg)		i915_read32(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)		i915_read32(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	i915_write32(dev_priv, (reg), (val), false)

#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	i915_read64(dev_priv, (reg), true)

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
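
/*
 * Illustrative sketch (not part of the driver): a posting read flushes a
 * preceding MMIO write to the hardware without generating a tracepoint,
 * e.g.:
 *
 *	I915_WRITE(DEIMR, dev_priv->irq_mask);
 *	POSTING_READ(DEIMR);
 */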
2348 /* "Broadcast RGB" property */
2349 #define INTEL_BROADCAST_RGB_AUTO 0
2350 #define INTEL_BROADCAST_RGB_FULL 1
2351 #define INTEL_BROADCAST_RGB_LIMITED 2
2353 static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
2355 if (HAS_PCH_SPLIT(dev))
2356 return CPU_VGACNTRL;
2357 else if (IS_VALLEYVIEW(dev))
2358 return VLV_VGACNTRL;

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}
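
/*
 * Illustrative sketch (not part of the driver): converting the u64 pointer
 * field of an ioctl argument for use with the uaccess helpers, e.g.:
 *
 *	if (copy_from_user(buf, to_user_ptr(args->data_ptr), args->size))
 *		return -EFAULT;
 */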

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
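
/*
 * Illustrative sketch (not part of the driver): the "+ 1" above guarantees
 * at least the requested wait even when the deadline lands mid-tick, e.g.:
 *
 *	ret = wait_event_timeout(dev_priv->gmbus_wait_queue, done,
 *				 msecs_to_jiffies_timeout(50));
 */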

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}