/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <uapi/drm/i915_drm.h>
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
/* General customization:
#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."
#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"
#define pipe_name(p) ((p) + 'A')
#define transcoder_name(t) ((t) + 'A')
#define plane_name(p) ((p) + 'A')
#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
#define port_name(p) ((p) + 'A')
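/*
 * These helpers map zero-based hardware indices to the letter names used
 * in debug output. Illustrative sketch (values follow directly from the
 * macros above):
 *
 *	pipe_name(0) == 'A', pipe_name(2) == 'C'
 *	sprite_name(1, 0) - the first sprite on pipe B, given
 *	dev_priv->num_plane sprites per pipe
 */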
enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))
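/*
 * Usage sketch for the iterator above (illustrative only): walk the
 * encoders currently attached to a given CRTC.
 *
 *	struct intel_encoder *intel_encoder;
 *
 *	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
 *		handle_encoder(intel_encoder); - handle_encoder() is a
 *		hypothetical callback, not a driver function
 */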
struct drm_i915_private;
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
#define I915_NUM_PLLS 2
struct intel_dpll_hw_state {
struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
/* Used by dp and fdi links */
struct intel_link_m_n {
void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
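/*
 * Rough sketch of what the computed values mean (the exact register
 * layout lives with the callers): the data M/N pair tracks the ratio
 * (bpp * pixel_clock) / (nlanes * 8 * link_clock), and the link M/N
 * pair tracks pixel_clock / link_clock. E.g. a 148500 kHz mode at
 * 24 bpp over 4 lanes of 270000 kHz link clock gives a data ratio of
 * 3564000 / 8640000.
 */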
struct intel_ddi_plls {
/* Interface history:
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0
#define WATCH_LISTS	0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
struct drm_i915_gem_phys_object {
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	u32 __iomem *lid_state;
#define OPREGION_SIZE            (8*1024)
struct intel_overlay;
struct intel_overlay_error_state;
struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6
struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
struct sdvo_device_mapping {
struct intel_display_error_state;
struct drm_i915_error_state {
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u32 fault_reg[I915_NUM_RINGS];
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
	} **active_bo, **pinned_bo;
	u32 *active_bo_count, *pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	int hangcheck_score[I915_NUM_RINGS];
	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
struct intel_crtc_config;
struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 * Returns true on success, false on failure.
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */
	struct intel_uncore_funcs funcs;
	unsigned forcewake_count;
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i945gm) sep \
	func(need_gfx_hws) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_preliminary) sep \
	func(has_force_wake) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_bsd_ring) sep \
	func(has_blt_ring) sep \
	func(has_vebox_ring) sep \
#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
struct intel_device_info {
	u32 display_mmio_offset;
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
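/*
 * DEV_INFO_FOR_EACH_FLAG is an X-macro: the flag list is written once and
 * re-expanded with different (func, sep) pairs. With DEFINE_FLAG and
 * SEP_SEMICOLON the invocation above expands roughly to:
 *
 *	u8 is_mobile:1; u8 is_i945gm:1; ... u8 has_vebox_ring:1;
 *
 * Other call sites can pass a different func, e.g. one that prints each
 * flag name, without duplicating the list.
 */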
enum i915_cache_level {
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
typedef uint32_t gen6_gtt_pte_t;
struct i915_address_space {
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start; /* Start offset always 0 for dri2 */
	size_t total; /* size addr space maps (ex. 2GB for ggtt) */
	 * List of objects currently involved in rendering.
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 * A reference is held on the buffer while on this list.
	struct list_head active_list;
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 * last_rendering_seqno is 0 while an object is in this list.
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	struct list_head inactive_list;
	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level);
	void (*clear_range)(struct i915_address_space *vm,
			    unsigned int first_entry,
			    unsigned int num_entries);
	void (*insert_entries)(struct i915_address_space *vm,
			       unsigned int first_entry,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
	struct i915_address_space base;
	size_t stolen_size;		/* Total size of stolen memory */
	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */
	/** "Graphics Stolen Memory" holds the global PTEs */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
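/*
 * Worked example for the macro above: with base.total == 2GB and 4KiB
 * pages (PAGE_SHIFT == 12), gtt_total_entries(gtt) is 2^31 >> 12,
 * i.e. 524288 PTEs.
 */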
struct i915_hw_ppgtt {
	struct i915_address_space base;
	unsigned num_pd_entries;
	struct page **pt_pages;
	dma_addr_t *pt_dma_addr;
	int (*enable)(struct drm_device *dev);
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	/** This object's place on the active/inactive lists */
	struct list_head mm_list;
	struct list_head vma_link; /* Link in the object's VMA list */
	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;
	 * Used for performing relocations during execbuffer insertion.
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;
struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;
	/* This context had batch active when hang was declared */
	unsigned batch_active;
	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;
	/* This context is banned from submitting more work */
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;
	struct list_head link;
	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;
	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
	FBC_OK, /* FBC is enabled */
	FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
	FBC_NO_OUTPUT, /* no outputs enabled to compress */
	FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
	FBC_MODE_TOO_LARGE, /* mode too large for compression */
	FBC_BAD_PLANE, /* fbc not supported on plane */
	FBC_NOT_TILED, /* buffer not tiled */
	FBC_MULTIPLE_PIPES, /* more than one pipe active */
	FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	PSR_NO_SOURCE, /* Not supported on platform */
	PSR_NO_SINK, /* Not supported by panel */
	PSR_PWR_WELL_ENABLED,
	PSR_INTERLACED_ENABLED,
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
struct intel_fbc_work;
	struct i2c_adapter adapter;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
struct i915_suspend_saved_registers {
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_CONTROL2;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	/* On vlv we need to manually drop to Vmin with a delayed work. */
	struct delayed_work vlv_work;
	/* The variables below and all the rps hw state are protected by
	 * dev->struct_mutex. */
	struct delayed_work delayed_resume_work;
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	struct mutex hw_lock;
/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;
struct intel_ilk_power_mgmt {
	unsigned long last_time1;
	unsigned long chipset_power;
	struct timespec last_time2;
	unsigned long gfx_power;
	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
/* Power well structure for haswell */
struct i915_power_well {
	struct drm_device *device;
	/* power well enable/disable usage count */
struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;
struct i915_ums_state {
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	struct list_head unbound_list;
	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */
	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;
	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;
	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	struct delayed_work retire_work;
	 * Are we in a non-interruptible section of code like
	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;
	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
struct drm_i915_error_state_buf {
struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
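/* Worked out: DIV_ROUND_UP(8 * 1500, 1000) == 12, so with the default
 * 1500ms hangcheck period two hangs within a ~12s window ban the
 * context.
 */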
	struct timer_list hangcheck_timer;
	/* For reset and error_state handling. */
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;
	 * State variable and reset counter controlling the reset flow
	 * Upper bits are for the reset counter. This counter is used by the
	 * wait_seqno code to notice, in a race-free way, that a reset event
	 * happened and that it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	atomic_t reset_counter;
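	/*
	 * Sketch of the lock-free waiter pattern this enables (illustrative,
	 * not the driver's actual wait code):
	 *
	 *	unsigned reset = atomic_read(&error->reset_counter);
	 *	...wait for the seqno...
	 *	if (reset != atomic_read(&error->reset_counter))
	 *		return -EAGAIN; - a reset fired, restart the ioctl
	 */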
	 * Special values/flags for reset_counter
	 * Note that the code relies on
	 * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			0xffffffff
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	wait_queue_head_t reset_queue;
	/* For gpu hang simulation. */
	unsigned int stop_rings;
enum modeset_restore {
	MODESET_ON_LID_OPEN,
struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	int edp_preemphasis;
	bool edp_initialized;
	struct edp_power_seq edp_pps;
	struct child_device_config *child_dev;
enum intel_ddb_partitioning {
	INTEL_DDB_PART_5_6, /* IVB+ */
struct intel_wm_level {
 * This struct tracks the state needed for the Package C8+ feature.
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * Our driver only allows PC8+ when all the outputs are disabled, the power well
 * is disabled and the GPU is idle. When these conditions are met, we manually
 * do the other conditions: disable the interrupts, clocks and switch LCPLL
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6.
 * The interrupt disabling is part of the requirements. We can only leave the
 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
 * can lock the machine.
 * Ideally every piece of our code that needs PC8+ disabled would call
 * hsw_disable_package_c8, which would increment disable_count and prevent the
 * system from reaching PC8+. But we don't have a symmetric way to do this for
 * everything, so we have the requirements_met and gpu_idle variables. When we
 * switch requirements_met or gpu_idle to true we decrease disable_count, and
 * increase it in the opposite case. The requirements_met variable is true when
 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
 * variable is true when the GPU is idle.
 * In addition to everything, we only actually enable PC8+ if disable_count
 * stays at zero for at least a few seconds. This is implemented with the
 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
 * consecutive times when all screens are disabled and some background app
 * queries the state of our connectors, or we have some application constantly
 * waking up to use the GPU. Only after the enable_work function actually
 * enables PC8+ does the "enable" variable become true, which means that it can
 * be false even if disable_count is 0.
 * The irqs_disabled variable becomes true exactly after we disable the IRQs
 * and goes back to false exactly before we re-enable the IRQs. We use this
 * variable to check if someone is trying to enable/disable IRQs while they're
 * supposed to be disabled. This shouldn't happen and we'll print some error
 * messages in case it happens, but if it actually happens we'll also update
 * the variables inside struct regsave so when we restore the IRQs they will
 * contain the latest expected values.
 * For more, read "Display Sequences for Package C8" in our documentation.
struct i915_package_c8 {
	bool requirements_met;
	/* Only true after the delayed work task actually enables it. */
	struct delayed_work enable_work;
	uint32_t gen6_pmimr;
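	/*
	 * Sketch of the refcount pattern described above (illustrative; the
	 * enable side is assumed to mirror hsw_disable_package_c8):
	 *
	 *	hsw_disable_package_c8(dev_priv);  - disable_count++
	 *	...touch hardware that must stay out of PC8+...
	 *	hsw_enable_package_c8(dev_priv);   - disable_count--, PC8+
	 *					     re-armed via enable_work
	 */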
typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;
	const struct intel_device_info *info;
	int relative_constants_mode;
	struct intel_uncore uncore;
	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;
	 * Base address of the gmbus and gpio block.
	uint32_t gpio_mmio_base;
	wait_queue_head_t gmbus_wait_queue;
	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;
	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;
	atomic_t irq_received;
	/* protects the irq masks */
	spinlock_t irq_lock;
	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;
	/* DPIO indirect register protection */
	struct mutex dpio_lock;
	/** Cached value of IMR to avoid reads in updating the bitfield */
	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	unsigned long hpd_last_jiffies;
		HPD_MARK_DISABLED = 2
	} hpd_stats[HPD_NUM_PINS];
	struct timer_list hotplug_reenable_timer;
	struct i915_fbc fbc;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;
	struct intel_overlay *overlay;
	unsigned int sprite_scaling_enabled;
		spinlock_t lock; /* bl registers and the above bl fields */
		struct backlight_device *device;
	bool no_aux_handshake;
	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
	unsigned int fsb_freq, mem_freq, is_ddr3;
	 * wq - Driver workqueue for GEM.
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	struct workqueue_struct *wq;
	/* Display functions */
	struct drm_i915_display_funcs display;
	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;
	unsigned long quirks;
	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;
	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VMA representing the global address space */
	struct i915_gem_mm mm;
	/* Kernel Modesetting */
	struct sdvo_device_mapping sdvo_mappings[2];
	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;
	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	bool mchbar_need_disable;
	struct intel_l3_parity l3_parity;
	/* Cannot be determined by PCIID. You must always read a register. */
	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;
	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;
	/* Haswell power well */
	struct i915_power_well power_well;
	enum no_psr_reason no_psr_reason;
	struct i915_gpu_error gpu_error;
	struct drm_i915_gem_object *vlv_pctx;
	/* list of fbdevs registered on this device */
	struct intel_fbdev *fbdev;
	 * The console may be contended at resume, but we don't
	 * want it to block on it.
	struct work_struct console_resume_work;
	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;
	bool hw_contexts_disabled;
	uint32_t hw_context_size;
	struct list_head context_list;
	struct i915_suspend_saved_registers regfile;
	 * Raw watermark latency values:
	 * in 0.1us units for WM0,
	 * in 0.5us units for WM1+.
	uint16_t pri_latency[5];
	uint16_t spr_latency[5];
	uint16_t cur_latency[5];
	struct i915_package_c8 pc8;
	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;
} drm_i915_private_t;
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
	return dev->dev_private;
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
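/*
 * Usage sketch (illustrative): retire completed requests on every ring
 * that was actually initialised on this platform.
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		i915_gem_retire_requests_ring(ring);
 */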
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
#define I915_GTT_OFFSET_NONE ((u32)-1)
struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
struct drm_i915_gem_object {
	struct drm_gem_object base;
	const struct drm_i915_gem_object_ops *ops;
	/** List of VMAs backed by this object */
	struct list_head vma_list;
	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;
	struct list_head ring_list;
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	unsigned int active:1;
	 * This is set if the object has been written to since last bound
	unsigned int dirty:1;
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
	 * Advice: are the backing pages purgeable?
	unsigned int madv:2;
	 * Current tiling mode for the object.
	unsigned int tiling_mode:2;
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	unsigned int fence_dirty:1;
	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	unsigned int map_and_fenceable:1;
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;
	 * Is the GPU currently using a fence to access this buffer,
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;
	unsigned int cache_level:3;
	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;
	struct sg_table *pages;
	int pages_pin_count;
	/* prime dma-buf support */
	void *dma_buf_vmapping;
	struct intel_ring_buffer *ring;
	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;
	/** Current tiling stride for the object, if it's tiled. */
	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;
	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;
	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 * Request queue structure.
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;
	/** GEM sequence number associated with this request. */
	/** Position in the ringbuffer of the start of the request */
	/** Position in the ringbuffer of the end of the request */
	/** Context related to this request */
	struct i915_hw_context *ctx;
	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;
	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;
	/** global list entry for this request */
	struct list_head list;
	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
struct drm_i915_file_private {
	struct list_head request_list;
	struct idr context_idr;
	struct i915_ctx_hang_stats hang_stats;
#define INTEL_INFO(dev)	(to_i915(dev)->info)
#define IS_I830(dev)		((dev)->pci_device == 0x3577)
#define IS_845G(dev)		((dev)->pci_device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
				 (dev)->pci_device == 0x0152 || \
				 (dev)->pci_device == 0x015a)
#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
				 (dev)->pci_device == 0x0106 || \
				 (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
				 ((dev)->pci_device & 0xFF00) == 0x0C00)
#define IS_ULT(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pci_device & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pci_device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
#define HAS_BSD(dev)		(INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->has_vebox_ring)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev)		(IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
#define HAS_FW_BLC(dev)		(INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev)	(INTEL_INFO(dev)->has_fbc)
#define HAS_IPS(dev)		(IS_ULT(dev))
#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_GPU_CACHE(dev))
#define GT_FREQUENCY_MULTIPLIER 50
#include "i915_trace.h"
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 * There are different RC6 modes available on Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6 and in the
 * voltage consumed by the GPU in different states.
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and the
 * one which brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up.
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
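/*
 * Example (illustrative): a platform policy that allows RC6 and deep RC6
 * but not the deepest state would pass
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE), i.e. 0x3, leaving RC6pp off.
 */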
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
extern bool i915_fastboot __read_mostly;
extern int i915_enable_pc8 __read_mostly;
extern int i915_pc8_timeout __read_mostly;
extern bool i915_prefault_disable __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
extern void intel_console_resume(struct work_struct *work);
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     bool map_and_fenceable,
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
	struct sg_page_iter sg_iter;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
		return sg_page_iter_page(&sg_iter);
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
				    struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
 * Returns true if seq1 is later than seq2.
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	return (int32_t)(seq1 - seq2) >= 0;
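/*
 * The signed subtraction above keeps the comparison correct across
 * 32-bit seqno wrap-around. Example: seq1 == 2 just after a wrap and
 * seq2 == 0xfffffffe gives (int32_t)(2 - 0xfffffffe) == 4 >= 0, so
 * seq1 is correctly reported as later.
 */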
1909 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
1910 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
1911 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
1912 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1915 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
1917 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1918 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1919 dev_priv->fence_regs[obj->fence_reg].pin_count++;
1926 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1928 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1929 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1930 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
1931 dev_priv->fence_regs[obj->fence_reg].pin_count--;
1935 void i915_gem_retire_requests(struct drm_device *dev);
1936 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
1937 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
1938 bool interruptible);
1939 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
1941 return unlikely(atomic_read(&error->reset_counter)
1942 & I915_RESET_IN_PROGRESS_FLAG);
1945 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
1947 return atomic_read(&error->reset_counter) == I915_WEDGED;
1950 void i915_gem_reset(struct drm_device *dev);
1951 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
1952 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1953 int __must_check i915_gem_init(struct drm_device *dev);
1954 int __must_check i915_gem_init_hw(struct drm_device *dev);
1955 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
1956 void i915_gem_init_swizzling(struct drm_device *dev);
1957 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1958 int __must_check i915_gpu_idle(struct drm_device *dev);
1959 int __must_check i915_gem_idle(struct drm_device *dev);
1960 int __i915_add_request(struct intel_ring_buffer *ring,
1961 struct drm_file *file,
1962 struct drm_i915_gem_object *batch_obj,
1964 #define i915_add_request(ring, seqno) \
1965 __i915_add_request(ring, NULL, NULL, seqno)
1966 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
1968 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
1970 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1973 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
1975 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1977 struct intel_ring_buffer *pipelined);
1978 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
1979 int i915_gem_attach_phys_object(struct drm_device *dev,
1980 struct drm_i915_gem_object *obj,
1983 void i915_gem_detach_phys_object(struct drm_device *dev,
1984 struct drm_i915_gem_object *obj);
1985 void i915_gem_free_all_phys_object(struct drm_device *dev);
1986 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
1989 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
1991 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1992 int tiling_mode, bool fenced);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      bool map_and_fenceable,
		      bool nonblocking)
{
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
}
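/*
 * Hedged usage sketch: a pin-then-use sequence against the global GTT. The
 * alignment and flags below are illustrative, not a prescription.
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 *	if (ret)
 *		return ret;
 *	offset = i915_gem_obj_ggtt_offset(obj);	// stable while pinned
 */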
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}
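/*
 * Context lifetimes are kref-managed: every reference() must be balanced by
 * an unreference(), and i915_gem_context_free() runs only when kref_put()
 * drops the final reference.
 */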
struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
				struct drm_file *file,
				u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
			       unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}
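/*
 * Pre-gen6 chipsets are not coherent with respect to the GTT, so CPU writes
 * must be pushed out through intel_gtt_chipset_flush(); on gen6+ the helper
 * above deliberately does nothing.
 */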
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
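/*
 * Three eviction granularities: evict_something() frees just enough space in
 * one address space to satisfy a single allocation, evict_vm() empties one
 * address space, and evict_everything() unbinds across the whole device.
 */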
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
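/*
 * The 9_10_17 swizzle mode folds physical address bit 17 into the tiling
 * layout, and that bit changes when a page is swapped out and back in to a
 * different physical page. Objects for which the helper above returns true
 * therefore need their pages re-swizzled via the do/save helpers around
 * swap, since the CPU mapping cannot observe bit 17 directly.
 */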
/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);
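/*
 * Hedged usage sketch: fetching an adapter and dropping to bit-banging when
 * the GMBUS engine misbehaves. The port choice here is illustrative only.
 *
 *	struct i2c_adapter *adapter =
 *		intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_PANEL);
 *	if (!intel_gmbus_is_forced_bit(adapter))
 *		intel_gmbus_force_bit(adapter, true);	// bit-bang fallback
 */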
/* intel_opregion.c */
struct intel_encoder;
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif /* CONFIG_ACPI */

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
/* On the SNB platform, the forcewake bit must be set before reading ring
 * registers, to prevent the GT core from powering down and stale values
 * from being returned.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
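/*
 * Hedged usage sketch: every get must be balanced by a put once the reads
 * are done, or the GT is held out of its power-saving states. The register
 * read in the middle is illustrative only.
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	head = I915_READ(RING_HEAD(ring->mmio_base));
 *	gen6_gt_force_wake_put(dev_priv);
 */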
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
#define __i915_read(x) \
	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read

#define __i915_write(x) \
	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write
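/*
 * The x-macros above stamp out declarations for i915_read8/16/32/64 and
 * i915_write8/16/32/64; the I915_READ and I915_WRITE wrapper macros below
 * bind them to the dev_priv in scope and select, via the final argument,
 * whether the access hits the register tracepoints.
 */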
#define I915_READ8(reg)		i915_read8(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	i915_read16(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	i915_read16(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	i915_write16(dev_priv, (reg), (val), false)

#define I915_READ(reg)		i915_read32(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)		i915_read32(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	i915_write32(dev_priv, (reg), (val), false)

#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	i915_read64(dev_priv, (reg), true)
#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
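/*
 * A posting read forces preceding MMIO writes out to the device before the
 * CPU proceeds; the value itself is discarded, and the NOTRACE variant is
 * used so these flushes do not flood the register tracepoints.
 */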
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
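/*
 * The VGA control register lives at different addresses across platforms;
 * the helper below picks the right one for PCH-split, Valleyview and
 * legacy parts.
 */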
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;
	else if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else
		return VGACNTRL;
}
static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}
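/*
 * Userspace pointers cross the ioctl ABI as u64 so that 32-bit and 64-bit
 * userspace share one struct layout; the double cast above narrows the
 * value to the native pointer width without conversion warnings.
 */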
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
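/*
 * Both *_timeout helpers add one jiffy (clamped to MAX_JIFFY_OFFSET) so that
 * a timer expiring on a tick boundary cannot return before the requested
 * time has fully elapsed.
 */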