drm/i915: Update flush_all_caches() to take request structures
drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include <drm/drmP.h>
31 #include "i915_drv.h"
32 #include <drm/i915_drm.h>
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35
36 bool
37 intel_ring_initialized(struct intel_engine_cs *ring)
38 {
39         struct drm_device *dev = ring->dev;
40
41         if (!dev)
42                 return false;
43
44         if (i915.enable_execlists) {
45                 struct intel_context *dctx = ring->default_context;
46                 struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
47
48                 return ringbuf->obj;
49         } else
50                 return ring->buffer && ring->buffer->obj;
51 }
52
53 int __intel_ring_space(int head, int tail, int size)
54 {
55         int space = head - tail;
56         if (space <= 0)
57                 space += size;
58         return space - I915_RING_FREE_SPACE;
59 }
60
61 void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
62 {
63         if (ringbuf->last_retired_head != -1) {
64                 ringbuf->head = ringbuf->last_retired_head;
65                 ringbuf->last_retired_head = -1;
66         }
67
68         ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
69                                             ringbuf->tail, ringbuf->size);
70 }
71
72 int intel_ring_space(struct intel_ringbuffer *ringbuf)
73 {
74         intel_ring_update_space(ringbuf);
75         return ringbuf->space;
76 }
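/*
 * Worked example (illustrative only, not part of the driver): with a
 * 4096-byte ring, head == 512 and tail == 3584 the producer has wrapped
 * past the consumer, so
 *
 *     space = 512 - 3584 = -3072  ->  -3072 + 4096 = 1024
 *
 * and __intel_ring_space() reports 1024 - I915_RING_FREE_SPACE usable
 * bytes; the small reserve keeps the tail from ever running right up
 * against the head.
 */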
77
78 bool intel_ring_stopped(struct intel_engine_cs *ring)
79 {
80         struct drm_i915_private *dev_priv = ring->dev->dev_private;
81         return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
82 }
83
84 static void __intel_ring_advance(struct intel_engine_cs *ring)
85 {
86         struct intel_ringbuffer *ringbuf = ring->buffer;
87         ringbuf->tail &= ringbuf->size - 1;
88         if (intel_ring_stopped(ring))
89                 return;
90         ring->write_tail(ring, ringbuf->tail);
91 }
92
93 static int
94 gen2_render_ring_flush(struct intel_engine_cs *ring,
95                        u32      invalidate_domains,
96                        u32      flush_domains)
97 {
98         u32 cmd;
99         int ret;
100
101         cmd = MI_FLUSH;
102         if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
103                 cmd |= MI_NO_WRITE_FLUSH;
104
105         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
106                 cmd |= MI_READ_FLUSH;
107
108         ret = intel_ring_begin(ring, 2);
109         if (ret)
110                 return ret;
111
112         intel_ring_emit(ring, cmd);
113         intel_ring_emit(ring, MI_NOOP);
114         intel_ring_advance(ring);
115
116         return 0;
117 }
118
119 static int
120 gen4_render_ring_flush(struct intel_engine_cs *ring,
121                        u32      invalidate_domains,
122                        u32      flush_domains)
123 {
124         struct drm_device *dev = ring->dev;
125         u32 cmd;
126         int ret;
127
128         /*
129          * read/write caches:
130          *
131          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
132          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
133          * also flushed at 2d versus 3d pipeline switches.
134          *
135          * read-only caches:
136          *
137          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
138          * MI_READ_FLUSH is set, and is always flushed on 965.
139          *
140          * I915_GEM_DOMAIN_COMMAND may not exist?
141          *
142          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
143          * invalidated when MI_EXE_FLUSH is set.
144          *
145          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
146          * invalidated with every MI_FLUSH.
147          *
148          * TLBs:
149          *
150          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
151          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
152          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
153          * are flushed at any MI_FLUSH.
154          */
155
156         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
157         if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
158                 cmd &= ~MI_NO_WRITE_FLUSH;
159         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
160                 cmd |= MI_EXE_FLUSH;
161
162         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
163             (IS_G4X(dev) || IS_GEN5(dev)))
164                 cmd |= MI_INVALIDATE_ISP;
165
166         ret = intel_ring_begin(ring, 2);
167         if (ret)
168                 return ret;
169
170         intel_ring_emit(ring, cmd);
171         intel_ring_emit(ring, MI_NOOP);
172         intel_ring_advance(ring);
173
174         return 0;
175 }
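/*
 * Illustrative example (not driver code): a caller flushing the render
 * cache while invalidating the instruction cache would pass
 * flush_domains = I915_GEM_DOMAIN_RENDER and
 * invalidate_domains = I915_GEM_DOMAIN_INSTRUCTION, and the logic above
 * reduces to
 *
 *     cmd = MI_FLUSH | MI_EXE_FLUSH;
 *
 * i.e. MI_NO_WRITE_FLUSH is cleared so the render cache is written back,
 * and MI_EXE_FLUSH invalidates the instruction cache.  MI_INVALIDATE_ISP
 * is only added when I915_GEM_DOMAIN_COMMAND is invalidated on G4X/gen5.
 */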
176
177 /**
178  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
179  * implementing two workarounds on gen6.  From section 1.4.7.1
180  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
181  *
182  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
183  * produced by non-pipelined state commands), software needs to first
184  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
185  * 0.
186  *
187  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
188  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
189  *
190  * And the workaround for these two requires this workaround first:
191  *
192  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
193  * BEFORE the pipe-control with a post-sync op and no write-cache
194  * flushes.
195  *
196  * And this last workaround is tricky because of the requirements on
197  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
198  * volume 2 part 1:
199  *
200  *     "1 of the following must also be set:
201  *      - Render Target Cache Flush Enable ([12] of DW1)
202  *      - Depth Cache Flush Enable ([0] of DW1)
203  *      - Stall at Pixel Scoreboard ([1] of DW1)
204  *      - Depth Stall ([13] of DW1)
205  *      - Post-Sync Operation ([13] of DW1)
206  *      - Notify Enable ([8] of DW1)"
207  *
208  * The cache flushes require the workaround flush that triggered this
209  * one, so we can't use it.  Depth stall would trigger the same.
210  * Post-sync nonzero is what triggered this second workaround, so we
211  * can't use that one either.  Notify enable is IRQs, which aren't
212  * really our business.  That leaves only stall at scoreboard.
213  */
214 static int
215 intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
216 {
217         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
218         int ret;
219
220
221         ret = intel_ring_begin(ring, 6);
222         if (ret)
223                 return ret;
224
225         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
226         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
227                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
228         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
229         intel_ring_emit(ring, 0); /* low dword */
230         intel_ring_emit(ring, 0); /* high dword */
231         intel_ring_emit(ring, MI_NOOP);
232         intel_ring_advance(ring);
233
234         ret = intel_ring_begin(ring, 6);
235         if (ret)
236                 return ret;
237
238         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
239         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
240         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
241         intel_ring_emit(ring, 0);
242         intel_ring_emit(ring, 0);
243         intel_ring_emit(ring, MI_NOOP);
244         intel_ring_advance(ring);
245
246         return 0;
247 }
248
249 static int
250 gen6_render_ring_flush(struct intel_engine_cs *ring,
251                          u32 invalidate_domains, u32 flush_domains)
252 {
253         u32 flags = 0;
254         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
255         int ret;
256
257         /* Force SNB workarounds for PIPE_CONTROL flushes */
258         ret = intel_emit_post_sync_nonzero_flush(ring);
259         if (ret)
260                 return ret;
261
262         /* Just flush everything.  Experiments have shown that reducing the
263          * number of bits based on the write domains has little performance
264          * impact.
265          */
266         if (flush_domains) {
267                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
268                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
269                 /*
270                  * Ensure that any following seqno writes only happen
271                  * when the render cache is indeed flushed.
272                  */
273                 flags |= PIPE_CONTROL_CS_STALL;
274         }
275         if (invalidate_domains) {
276                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
277                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
278                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
279                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
280                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
281                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
282                 /*
283                  * TLB invalidate requires a post-sync write.
284                  */
285                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
286         }
287
288         ret = intel_ring_begin(ring, 4);
289         if (ret)
290                 return ret;
291
292         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
293         intel_ring_emit(ring, flags);
294         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
295         intel_ring_emit(ring, 0);
296         intel_ring_advance(ring);
297
298         return 0;
299 }
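/*
 * Summary for reference (illustrative, nothing is emitted here): one call
 * to gen6_render_ring_flush() therefore places three PIPE_CONTROLs in the
 * ring -- a 5-dword CS_STALL | STALL_AT_SCOREBOARD, a 5-dword QW_WRITE to
 * the scratch page (both from intel_emit_post_sync_nonzero_flush()), and
 * finally the 4-dword PIPE_CONTROL carrying the real flush/invalidate
 * flags built above.
 */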
300
301 static int
302 gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
303 {
304         int ret;
305
306         ret = intel_ring_begin(ring, 4);
307         if (ret)
308                 return ret;
309
310         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
311         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
312                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
313         intel_ring_emit(ring, 0);
314         intel_ring_emit(ring, 0);
315         intel_ring_advance(ring);
316
317         return 0;
318 }
319
320 static int
321 gen7_render_ring_flush(struct intel_engine_cs *ring,
322                        u32 invalidate_domains, u32 flush_domains)
323 {
324         u32 flags = 0;
325         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
326         int ret;
327
328         /*
329          * Ensure that any following seqno writes only happen when the render
330          * cache is indeed flushed.
331          *
332          * Workaround: 4th PIPE_CONTROL command (except the ones with only
333          * read-cache invalidate bits set) must have the CS_STALL bit set. We
334          * don't try to be clever and just set it unconditionally.
335          */
336         flags |= PIPE_CONTROL_CS_STALL;
337
338         /* Just flush everything.  Experiments have shown that reducing the
339          * number of bits based on the write domains has little performance
340          * impact.
341          */
342         if (flush_domains) {
343                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
344                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
345         }
346         if (invalidate_domains) {
347                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
348                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
349                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
350                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
351                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
352                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
353                 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
354                 /*
355                  * TLB invalidate requires a post-sync write.
356                  */
357                 flags |= PIPE_CONTROL_QW_WRITE;
358                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
359
360                 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
361
362                 /* Workaround: we must issue a pipe_control with CS-stall bit
363                  * set before a pipe_control command that has the state cache
364                  * invalidate bit set. */
365                 gen7_render_ring_cs_stall_wa(ring);
366         }
367
368         ret = intel_ring_begin(ring, 4);
369         if (ret)
370                 return ret;
371
372         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
373         intel_ring_emit(ring, flags);
374         intel_ring_emit(ring, scratch_addr);
375         intel_ring_emit(ring, 0);
376         intel_ring_advance(ring);
377
378         return 0;
379 }
380
381 static int
382 gen8_emit_pipe_control(struct intel_engine_cs *ring,
383                        u32 flags, u32 scratch_addr)
384 {
385         int ret;
386
387         ret = intel_ring_begin(ring, 6);
388         if (ret)
389                 return ret;
390
391         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
392         intel_ring_emit(ring, flags);
393         intel_ring_emit(ring, scratch_addr);
394         intel_ring_emit(ring, 0);
395         intel_ring_emit(ring, 0);
396         intel_ring_emit(ring, 0);
397         intel_ring_advance(ring);
398
399         return 0;
400 }
401
402 static int
403 gen8_render_ring_flush(struct intel_engine_cs *ring,
404                        u32 invalidate_domains, u32 flush_domains)
405 {
406         u32 flags = 0;
407         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
408         int ret;
409
410         flags |= PIPE_CONTROL_CS_STALL;
411
412         if (flush_domains) {
413                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
414                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
415         }
416         if (invalidate_domains) {
417                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
418                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
419                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
420                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
421                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
422                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
423                 flags |= PIPE_CONTROL_QW_WRITE;
424                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
425
426                 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
427                 ret = gen8_emit_pipe_control(ring,
428                                              PIPE_CONTROL_CS_STALL |
429                                              PIPE_CONTROL_STALL_AT_SCOREBOARD,
430                                              0);
431                 if (ret)
432                         return ret;
433         }
434
435         return gen8_emit_pipe_control(ring, flags, scratch_addr);
436 }
437
438 static void ring_write_tail(struct intel_engine_cs *ring,
439                             u32 value)
440 {
441         struct drm_i915_private *dev_priv = ring->dev->dev_private;
442         I915_WRITE_TAIL(ring, value);
443 }
444
445 u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
446 {
447         struct drm_i915_private *dev_priv = ring->dev->dev_private;
448         u64 acthd;
449
450         if (INTEL_INFO(ring->dev)->gen >= 8)
451                 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
452                                          RING_ACTHD_UDW(ring->mmio_base));
453         else if (INTEL_INFO(ring->dev)->gen >= 4)
454                 acthd = I915_READ(RING_ACTHD(ring->mmio_base));
455         else
456                 acthd = I915_READ(ACTHD);
457
458         return acthd;
459 }
460
461 static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
462 {
463         struct drm_i915_private *dev_priv = ring->dev->dev_private;
464         u32 addr;
465
466         addr = dev_priv->status_page_dmah->busaddr;
467         if (INTEL_INFO(ring->dev)->gen >= 4)
468                 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
469         I915_WRITE(HWS_PGA, addr);
470 }
471
472 static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
473 {
474         struct drm_device *dev = ring->dev;
475         struct drm_i915_private *dev_priv = ring->dev->dev_private;
476         u32 mmio = 0;
477
478         /* The ring status page addresses are no longer next to the rest of
479          * the ring registers as of gen7.
480          */
481         if (IS_GEN7(dev)) {
482                 switch (ring->id) {
483                 case RCS:
484                         mmio = RENDER_HWS_PGA_GEN7;
485                         break;
486                 case BCS:
487                         mmio = BLT_HWS_PGA_GEN7;
488                         break;
489                 /*
490          * VCS2 doesn't actually exist on Gen7; this case is only here
491          * to silence the gcc switch check warning.
492                  */
493                 case VCS2:
494                 case VCS:
495                         mmio = BSD_HWS_PGA_GEN7;
496                         break;
497                 case VECS:
498                         mmio = VEBOX_HWS_PGA_GEN7;
499                         break;
500                 }
501         } else if (IS_GEN6(ring->dev)) {
502                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
503         } else {
504                 /* XXX: gen8 returns to sanity */
505                 mmio = RING_HWS_PGA(ring->mmio_base);
506         }
507
508         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
509         POSTING_READ(mmio);
510
511         /*
512          * Flush the TLB for this page
513          *
514          * FIXME: These two bits have disappeared on gen8, so a question
515          * arises: do we still need this and if so how should we go about
516          * invalidating the TLB?
517          */
518         if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
519                 u32 reg = RING_INSTPM(ring->mmio_base);
520
521                 /* ring should be idle before issuing a sync flush */
522                 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
523
524                 I915_WRITE(reg,
525                            _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
526                                               INSTPM_SYNC_FLUSH));
527                 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
528                              1000))
529                         DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
530                                   ring->name);
531         }
532 }
533
534 static bool stop_ring(struct intel_engine_cs *ring)
535 {
536         struct drm_i915_private *dev_priv = to_i915(ring->dev);
537
538         if (!IS_GEN2(ring->dev)) {
539                 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
540                 if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
541                         DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
542                         /* Sometimes we observe that the idle flag is not
543                          * set even though the ring is empty. So double
544                          * check before giving up.
545                          */
546                         if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
547                                 return false;
548                 }
549         }
550
551         I915_WRITE_CTL(ring, 0);
552         I915_WRITE_HEAD(ring, 0);
553         ring->write_tail(ring, 0);
554
555         if (!IS_GEN2(ring->dev)) {
556                 (void)I915_READ_CTL(ring);
557                 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
558         }
559
560         return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
561 }
562
563 static int init_ring_common(struct intel_engine_cs *ring)
564 {
565         struct drm_device *dev = ring->dev;
566         struct drm_i915_private *dev_priv = dev->dev_private;
567         struct intel_ringbuffer *ringbuf = ring->buffer;
568         struct drm_i915_gem_object *obj = ringbuf->obj;
569         int ret = 0;
570
571         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
572
573         if (!stop_ring(ring)) {
574                 /* G45 ring initialization often fails to reset head to zero */
575                 DRM_DEBUG_KMS("%s head not reset to zero "
576                               "ctl %08x head %08x tail %08x start %08x\n",
577                               ring->name,
578                               I915_READ_CTL(ring),
579                               I915_READ_HEAD(ring),
580                               I915_READ_TAIL(ring),
581                               I915_READ_START(ring));
582
583                 if (!stop_ring(ring)) {
584                         DRM_ERROR("failed to set %s head to zero "
585                                   "ctl %08x head %08x tail %08x start %08x\n",
586                                   ring->name,
587                                   I915_READ_CTL(ring),
588                                   I915_READ_HEAD(ring),
589                                   I915_READ_TAIL(ring),
590                                   I915_READ_START(ring));
591                         ret = -EIO;
592                         goto out;
593                 }
594         }
595
596         if (I915_NEED_GFX_HWS(dev))
597                 intel_ring_setup_status_page(ring);
598         else
599                 ring_setup_phys_status_page(ring);
600
601         /* Enforce ordering by reading HEAD register back */
602         I915_READ_HEAD(ring);
603
604         /* Initialize the ring. This must happen _after_ we've cleared the ring
605          * registers with the above sequence (the readback of the HEAD registers
606          * also enforces ordering), otherwise the hw might lose the new ring
607          * register values. */
608         I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
609
610         /* WaClearRingBufHeadRegAtInit:ctg,elk */
611         if (I915_READ_HEAD(ring))
612                 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
613                           ring->name, I915_READ_HEAD(ring));
614         I915_WRITE_HEAD(ring, 0);
615         (void)I915_READ_HEAD(ring);
616
617         I915_WRITE_CTL(ring,
618                         ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
619                         | RING_VALID);
620
621         /* If the head is still not zero, the ring is dead */
622         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
623                      I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
624                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
625                 DRM_ERROR("%s initialization failed "
626                           "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
627                           ring->name,
628                           I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
629                           I915_READ_HEAD(ring), I915_READ_TAIL(ring),
630                           I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
631                 ret = -EIO;
632                 goto out;
633         }
634
635         ringbuf->last_retired_head = -1;
636         ringbuf->head = I915_READ_HEAD(ring);
637         ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
638         intel_ring_update_space(ringbuf);
639
640         memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
641
642 out:
643         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
644
645         return ret;
646 }
647
648 void
649 intel_fini_pipe_control(struct intel_engine_cs *ring)
650 {
651         struct drm_device *dev = ring->dev;
652
653         if (ring->scratch.obj == NULL)
654                 return;
655
656         if (INTEL_INFO(dev)->gen >= 5) {
657                 kunmap(sg_page(ring->scratch.obj->pages->sgl));
658                 i915_gem_object_ggtt_unpin(ring->scratch.obj);
659         }
660
661         drm_gem_object_unreference(&ring->scratch.obj->base);
662         ring->scratch.obj = NULL;
663 }
664
665 int
666 intel_init_pipe_control(struct intel_engine_cs *ring)
667 {
668         int ret;
669
670         WARN_ON(ring->scratch.obj);
671
672         ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
673         if (ring->scratch.obj == NULL) {
674                 DRM_ERROR("Failed to allocate seqno page\n");
675                 ret = -ENOMEM;
676                 goto err;
677         }
678
679         ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
680         if (ret)
681                 goto err_unref;
682
683         ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
684         if (ret)
685                 goto err_unref;
686
687         ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
688         ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
689         if (ring->scratch.cpu_page == NULL) {
690                 ret = -ENOMEM;
691                 goto err_unpin;
692         }
693
694         DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
695                          ring->name, ring->scratch.gtt_offset);
696         return 0;
697
698 err_unpin:
699         i915_gem_object_ggtt_unpin(ring->scratch.obj);
700 err_unref:
701         drm_gem_object_unreference(&ring->scratch.obj->base);
702 err:
703         return ret;
704 }
705
706 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
707 {
708         int ret, i;
709         struct intel_engine_cs *ring = req->ring;
710         struct drm_device *dev = ring->dev;
711         struct drm_i915_private *dev_priv = dev->dev_private;
712         struct i915_workarounds *w = &dev_priv->workarounds;
713
714         if (WARN_ON_ONCE(w->count == 0))
715                 return 0;
716
717         ring->gpu_caches_dirty = true;
718         ret = intel_ring_flush_all_caches(req);
719         if (ret)
720                 return ret;
721
722         ret = intel_ring_begin(ring, (w->count * 2 + 2));
723         if (ret)
724                 return ret;
725
726         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
727         for (i = 0; i < w->count; i++) {
728                 intel_ring_emit(ring, w->reg[i].addr);
729                 intel_ring_emit(ring, w->reg[i].value);
730         }
731         intel_ring_emit(ring, MI_NOOP);
732
733         intel_ring_advance(ring);
734
735         ring->gpu_caches_dirty = true;
736         ret = intel_ring_flush_all_caches(req);
737         if (ret)
738                 return ret;
739
740         DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
741
742         return 0;
743 }
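/*
 * For illustration: with w->count == 2 the loop in
 * intel_ring_workarounds_emit() produces the following dword stream,
 * matching the w->count * 2 + 2 dwords reserved above:
 *
 *     MI_LOAD_REGISTER_IMM(2)
 *     <reg[0].addr> <reg[0].value>
 *     <reg[1].addr> <reg[1].value>
 *     MI_NOOP
 */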
744
745 static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
746 {
747         int ret;
748
749         ret = intel_ring_workarounds_emit(req);
750         if (ret != 0)
751                 return ret;
752
753         ret = i915_gem_render_state_init(req);
754         if (ret)
755                 DRM_ERROR("init render state: %d\n", ret);
756
757         return ret;
758 }
759
760 static int wa_add(struct drm_i915_private *dev_priv,
761                   const u32 addr, const u32 mask, const u32 val)
762 {
763         const u32 idx = dev_priv->workarounds.count;
764
765         if (WARN_ON(idx >= I915_MAX_WA_REGS))
766                 return -ENOSPC;
767
768         dev_priv->workarounds.reg[idx].addr = addr;
769         dev_priv->workarounds.reg[idx].value = val;
770         dev_priv->workarounds.reg[idx].mask = mask;
771
772         dev_priv->workarounds.count++;
773
774         return 0;
775 }
776
777 #define WA_REG(addr, mask, val) { \
778                 const int r = wa_add(dev_priv, (addr), (mask), (val)); \
779                 if (r) \
780                         return r; \
781         }
782
783 #define WA_SET_BIT_MASKED(addr, mask) \
784         WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
785
786 #define WA_CLR_BIT_MASKED(addr, mask) \
787         WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
788
789 #define WA_SET_FIELD_MASKED(addr, mask, value) \
790         WA_REG(addr, mask, _MASKED_FIELD(mask, value))
791
792 #define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
793 #define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
794
795 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
796
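/*
 * Illustrative sketch only -- this hypothetical hook is not wired up
 * anywhere and merely shows how the helpers above are meant to be used.
 * The register and bit are ones already touched elsewhere in this file;
 * note that WA_REG expects a local called dev_priv to be in scope.
 */
static int __maybe_unused example_init_workarounds(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	/* Records INSTPM <- _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING),
	 * i.e. (bit << 16) | bit, so only that bit is affected. */
	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	return 0;
}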
797 static int bdw_init_workarounds(struct intel_engine_cs *ring)
798 {
799         struct drm_device *dev = ring->dev;
800         struct drm_i915_private *dev_priv = dev->dev_private;
801
802         WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
803
804         /* WaDisableAsyncFlipPerfMode:bdw */
805         WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
806
807         /* WaDisablePartialInstShootdown:bdw */
808         /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
809         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
810                           PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
811                           STALL_DOP_GATING_DISABLE);
812
813         /* WaDisableDopClockGating:bdw */
814         WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
815                           DOP_CLOCK_GATING_DISABLE);
816
817         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
818                           GEN8_SAMPLER_POWER_BYPASS_DIS);
819
820         /* Use Force Non-Coherent whenever executing a 3D context. This is a
821          * workaround for a possible hang in the unlikely event a TLB
822          * invalidation occurs during a PSD flush.
823          */
824         WA_SET_BIT_MASKED(HDC_CHICKEN0,
825                           /* WaForceEnableNonCoherent:bdw */
826                           HDC_FORCE_NON_COHERENT |
827                           /* WaForceContextSaveRestoreNonCoherent:bdw */
828                           HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
829                           /* WaHdcDisableFetchWhenMasked:bdw */
830                           HDC_DONOT_FETCH_MEM_WHEN_MASKED |
831                           /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
832                           (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
833
834         /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
835          * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
836          *  polygons in the same 8x4 pixel/sample area to be processed without
837          *  stalling waiting for the earlier ones to write to Hierarchical Z
838          *  buffer."
839          *
840          * This optimization is off by default for Broadwell; turn it on.
841          */
842         WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
843
844         /* Wa4x4STCOptimizationDisable:bdw */
845         WA_SET_BIT_MASKED(CACHE_MODE_1,
846                           GEN8_4x4_STC_OPTIMIZATION_DISABLE);
847
848         /*
849          * BSpec recommends 8x4 when MSAA is used,
850          * however in practice 16x4 seems fastest.
851          *
852          * Note that PS/WM thread counts depend on the WIZ hashing
853          * disable bit, which we don't touch here, but it's good
854          * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
855          */
856         WA_SET_FIELD_MASKED(GEN7_GT_MODE,
857                             GEN6_WIZ_HASHING_MASK,
858                             GEN6_WIZ_HASHING_16x4);
859
860         return 0;
861 }
862
863 static int chv_init_workarounds(struct intel_engine_cs *ring)
864 {
865         struct drm_device *dev = ring->dev;
866         struct drm_i915_private *dev_priv = dev->dev_private;
867
868         WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
869
870         /* WaDisableAsyncFlipPerfMode:chv */
871         WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
872
873         /* WaDisablePartialInstShootdown:chv */
874         /* WaDisableThreadStallDopClockGating:chv */
875         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
876                           PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
877                           STALL_DOP_GATING_DISABLE);
878
879         /* Use Force Non-Coherent whenever executing a 3D context. This is a
880          * workaround for a possible hang in the unlikely event a TLB
881          * invalidation occurs during a PSD flush.
882          */
883         /* WaForceEnableNonCoherent:chv */
884         /* WaHdcDisableFetchWhenMasked:chv */
885         WA_SET_BIT_MASKED(HDC_CHICKEN0,
886                           HDC_FORCE_NON_COHERENT |
887                           HDC_DONOT_FETCH_MEM_WHEN_MASKED);
888
889         /* According to the CACHE_MODE_0 default value documentation, some
890          * CHV platforms disable this optimization by default.  Turn it on.
891          */
892         WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
893
894         /* Wa4x4STCOptimizationDisable:chv */
895         WA_SET_BIT_MASKED(CACHE_MODE_1,
896                           GEN8_4x4_STC_OPTIMIZATION_DISABLE);
897
898         /* Improve HiZ throughput on CHV. */
899         WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
900
901         /*
902          * BSpec recommends 8x4 when MSAA is used,
903          * however in practice 16x4 seems fastest.
904          *
905          * Note that PS/WM thread counts depend on the WIZ hashing
906          * disable bit, which we don't touch here, but it's good
907          * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
908          */
909         WA_SET_FIELD_MASKED(GEN7_GT_MODE,
910                             GEN6_WIZ_HASHING_MASK,
911                             GEN6_WIZ_HASHING_16x4);
912
913         return 0;
914 }
915
916 static int gen9_init_workarounds(struct intel_engine_cs *ring)
917 {
918         struct drm_device *dev = ring->dev;
919         struct drm_i915_private *dev_priv = dev->dev_private;
920         uint32_t tmp;
921
922         /* WaDisablePartialInstShootdown:skl,bxt */
923         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
924                           PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
925
926         /* Syncing dependencies between camera and graphics:skl,bxt */
927         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
928                           GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
929
930         if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
931             INTEL_REVID(dev) == SKL_REVID_B0)) ||
932             (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
933                 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
934                 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
935                                   GEN9_DG_MIRROR_FIX_ENABLE);
936         }
937
938         if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
939             (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
940                 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
941                 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
942                                   GEN9_RHWO_OPTIMIZATION_DISABLE);
943                 WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
944                                   DISABLE_PIXEL_MASK_CAMMING);
945         }
946
947         if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
948             IS_BROXTON(dev)) {
949                 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
950                 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
951                                   GEN9_ENABLE_YV12_BUGFIX);
952         }
953
954         /* Wa4x4STCOptimizationDisable:skl,bxt */
955         WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
956
957         /* WaDisablePartialResolveInVc:skl,bxt */
958         WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
959
960         /* WaCcsTlbPrefetchDisable:skl,bxt */
961         WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
962                           GEN9_CCS_TLB_PREFETCH_ENABLE);
963
964         /* WaDisableMaskBasedCammingInRCC:skl,bxt */
965         if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
966             (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
967                 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
968                                   PIXEL_MASK_CAMMING_DISABLE);
969
970         /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
971         tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
972         if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
973             (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
974                 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
975         WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
976
977         return 0;
978 }
979
980 static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
981 {
982         struct drm_device *dev = ring->dev;
983         struct drm_i915_private *dev_priv = dev->dev_private;
984         u8 vals[3] = { 0, 0, 0 };
985         unsigned int i;
986
987         for (i = 0; i < 3; i++) {
988                 u8 ss;
989
990                 /*
991                  * Only consider slices where one, and only one, subslice has 7
992                  * EUs
993                  */
994                 if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
995                         continue;
996
997                 /*
998                  * subslice_7eu[i] != 0 (because of the check above) and
999                  * ss_max == 4 (maximum number of subslices possible per slice)
1000                  *
1001                  * ->    0 <= ss <= 3;
1002                  */
1003                 ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
1004                 vals[i] = 3 - ss;
1005         }
1006
1007         if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
1008                 return 0;
1009
1010         /* Tune IZ hashing. See intel_device_info_runtime_init() */
1011         WA_SET_FIELD_MASKED(GEN7_GT_MODE,
1012                             GEN9_IZ_HASHING_MASK(2) |
1013                             GEN9_IZ_HASHING_MASK(1) |
1014                             GEN9_IZ_HASHING_MASK(0),
1015                             GEN9_IZ_HASHING(2, vals[2]) |
1016                             GEN9_IZ_HASHING(1, vals[1]) |
1017                             GEN9_IZ_HASHING(0, vals[0]));
1018
1019         return 0;
1020 }
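/*
 * Worked example (illustrative): if slice 0 reports subslice_7eu[0] == 0x4,
 * exactly one subslice (subslice 2) has 7 EUs, so hweight8() == 1,
 * ss = ffs(0x4) - 1 = 2 and vals[0] = 3 - 2 = 1, which the masked write
 * above then programs into the GEN9_IZ_HASHING field for slice 0.
 */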
1021
1022
1023 static int skl_init_workarounds(struct intel_engine_cs *ring)
1024 {
1025         struct drm_device *dev = ring->dev;
1026         struct drm_i915_private *dev_priv = dev->dev_private;
1027
1028         gen9_init_workarounds(ring);
1029
1030         /* WaDisablePowerCompilerClockGating:skl */
1031         if (INTEL_REVID(dev) == SKL_REVID_B0)
1032                 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1033                                   BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1034
1035         if (INTEL_REVID(dev) <= SKL_REVID_D0) {
1036                 /*
1037                  * Use Force Non-Coherent whenever executing a 3D context. This
1038                  * is a workaround for a possible hang in the unlikely event
1039                  * a TLB invalidation occurs during a PSD flush.
1040                  */
1041                 /* WaForceEnableNonCoherent:skl */
1042                 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1043                                   HDC_FORCE_NON_COHERENT);
1044         }
1045
1046         if (INTEL_REVID(dev) == SKL_REVID_C0 ||
1047             INTEL_REVID(dev) == SKL_REVID_D0)
1048                 /* WaBarrierPerformanceFixDisable:skl */
1049                 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1050                                   HDC_FENCE_DEST_SLM_DISABLE |
1051                                   HDC_BARRIER_PERFORMANCE_DISABLE);
1052
1053         return skl_tune_iz_hashing(ring);
1054 }
1055
1056 static int bxt_init_workarounds(struct intel_engine_cs *ring)
1057 {
1058         struct drm_device *dev = ring->dev;
1059         struct drm_i915_private *dev_priv = dev->dev_private;
1060
1061         gen9_init_workarounds(ring);
1062
1063         /* WaDisableThreadStallDopClockGating:bxt */
1064         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
1065                           STALL_DOP_GATING_DISABLE);
1066
1067         /* WaDisableSbeCacheDispatchPortSharing:bxt */
1068         if (INTEL_REVID(dev) <= BXT_REVID_B0) {
1069                 WA_SET_BIT_MASKED(
1070                         GEN7_HALF_SLICE_CHICKEN1,
1071                         GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1072         }
1073
1074         return 0;
1075 }
1076
1077 int init_workarounds_ring(struct intel_engine_cs *ring)
1078 {
1079         struct drm_device *dev = ring->dev;
1080         struct drm_i915_private *dev_priv = dev->dev_private;
1081
1082         WARN_ON(ring->id != RCS);
1083
1084         dev_priv->workarounds.count = 0;
1085
1086         if (IS_BROADWELL(dev))
1087                 return bdw_init_workarounds(ring);
1088
1089         if (IS_CHERRYVIEW(dev))
1090                 return chv_init_workarounds(ring);
1091
1092         if (IS_SKYLAKE(dev))
1093                 return skl_init_workarounds(ring);
1094
1095         if (IS_BROXTON(dev))
1096                 return bxt_init_workarounds(ring);
1097
1098         return 0;
1099 }
1100
1101 static int init_render_ring(struct intel_engine_cs *ring)
1102 {
1103         struct drm_device *dev = ring->dev;
1104         struct drm_i915_private *dev_priv = dev->dev_private;
1105         int ret = init_ring_common(ring);
1106         if (ret)
1107                 return ret;
1108
1109         /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
1110         if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
1111                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
1112
1113         /* We need to disable the AsyncFlip performance optimisations in order
1114          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1115          * programmed to '1' on all products.
1116          *
1117          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
1118          */
1119         if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
1120                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1121
1122         /* Required for the hardware to program scanline values for waiting */
1123         /* WaEnableFlushTlbInvalidationMode:snb */
1124         if (INTEL_INFO(dev)->gen == 6)
1125                 I915_WRITE(GFX_MODE,
1126                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
1127
1128         /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
1129         if (IS_GEN7(dev))
1130                 I915_WRITE(GFX_MODE_GEN7,
1131                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
1132                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
1133
1134         if (IS_GEN6(dev)) {
1135                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1136                  * "If this bit is set, STCunit will have LRA as replacement
1137                  *  policy. [...] This bit must be reset.  LRA replacement
1138                  *  policy is not supported."
1139                  */
1140                 I915_WRITE(CACHE_MODE_0,
1141                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
1142         }
1143
1144         if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
1145                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1146
1147         if (HAS_L3_DPF(dev))
1148                 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1149
1150         return init_workarounds_ring(ring);
1151 }
1152
1153 static void render_ring_cleanup(struct intel_engine_cs *ring)
1154 {
1155         struct drm_device *dev = ring->dev;
1156         struct drm_i915_private *dev_priv = dev->dev_private;
1157
1158         if (dev_priv->semaphore_obj) {
1159                 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
1160                 drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
1161                 dev_priv->semaphore_obj = NULL;
1162         }
1163
1164         intel_fini_pipe_control(ring);
1165 }
1166
1167 static int gen8_rcs_signal(struct intel_engine_cs *signaller,
1168                            unsigned int num_dwords)
1169 {
1170 #define MBOX_UPDATE_DWORDS 8
1171         struct drm_device *dev = signaller->dev;
1172         struct drm_i915_private *dev_priv = dev->dev_private;
1173         struct intel_engine_cs *waiter;
1174         int i, ret, num_rings;
1175
1176         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
1177         num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
1178 #undef MBOX_UPDATE_DWORDS
1179
1180         ret = intel_ring_begin(signaller, num_dwords);
1181         if (ret)
1182                 return ret;
1183
1184         for_each_ring(waiter, dev_priv, i) {
1185                 u32 seqno;
1186                 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
1187                 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1188                         continue;
1189
1190                 seqno = i915_gem_request_get_seqno(
1191                                            signaller->outstanding_lazy_request);
1192                 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
1193                 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
1194                                            PIPE_CONTROL_QW_WRITE |
1195                                            PIPE_CONTROL_FLUSH_ENABLE);
1196                 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
1197                 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
1198                 intel_ring_emit(signaller, seqno);
1199                 intel_ring_emit(signaller, 0);
1200                 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
1201                                            MI_SEMAPHORE_TARGET(waiter->id));
1202                 intel_ring_emit(signaller, 0);
1203         }
1204
1205         return 0;
1206 }
1207
1208 static int gen8_xcs_signal(struct intel_engine_cs *signaller,
1209                            unsigned int num_dwords)
1210 {
1211 #define MBOX_UPDATE_DWORDS 6
1212         struct drm_device *dev = signaller->dev;
1213         struct drm_i915_private *dev_priv = dev->dev_private;
1214         struct intel_engine_cs *waiter;
1215         int i, ret, num_rings;
1216
1217         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
1218         num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
1219 #undef MBOX_UPDATE_DWORDS
1220
1221         ret = intel_ring_begin(signaller, num_dwords);
1222         if (ret)
1223                 return ret;
1224
1225         for_each_ring(waiter, dev_priv, i) {
1226                 u32 seqno;
1227                 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
1228                 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1229                         continue;
1230
1231                 seqno = i915_gem_request_get_seqno(
1232                                            signaller->outstanding_lazy_request);
1233                 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
1234                                            MI_FLUSH_DW_OP_STOREDW);
1235                 intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
1236                                            MI_FLUSH_DW_USE_GTT);
1237                 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
1238                 intel_ring_emit(signaller, seqno);
1239                 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
1240                                            MI_SEMAPHORE_TARGET(waiter->id));
1241                 intel_ring_emit(signaller, 0);
1242         }
1243
1244         return 0;
1245 }
1246
1247 static int gen6_signal(struct intel_engine_cs *signaller,
1248                        unsigned int num_dwords)
1249 {
1250         struct drm_device *dev = signaller->dev;
1251         struct drm_i915_private *dev_priv = dev->dev_private;
1252         struct intel_engine_cs *useless;
1253         int i, ret, num_rings;
1254
1255 #define MBOX_UPDATE_DWORDS 3
1256         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
1257         num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
1258 #undef MBOX_UPDATE_DWORDS
1259
1260         ret = intel_ring_begin(signaller, num_dwords);
1261         if (ret)
1262                 return ret;
1263
1264         for_each_ring(useless, dev_priv, i) {
1265                 u32 mbox_reg = signaller->semaphore.mbox.signal[i];
1266                 if (mbox_reg != GEN6_NOSYNC) {
1267                         u32 seqno = i915_gem_request_get_seqno(
1268                                            signaller->outstanding_lazy_request);
1269                         intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
1270                         intel_ring_emit(signaller, mbox_reg);
1271                         intel_ring_emit(signaller, seqno);
1272                 }
1273         }
1274
1275         /* If num_dwords was rounded, make sure the tail pointer is correct */
1276         if (num_rings % 2 == 0)
1277                 intel_ring_emit(signaller, MI_NOOP);
1278
1279         return 0;
1280 }
1281
1282 /**
1283  * gen6_add_request - Update the semaphore mailbox registers
1284  *
1285  * @ring: ring that is adding a request
1286  *
1287  * Update the mailbox registers in the *other* rings with the current seqno,
1288  * which acts like a signal in the canonical semaphore, and then write that
1289  * seqno into @ring's status page and emit a user interrupt.
1290  */
1291 static int
1292 gen6_add_request(struct intel_engine_cs *ring)
1293 {
1294         int ret;
1295
1296         if (ring->semaphore.signal)
1297                 ret = ring->semaphore.signal(ring, 4);
1298         else
1299                 ret = intel_ring_begin(ring, 4);
1300
1301         if (ret)
1302                 return ret;
1303
1304         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1305         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1306         intel_ring_emit(ring,
1307                     i915_gem_request_get_seqno(ring->outstanding_lazy_request));
1308         intel_ring_emit(ring, MI_USER_INTERRUPT);
1309         __intel_ring_advance(ring);
1310
1311         return 0;
1312 }
1313
1314 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
1315                                               u32 seqno)
1316 {
1317         struct drm_i915_private *dev_priv = dev->dev_private;
1318         return dev_priv->last_seqno < seqno;
1319 }
1320
1321 /**
1322  * intel_ring_sync - sync the waiter to the signaller on seqno
1323  *
1324  * @waiter: ring that is waiting
1325  * @signaller: ring which has signalled, or will signal
1326  * @seqno: seqno which the waiter will block on
1327  */
1328
1329 static int
1330 gen8_ring_sync(struct intel_engine_cs *waiter,
1331                struct intel_engine_cs *signaller,
1332                u32 seqno)
1333 {
1334         struct drm_i915_private *dev_priv = waiter->dev->dev_private;
1335         int ret;
1336
1337         ret = intel_ring_begin(waiter, 4);
1338         if (ret)
1339                 return ret;
1340
1341         intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
1342                                 MI_SEMAPHORE_GLOBAL_GTT |
1343                                 MI_SEMAPHORE_POLL |
1344                                 MI_SEMAPHORE_SAD_GTE_SDD);
1345         intel_ring_emit(waiter, seqno);
1346         intel_ring_emit(waiter,
1347                         lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
1348         intel_ring_emit(waiter,
1349                         upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
1350         intel_ring_advance(waiter);
1351         return 0;
1352 }
1353
1354 static int
1355 gen6_ring_sync(struct intel_engine_cs *waiter,
1356                struct intel_engine_cs *signaller,
1357                u32 seqno)
1358 {
1359         u32 dw1 = MI_SEMAPHORE_MBOX |
1360                   MI_SEMAPHORE_COMPARE |
1361                   MI_SEMAPHORE_REGISTER;
1362         u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
1363         int ret;
1364
1365         /* Throughout all of the GEM code, seqno passed implies our current
1366          * seqno is >= the last seqno executed. However for hardware the
1367          * comparison is strictly greater than.
1368          */
1369         seqno -= 1;
1370
1371         WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
1372
1373         ret = intel_ring_begin(waiter, 4);
1374         if (ret)
1375                 return ret;
1376
1377         /* If seqno wrap happened, omit the wait with no-ops */
1378         if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
1379                 intel_ring_emit(waiter, dw1 | wait_mbox);
1380                 intel_ring_emit(waiter, seqno);
1381                 intel_ring_emit(waiter, 0);
1382                 intel_ring_emit(waiter, MI_NOOP);
1383         } else {
1384                 intel_ring_emit(waiter, MI_NOOP);
1385                 intel_ring_emit(waiter, MI_NOOP);
1386                 intel_ring_emit(waiter, MI_NOOP);
1387                 intel_ring_emit(waiter, MI_NOOP);
1388         }
1389         intel_ring_advance(waiter);
1390
1391         return 0;
1392 }
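/*
 * Example (illustrative): to block until the signaller has completed
 * seqno 100, the code above emits 99 into the MBOX wait, because the
 * hardware comparison is strictly greater-than while the driver treats
 * "current seqno >= requested seqno" as completed.
 */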
1393
1394 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
1395 do {                                                                    \
1396         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
1397                  PIPE_CONTROL_DEPTH_STALL);                             \
1398         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
1399         intel_ring_emit(ring__, 0);                                                     \
1400         intel_ring_emit(ring__, 0);                                                     \
1401 } while (0)
1402
1403 static int
1404 pc_render_add_request(struct intel_engine_cs *ring)
1405 {
1406         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1407         int ret;
1408
1409         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
1410          * incoherent with writes to memory, i.e. completely fubar,
1411          * so we need to use PIPE_NOTIFY instead.
1412          *
1413          * However, we also need to workaround the qword write
1414          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
1415          * memory before requesting an interrupt.
1416          */
1417         ret = intel_ring_begin(ring, 32);
1418         if (ret)
1419                 return ret;
1420
1421         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
1422                         PIPE_CONTROL_WRITE_FLUSH |
1423                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
1424         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
1425         intel_ring_emit(ring,
1426                     i915_gem_request_get_seqno(ring->outstanding_lazy_request));
1427         intel_ring_emit(ring, 0);
1428         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1429         scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
1430         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1431         scratch_addr += 2 * CACHELINE_BYTES;
1432         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1433         scratch_addr += 2 * CACHELINE_BYTES;
1434         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1435         scratch_addr += 2 * CACHELINE_BYTES;
1436         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1437         scratch_addr += 2 * CACHELINE_BYTES;
1438         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1439
1440         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
1441                         PIPE_CONTROL_WRITE_FLUSH |
1442                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
1443                         PIPE_CONTROL_NOTIFY);
1444         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
1445         intel_ring_emit(ring,
1446                     i915_gem_request_get_seqno(ring->outstanding_lazy_request));
1447         intel_ring_emit(ring, 0);
1448         __intel_ring_advance(ring);
1449
1450         return 0;
1451 }
1452
1453 static u32
1454 gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1455 {
1456         /* Workaround to force correct ordering between irq and seqno writes on
1457          * ivb (and maybe also on snb) by reading from a CS register (like
1458          * ACTHD) before reading the status page. */
1459         if (!lazy_coherency) {
1460                 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1461                 POSTING_READ(RING_ACTHD(ring->mmio_base));
1462         }
1463
1464         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1465 }
1466
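/*
 * Plain seqno accessors: read and write the breadcrumb through the
 * ring's hardware status page.
 */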
1467 static u32
1468 ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1469 {
1470         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1471 }
1472
1473 static void
1474 ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1475 {
1476         intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1477 }
1478
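/*
 * Gen5 writes the breadcrumb via PIPE_CONTROL into the pinned scratch
 * page (see pc_render_add_request()), so read it back from there rather
 * than from the status page.
 */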
1479 static u32
1480 pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1481 {
1482         return ring->scratch.cpu_page[0];
1483 }
1484
1485 static void
1486 pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1487 {
1488         ring->scratch.cpu_page[0] = seqno;
1489 }
1490
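/*
 * IRQ reference counting for the legacy rings: the first get enables
 * the ring's interrupt source, the last put disables it again, all
 * under dev_priv->irq_lock.
 */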
1491 static bool
1492 gen5_ring_get_irq(struct intel_engine_cs *ring)
1493 {
1494         struct drm_device *dev = ring->dev;
1495         struct drm_i915_private *dev_priv = dev->dev_private;
1496         unsigned long flags;
1497
1498         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1499                 return false;
1500
1501         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1502         if (ring->irq_refcount++ == 0)
1503                 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1504         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1505
1506         return true;
1507 }
1508
1509 static void
1510 gen5_ring_put_irq(struct intel_engine_cs *ring)
1511 {
1512         struct drm_device *dev = ring->dev;
1513         struct drm_i915_private *dev_priv = dev->dev_private;
1514         unsigned long flags;
1515
1516         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1517         if (--ring->irq_refcount == 0)
1518                 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1519         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1520 }
1521
1522 static bool
1523 i9xx_ring_get_irq(struct intel_engine_cs *ring)
1524 {
1525         struct drm_device *dev = ring->dev;
1526         struct drm_i915_private *dev_priv = dev->dev_private;
1527         unsigned long flags;
1528
1529         if (!intel_irqs_enabled(dev_priv))
1530                 return false;
1531
1532         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1533         if (ring->irq_refcount++ == 0) {
1534                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
1535                 I915_WRITE(IMR, dev_priv->irq_mask);
1536                 POSTING_READ(IMR);
1537         }
1538         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1539
1540         return true;
1541 }
1542
1543 static void
1544 i9xx_ring_put_irq(struct intel_engine_cs *ring)
1545 {
1546         struct drm_device *dev = ring->dev;
1547         struct drm_i915_private *dev_priv = dev->dev_private;
1548         unsigned long flags;
1549
1550         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1551         if (--ring->irq_refcount == 0) {
1552                 dev_priv->irq_mask |= ring->irq_enable_mask;
1553                 I915_WRITE(IMR, dev_priv->irq_mask);
1554                 POSTING_READ(IMR);
1555         }
1556         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1557 }
1558
1559 static bool
1560 i8xx_ring_get_irq(struct intel_engine_cs *ring)
1561 {
1562         struct drm_device *dev = ring->dev;
1563         struct drm_i915_private *dev_priv = dev->dev_private;
1564         unsigned long flags;
1565
1566         if (!intel_irqs_enabled(dev_priv))
1567                 return false;
1568
1569         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1570         if (ring->irq_refcount++ == 0) {
1571                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
1572                 I915_WRITE16(IMR, dev_priv->irq_mask);
1573                 POSTING_READ16(IMR);
1574         }
1575         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1576
1577         return true;
1578 }
1579
1580 static void
1581 i8xx_ring_put_irq(struct intel_engine_cs *ring)
1582 {
1583         struct drm_device *dev = ring->dev;
1584         struct drm_i915_private *dev_priv = dev->dev_private;
1585         unsigned long flags;
1586
1587         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1588         if (--ring->irq_refcount == 0) {
1589                 dev_priv->irq_mask |= ring->irq_enable_mask;
1590                 I915_WRITE16(IMR, dev_priv->irq_mask);
1591                 POSTING_READ16(IMR);
1592         }
1593         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1594 }
1595
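/* Pre-gen6 BSD flush: a single MI_FLUSH padded with MI_NOOP suffices. */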
1596 static int
1597 bsd_ring_flush(struct intel_engine_cs *ring,
1598                u32     invalidate_domains,
1599                u32     flush_domains)
1600 {
1601         int ret;
1602
1603         ret = intel_ring_begin(ring, 2);
1604         if (ret)
1605                 return ret;
1606
1607         intel_ring_emit(ring, MI_FLUSH);
1608         intel_ring_emit(ring, MI_NOOP);
1609         intel_ring_advance(ring);
1610         return 0;
1611 }
1612
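/*
 * Emit the request breadcrumb: store the seqno into the status page and
 * raise MI_USER_INTERRUPT to wake any waiters.
 */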
1613 static int
1614 i9xx_add_request(struct intel_engine_cs *ring)
1615 {
1616         int ret;
1617
1618         ret = intel_ring_begin(ring, 4);
1619         if (ret)
1620                 return ret;
1621
1622         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1623         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1624         intel_ring_emit(ring,
1625                     i915_gem_request_get_seqno(ring->outstanding_lazy_request));
1626         intel_ring_emit(ring, MI_USER_INTERRUPT);
1627         __intel_ring_advance(ring);
1628
1629         return 0;
1630 }
1631
1632 static bool
1633 gen6_ring_get_irq(struct intel_engine_cs *ring)
1634 {
1635         struct drm_device *dev = ring->dev;
1636         struct drm_i915_private *dev_priv = dev->dev_private;
1637         unsigned long flags;
1638
1639         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1640                 return false;
1641
1642         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1643         if (ring->irq_refcount++ == 0) {
1644                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1645                         I915_WRITE_IMR(ring,
1646                                        ~(ring->irq_enable_mask |
1647                                          GT_PARITY_ERROR(dev)));
1648                 else
1649                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1650                 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1651         }
1652         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1653
1654         return true;
1655 }
1656
1657 static void
1658 gen6_ring_put_irq(struct intel_engine_cs *ring)
1659 {
1660         struct drm_device *dev = ring->dev;
1661         struct drm_i915_private *dev_priv = dev->dev_private;
1662         unsigned long flags;
1663
1664         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1665         if (--ring->irq_refcount == 0) {
1666                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1667                         I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1668                 else
1669                         I915_WRITE_IMR(ring, ~0);
1670                 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1671         }
1672         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1673 }
1674
1675 static bool
1676 hsw_vebox_get_irq(struct intel_engine_cs *ring)
1677 {
1678         struct drm_device *dev = ring->dev;
1679         struct drm_i915_private *dev_priv = dev->dev_private;
1680         unsigned long flags;
1681
1682         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1683                 return false;
1684
1685         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1686         if (ring->irq_refcount++ == 0) {
1687                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1688                 gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1689         }
1690         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1691
1692         return true;
1693 }
1694
1695 static void
1696 hsw_vebox_put_irq(struct intel_engine_cs *ring)
1697 {
1698         struct drm_device *dev = ring->dev;
1699         struct drm_i915_private *dev_priv = dev->dev_private;
1700         unsigned long flags;
1701
1702         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1703         if (--ring->irq_refcount == 0) {
1704                 I915_WRITE_IMR(ring, ~0);
1705                 gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1706         }
1707         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1708 }
1709
1710 static bool
1711 gen8_ring_get_irq(struct intel_engine_cs *ring)
1712 {
1713         struct drm_device *dev = ring->dev;
1714         struct drm_i915_private *dev_priv = dev->dev_private;
1715         unsigned long flags;
1716
1717         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1718                 return false;
1719
1720         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1721         if (ring->irq_refcount++ == 0) {
1722                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1723                         I915_WRITE_IMR(ring,
1724                                        ~(ring->irq_enable_mask |
1725                                          GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1726                 } else {
1727                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1728                 }
1729                 POSTING_READ(RING_IMR(ring->mmio_base));
1730         }
1731         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1732
1733         return true;
1734 }
1735
1736 static void
1737 gen8_ring_put_irq(struct intel_engine_cs *ring)
1738 {
1739         struct drm_device *dev = ring->dev;
1740         struct drm_i915_private *dev_priv = dev->dev_private;
1741         unsigned long flags;
1742
1743         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1744         if (--ring->irq_refcount == 0) {
1745                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1746                         I915_WRITE_IMR(ring,
1747                                        ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1748                 } else {
1749                         I915_WRITE_IMR(ring, ~0);
1750                 }
1751                 POSTING_READ(RING_IMR(ring->mmio_base));
1752         }
1753         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1754 }
1755
1756 static int
1757 i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1758                          u64 offset, u32 length,
1759                          unsigned dispatch_flags)
1760 {
1761         int ret;
1762
1763         ret = intel_ring_begin(ring, 2);
1764         if (ret)
1765                 return ret;
1766
1767         intel_ring_emit(ring,
1768                         MI_BATCH_BUFFER_START |
1769                         MI_BATCH_GTT |
1770                         (dispatch_flags & I915_DISPATCH_SECURE ?
1771                          0 : MI_BATCH_NON_SECURE_I965));
1772         intel_ring_emit(ring, offset);
1773         intel_ring_advance(ring);
1774
1775         return 0;
1776 }
1777
1778 /* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
1779 #define I830_BATCH_LIMIT (256*1024)
1780 #define I830_TLB_ENTRIES (2)
1781 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1782 static int
1783 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1784                          u64 offset, u32 len,
1785                          unsigned dispatch_flags)
1786 {
1787         u32 cs_offset = ring->scratch.gtt_offset;
1788         int ret;
1789
1790         ret = intel_ring_begin(ring, 6);
1791         if (ret)
1792                 return ret;
1793
1794         /* Evict the invalid PTE TLBs */
1795         intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1796         intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1797         intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1798         intel_ring_emit(ring, cs_offset);
1799         intel_ring_emit(ring, 0xdeadbeef);
1800         intel_ring_emit(ring, MI_NOOP);
1801         intel_ring_advance(ring);
1802
1803         if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1804                 if (len > I830_BATCH_LIMIT)
1805                         return -ENOSPC;
1806
1807                 ret = intel_ring_begin(ring, 6 + 2);
1808                 if (ret)
1809                         return ret;
1810
1811                 /* Blit the batch (which now has all relocs applied) to the
1812                  * stable batch scratch bo area (so that the CS never
1813                  * stumbles over its tlb invalidation bug) ...
1814                  */
1815                 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1816                 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
1817                 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1818                 intel_ring_emit(ring, cs_offset);
1819                 intel_ring_emit(ring, 4096);
1820                 intel_ring_emit(ring, offset);
1821
1822                 intel_ring_emit(ring, MI_FLUSH);
1823                 intel_ring_emit(ring, MI_NOOP);
1824                 intel_ring_advance(ring);
1825
1826                 /* ... and execute it. */
1827                 offset = cs_offset;
1828         }
1829
1830         ret = intel_ring_begin(ring, 4);
1831         if (ret)
1832                 return ret;
1833
1834         intel_ring_emit(ring, MI_BATCH_BUFFER);
1835         intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1836                                         0 : MI_BATCH_NON_SECURE));
1837         intel_ring_emit(ring, offset + len - 8);
1838         intel_ring_emit(ring, MI_NOOP);
1839         intel_ring_advance(ring);
1840
1841         return 0;
1842 }
1843
1844 static int
1845 i915_dispatch_execbuffer(struct intel_engine_cs *ring,
1846                          u64 offset, u32 len,
1847                          unsigned dispatch_flags)
1848 {
1849         int ret;
1850
1851         ret = intel_ring_begin(ring, 2);
1852         if (ret)
1853                 return ret;
1854
1855         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1856         intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1857                                         0 : MI_BATCH_NON_SECURE));
1858         intel_ring_advance(ring);
1859
1860         return 0;
1861 }
1862
1863 static void cleanup_status_page(struct intel_engine_cs *ring)
1864 {
1865         struct drm_i915_gem_object *obj;
1866
1867         obj = ring->status_page.obj;
1868         if (obj == NULL)
1869                 return;
1870
1871         kunmap(sg_page(obj->pages->sgl));
1872         i915_gem_object_ggtt_unpin(obj);
1873         drm_gem_object_unreference(&obj->base);
1874         ring->status_page.obj = NULL;
1875 }
1876
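/*
 * Allocate (if not already present), pin and kmap the GTT-backed
 * hardware status page for this ring.
 */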
1877 static int init_status_page(struct intel_engine_cs *ring)
1878 {
1879         struct drm_i915_gem_object *obj;
1880
1881         if ((obj = ring->status_page.obj) == NULL) {
1882                 unsigned flags;
1883                 int ret;
1884
1885                 obj = i915_gem_alloc_object(ring->dev, 4096);
1886                 if (obj == NULL) {
1887                         DRM_ERROR("Failed to allocate status page\n");
1888                         return -ENOMEM;
1889                 }
1890
1891                 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1892                 if (ret)
1893                         goto err_unref;
1894
1895                 flags = 0;
1896                 if (!HAS_LLC(ring->dev))
1897                         /* On g33, we cannot place HWS above 256MiB, so
1898                          * restrict its pinning to the low mappable arena.
1899                          * Though this restriction is not documented for
1900                          * gen4, gen5, or byt, they also behave similarly
1901                          * and hang if the HWS is placed at the top of the
1902                          * GTT. To generalise, it appears that all !llc
1903                          * platforms have issues with us placing the HWS
1904                          * above the mappable region (even though we never
1905                          * actually map it).
1906                          */
1907                         flags |= PIN_MAPPABLE;
1908                 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
1909                 if (ret) {
1910 err_unref:
1911                         drm_gem_object_unreference(&obj->base);
1912                         return ret;
1913                 }
1914
1915                 ring->status_page.obj = obj;
1916         }
1917
1918         ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1919         ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1920         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1921
1922         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1923                         ring->name, ring->status_page.gfx_addr);
1924
1925         return 0;
1926 }
1927
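/*
 * Chipsets without a GTT-backed HWS use a physically addressed status
 * page, allocated coherently via drm_pci_alloc().
 */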
1928 static int init_phys_status_page(struct intel_engine_cs *ring)
1929 {
1930         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1931
1932         if (!dev_priv->status_page_dmah) {
1933                 dev_priv->status_page_dmah =
1934                         drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1935                 if (!dev_priv->status_page_dmah)
1936                         return -ENOMEM;
1937         }
1938
1939         ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1940         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1941
1942         return 0;
1943 }
1944
1945 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1946 {
1947         iounmap(ringbuf->virtual_start);
1948         ringbuf->virtual_start = NULL;
1949         i915_gem_object_ggtt_unpin(ringbuf->obj);
1950 }
1951
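/*
 * Pin the ring buffer object into the mappable GGTT and map it
 * write-combined through the aperture so the CPU can emit commands.
 */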
1952 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
1953                                      struct intel_ringbuffer *ringbuf)
1954 {
1955         struct drm_i915_private *dev_priv = to_i915(dev);
1956         struct drm_i915_gem_object *obj = ringbuf->obj;
1957         int ret;
1958
1959         ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1960         if (ret)
1961                 return ret;
1962
1963         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1964         if (ret) {
1965                 i915_gem_object_ggtt_unpin(obj);
1966                 return ret;
1967         }
1968
1969         ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
1970                         i915_gem_obj_ggtt_offset(obj), ringbuf->size);
1971         if (ringbuf->virtual_start == NULL) {
1972                 i915_gem_object_ggtt_unpin(obj);
1973                 return -EINVAL;
1974         }
1975
1976         return 0;
1977 }
1978
1979 void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1980 {
1981         drm_gem_object_unreference(&ringbuf->obj->base);
1982         ringbuf->obj = NULL;
1983 }
1984
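/*
 * Prefer stolen memory for the ring buffer on !LLC platforms, fall back
 * to a regular GEM object, and mark it read-only for the GPU.
 */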
1985 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1986                                struct intel_ringbuffer *ringbuf)
1987 {
1988         struct drm_i915_gem_object *obj;
1989
1990         obj = NULL;
1991         if (!HAS_LLC(dev))
1992                 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
1993         if (obj == NULL)
1994                 obj = i915_gem_alloc_object(dev, ringbuf->size);
1995         if (obj == NULL)
1996                 return -ENOMEM;
1997
1998         /* mark ring buffers as read-only from GPU side by default */
1999         obj->gt_ro = 1;
2000
2001         ringbuf->obj = obj;
2002
2003         return 0;
2004 }
2005
2006 static int intel_init_ring_buffer(struct drm_device *dev,
2007                                   struct intel_engine_cs *ring)
2008 {
2009         struct intel_ringbuffer *ringbuf;
2010         int ret;
2011
2012         WARN_ON(ring->buffer);
2013
2014         ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
2015         if (!ringbuf)
2016                 return -ENOMEM;
2017         ring->buffer = ringbuf;
2018
2019         ring->dev = dev;
2020         INIT_LIST_HEAD(&ring->active_list);
2021         INIT_LIST_HEAD(&ring->request_list);
2022         INIT_LIST_HEAD(&ring->execlist_queue);
2023         i915_gem_batch_pool_init(dev, &ring->batch_pool);
2024         ringbuf->size = 32 * PAGE_SIZE;
2025         ringbuf->ring = ring;
2026         memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
2027
2028         init_waitqueue_head(&ring->irq_queue);
2029
2030         if (I915_NEED_GFX_HWS(dev)) {
2031                 ret = init_status_page(ring);
2032                 if (ret)
2033                         goto error;
2034         } else {
2035                 BUG_ON(ring->id != RCS);
2036                 ret = init_phys_status_page(ring);
2037                 if (ret)
2038                         goto error;
2039         }
2040
2041         WARN_ON(ringbuf->obj);
2042
2043         ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
2044         if (ret) {
2045                 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
2046                                 ring->name, ret);
2047                 goto error;
2048         }
2049
2050         ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
2051         if (ret) {
2052                 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
2053                                 ring->name, ret);
2054                 intel_destroy_ringbuffer_obj(ringbuf);
2055                 goto error;
2056         }
2057
2058         /* Workaround an erratum on the i830 which causes a hang if
2059          * the TAIL pointer points to within the last 2 cachelines
2060          * of the buffer.
2061          */
2062         ringbuf->effective_size = ringbuf->size;
2063         if (IS_I830(dev) || IS_845G(dev))
2064                 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
2065
2066         ret = i915_cmd_parser_init_ring(ring);
2067         if (ret)
2068                 goto error;
2069
2070         return 0;
2071
2072 error:
2073         kfree(ringbuf);
2074         ring->buffer = NULL;
2075         return ret;
2076 }
2077
2078 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
2079 {
2080         struct drm_i915_private *dev_priv;
2081         struct intel_ringbuffer *ringbuf;
2082
2083         if (!intel_ring_initialized(ring))
2084                 return;
2085
2086         dev_priv = to_i915(ring->dev);
2087         ringbuf = ring->buffer;
2088
2089         intel_stop_ring_buffer(ring);
2090         WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
2091
2092         intel_unpin_ringbuffer_obj(ringbuf);
2093         intel_destroy_ringbuffer_obj(ringbuf);
2094         i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
2095
2096         if (ring->cleanup)
2097                 ring->cleanup(ring);
2098
2099         cleanup_status_page(ring);
2100
2101         i915_cmd_parser_fini_ring(ring);
2102         i915_gem_batch_pool_fini(&ring->batch_pool);
2103
2104         kfree(ringbuf);
2105         ring->buffer = NULL;
2106 }
2107
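/*
 * Find the oldest request whose completion frees at least @n bytes of
 * ring space and wait for it.
 */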
2108 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
2109 {
2110         struct intel_ringbuffer *ringbuf = ring->buffer;
2111         struct drm_i915_gem_request *request;
2112         unsigned space;
2113         int ret;
2114
2115         /* The whole point of reserving space is to not wait! */
2116         WARN_ON(ringbuf->reserved_in_use);
2117
2118         if (intel_ring_space(ringbuf) >= n)
2119                 return 0;
2120
2121         list_for_each_entry(request, &ring->request_list, list) {
2122                 space = __intel_ring_space(request->postfix, ringbuf->tail,
2123                                            ringbuf->size);
2124                 if (space >= n)
2125                         break;
2126         }
2127
2128         if (WARN_ON(&request->list == &ring->request_list))
2129                 return -ENOSPC;
2130
2131         ret = i915_wait_request(request);
2132         if (ret)
2133                 return ret;
2134
2135         ringbuf->space = space;
2136         return 0;
2137 }
2138
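/*
 * Fill the rest of the ring with MI_NOOPs and wrap the tail back to the
 * start of the buffer.
 */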
2139 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
2140 {
2141         uint32_t __iomem *virt;
2142         struct intel_ringbuffer *ringbuf = ring->buffer;
2143         int rem = ringbuf->size - ringbuf->tail;
2144
2145         /* Can't wrap if space has already been reserved! */
2146         WARN_ON(ringbuf->reserved_in_use);
2147
2148         if (ringbuf->space < rem) {
2149                 int ret = ring_wait_for_space(ring, rem);
2150                 if (ret)
2151                         return ret;
2152         }
2153
2154         virt = ringbuf->virtual_start + ringbuf->tail;
2155         rem /= 4;
2156         while (rem--)
2157                 iowrite32(MI_NOOP, virt++);
2158
2159         ringbuf->tail = 0;
2160         intel_ring_update_space(ringbuf);
2161
2162         return 0;
2163 }
2164
2165 int intel_ring_idle(struct intel_engine_cs *ring)
2166 {
2167         struct drm_i915_gem_request *req;
2168
2169         /* We need to add any requests required to flush the objects and ring */
2170         WARN_ON(ring->outstanding_lazy_request);
2171         if (ring->outstanding_lazy_request)
2172                 i915_add_request(ring->outstanding_lazy_request);
2173
2174         /* Wait upon the last request to be completed */
2175         if (list_empty(&ring->request_list))
2176                 return 0;
2177
2178         req = list_entry(ring->request_list.prev,
2179                         struct drm_i915_gem_request,
2180                         list);
2181
2182         /* Make sure we do not trigger any retires */
2183         return __i915_wait_request(req,
2184                                    atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
2185                                    to_i915(ring->dev)->mm.interruptible,
2186                                    NULL, NULL);
2187 }
2188
2189 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
2190 {
2191         request->ringbuf = request->ring->buffer;
2192         return 0;
2193 }
2194
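/*
 * Reserved-space helpers. The intended flow (roughly, as assumed from
 * the callers): _reserve() an upper bound when a request is created,
 * _use()/_end() around emitting the request's closing commands, and
 * _cancel() if the request is abandoned before the reservation is used.
 */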
2195 void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
2196 {
2197         /* NB: Until request management is fully tidied up and the OLR is
2198          * removed, there are too many ways to get false hits on this
2199          * anti-recursion check! */
2200         /*WARN_ON(ringbuf->reserved_size);*/
2201         WARN_ON(ringbuf->reserved_in_use);
2202
2203         ringbuf->reserved_size = size;
2204
2205         /*
2206          * Really need to call _begin() here but that currently leads to
2207          * recursion problems! This will be fixed later but for now just
2208          * return and hope for the best. Note that there is only a real
2209          * problem if the creator of the request never actually calls _begin(),
2210          * but if no work is going to be submitted then why was the request
2211          * created in the first place?
2212          */
2213 }
2214
2215 void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
2216 {
2217         WARN_ON(ringbuf->reserved_in_use);
2218
2219         ringbuf->reserved_size   = 0;
2220         ringbuf->reserved_in_use = false;
2221 }
2222
2223 void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
2224 {
2225         WARN_ON(ringbuf->reserved_in_use);
2226
2227         ringbuf->reserved_in_use = true;
2228         ringbuf->reserved_tail   = ringbuf->tail;
2229 }
2230
2231 void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
2232 {
2233         WARN_ON(!ringbuf->reserved_in_use);
2234         WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
2235              "request reserved size too small: %d vs %d!\n",
2236              ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
2237
2238         ringbuf->reserved_size   = 0;
2239         ringbuf->reserved_in_use = false;
2240 }
2241
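/*
 * Make sure at least @bytes (plus any outstanding reservation) of ring
 * space is available, wrapping past the end and/or waiting for older
 * requests to retire as necessary.
 */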
2242 static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
2243 {
2244         struct intel_ringbuffer *ringbuf = ring->buffer;
2245         int ret;
2246
2247         /*
2248          * Add on the reserved size to the request to make sure that after
2249          * the intended commands have been emitted, there is guaranteed to
2250          * still be enough free space to send them to the hardware.
2251          */
2252         if (!ringbuf->reserved_in_use)
2253                 bytes += ringbuf->reserved_size;
2254
2255         if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
2256                 ret = intel_wrap_ring_buffer(ring);
2257                 if (unlikely(ret))
2258                         return ret;
2259
2260                 if (ringbuf->reserved_size) {
2261                         uint32_t size = ringbuf->reserved_size;
2262
2263                         intel_ring_reserved_space_cancel(ringbuf);
2264                         intel_ring_reserved_space_reserve(ringbuf, size);
2265                 }
2266         }
2267
2268         if (unlikely(ringbuf->space < bytes)) {
2269                 ret = ring_wait_for_space(ring, bytes);
2270                 if (unlikely(ret))
2271                         return ret;
2272         }
2273
2274         return 0;
2275 }
2276
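/*
 * Check that the GPU is not wedged, make room for @num_dwords of
 * commands and preallocate the lazy request before the caller starts
 * emitting into the ring.
 */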
2277 int intel_ring_begin(struct intel_engine_cs *ring,
2278                      int num_dwords)
2279 {
2280         struct drm_i915_gem_request *req;
2281         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2282         int ret;
2283
2284         ret = i915_gem_check_wedge(&dev_priv->gpu_error,
2285                                    dev_priv->mm.interruptible);
2286         if (ret)
2287                 return ret;
2288
2289         ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
2290         if (ret)
2291                 return ret;
2292
2293         /* Preallocate the OLR (outstanding lazy request) before touching the ring */
2294         ret = i915_gem_request_alloc(ring, ring->default_context, &req);
2295         if (ret)
2296                 return ret;
2297
2298         ring->buffer->space -= num_dwords * sizeof(uint32_t);
2299         return 0;
2300 }
2301
2302 /* Align the ring tail to a cacheline boundary */
2303 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
2304 {
2305         int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
2306         int ret;
2307
2308         if (num_dwords == 0)
2309                 return 0;
2310
2311         num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
2312         ret = intel_ring_begin(ring, num_dwords);
2313         if (ret)
2314                 return ret;
2315
2316         while (num_dwords--)
2317                 intel_ring_emit(ring, MI_NOOP);
2318
2319         intel_ring_advance(ring);
2320
2321         return 0;
2322 }
2323
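/*
 * Reset the ring's seqno bookkeeping (and, on gen6/7, the semaphore
 * sync registers), e.g. when the driver initialises or wraps the
 * global seqno.
 */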
2324 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
2325 {
2326         struct drm_device *dev = ring->dev;
2327         struct drm_i915_private *dev_priv = dev->dev_private;
2328
2329         BUG_ON(ring->outstanding_lazy_request);
2330
2331         if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
2332                 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
2333                 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
2334                 if (HAS_VEBOX(dev))
2335                         I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
2336         }
2337
2338         ring->set_seqno(ring, seqno);
2339         ring->hangcheck.seqno = seqno;
2340 }
2341
2342 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
2343                                      u32 value)
2344 {
2345         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2346
2347         /* Every tail move must follow the sequence below */
2348
2349         /* Disable notification that the ring is IDLE. The GT
2350          * will then assume that it is busy and bring it out of rc6.
2351          */
2352         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
2353                    _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2354
2355         /* Clear the context id. Here be magic! */
2356         I915_WRITE64(GEN6_BSD_RNCID, 0x0);
2357
2358         /* Wait for the ring not to be idle, i.e. for it to wake up. */
2359         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
2360                       GEN6_BSD_SLEEP_INDICATOR) == 0,
2361                      50))
2362                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2363
2364         /* Now that the ring is fully powered up, update the tail */
2365         I915_WRITE_TAIL(ring, value);
2366         POSTING_READ(RING_TAIL(ring->mmio_base));
2367
2368         /* Let the ring send IDLE messages to the GT again,
2369          * and so let it sleep to conserve power when idle.
2370          */
2371         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
2372                    _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2373 }
2374
2375 static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
2376                                u32 invalidate, u32 flush)
2377 {
2378         uint32_t cmd;
2379         int ret;
2380
2381         ret = intel_ring_begin(ring, 4);
2382         if (ret)
2383                 return ret;
2384
2385         cmd = MI_FLUSH_DW;
2386         if (INTEL_INFO(ring->dev)->gen >= 8)
2387                 cmd += 1;
2388
2389         /* We always require a command barrier so that subsequent
2390          * commands, such as breadcrumb interrupts, are strictly ordered
2391          * wrt the contents of the write cache being flushed to memory
2392          * (and thus being coherent from the CPU).
2393          */
2394         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2395
2396         /*
2397          * Bspec vol 1c.5 - video engine command streamer:
2398          * "If ENABLED, all TLBs will be invalidated once the flush
2399          * operation is complete. This bit is only valid when the
2400          * Post-Sync Operation field is a value of 1h or 3h."
2401          */
2402         if (invalidate & I915_GEM_GPU_DOMAINS)
2403                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
2404
2405         intel_ring_emit(ring, cmd);
2406         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2407         if (INTEL_INFO(ring->dev)->gen >= 8) {
2408                 intel_ring_emit(ring, 0); /* upper addr */
2409                 intel_ring_emit(ring, 0); /* value */
2410         } else {
2411                 intel_ring_emit(ring, 0);
2412                 intel_ring_emit(ring, MI_NOOP);
2413         }
2414         intel_ring_advance(ring);
2415         return 0;
2416 }
2417
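/*
 * BDW+: MI_BATCH_BUFFER_START carries a 48-bit batch address split over
 * two dwords, with the PPGTT/GGTT selector at bit 8 of the command.
 */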
2418 static int
2419 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2420                               u64 offset, u32 len,
2421                               unsigned dispatch_flags)
2422 {
2423         bool ppgtt = USES_PPGTT(ring->dev) &&
2424                         !(dispatch_flags & I915_DISPATCH_SECURE);
2425         int ret;
2426
2427         ret = intel_ring_begin(ring, 4);
2428         if (ret)
2429                 return ret;
2430
2431         /* FIXME(BDW): Address space and security selectors. */
2432         intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
2433         intel_ring_emit(ring, lower_32_bits(offset));
2434         intel_ring_emit(ring, upper_32_bits(offset));
2435         intel_ring_emit(ring, MI_NOOP);
2436         intel_ring_advance(ring);
2437
2438         return 0;
2439 }
2440
2441 static int
2442 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2443                              u64 offset, u32 len,
2444                              unsigned dispatch_flags)
2445 {
2446         int ret;
2447
2448         ret = intel_ring_begin(ring, 2);
2449         if (ret)
2450                 return ret;
2451
2452         intel_ring_emit(ring,
2453                         MI_BATCH_BUFFER_START |
2454                         (dispatch_flags & I915_DISPATCH_SECURE ?
2455                          0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
2456         /* bits 0-7 hold the length on GEN6+ */
2457         intel_ring_emit(ring, offset);
2458         intel_ring_advance(ring);
2459
2460         return 0;
2461 }
2462
2463 static int
2464 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2465                               u64 offset, u32 len,
2466                               unsigned dispatch_flags)
2467 {
2468         int ret;
2469
2470         ret = intel_ring_begin(ring, 2);
2471         if (ret)
2472                 return ret;
2473
2474         intel_ring_emit(ring,
2475                         MI_BATCH_BUFFER_START |
2476                         (dispatch_flags & I915_DISPATCH_SECURE ?
2477                          0 : MI_BATCH_NON_SECURE_I965));
2478         /* bits 0-7 hold the length on GEN6+ */
2479         intel_ring_emit(ring, offset);
2480         intel_ring_advance(ring);
2481
2482         return 0;
2483 }
2484
2485 /* Blitter support (SandyBridge+) */
2486
2487 static int gen6_ring_flush(struct intel_engine_cs *ring,
2488                            u32 invalidate, u32 flush)
2489 {
2490         struct drm_device *dev = ring->dev;
2491         uint32_t cmd;
2492         int ret;
2493
2494         ret = intel_ring_begin(ring, 4);
2495         if (ret)
2496                 return ret;
2497
2498         cmd = MI_FLUSH_DW;
2499         if (INTEL_INFO(dev)->gen >= 8)
2500                 cmd += 1;
2501
2502         /* We always require a command barrier so that subsequent
2503          * commands, such as breadcrumb interrupts, are strictly ordered
2504          * wrt the contents of the write cache being flushed to memory
2505          * (and thus being coherent from the CPU).
2506          */
2507         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2508
2509         /*
2510          * Bspec vol 1c.3 - blitter engine command streamer:
2511          * "If ENABLED, all TLBs will be invalidated once the flush
2512          * operation is complete. This bit is only valid when the
2513          * Post-Sync Operation field is a value of 1h or 3h."
2514          */
2515         if (invalidate & I915_GEM_DOMAIN_RENDER)
2516                 cmd |= MI_INVALIDATE_TLB;
2517         intel_ring_emit(ring, cmd);
2518         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2519         if (INTEL_INFO(dev)->gen >= 8) {
2520                 intel_ring_emit(ring, 0); /* upper addr */
2521                 intel_ring_emit(ring, 0); /* value */
2522         } else {
2523                 intel_ring_emit(ring, 0);
2524                 intel_ring_emit(ring, MI_NOOP);
2525         }
2526         intel_ring_advance(ring);
2527
2528         return 0;
2529 }
2530
2531 int intel_init_render_ring_buffer(struct drm_device *dev)
2532 {
2533         struct drm_i915_private *dev_priv = dev->dev_private;
2534         struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2535         struct drm_i915_gem_object *obj;
2536         int ret;
2537
2538         ring->name = "render ring";
2539         ring->id = RCS;
2540         ring->mmio_base = RENDER_RING_BASE;
2541
2542         if (INTEL_INFO(dev)->gen >= 8) {
2543                 if (i915_semaphore_is_enabled(dev)) {
2544                         obj = i915_gem_alloc_object(dev, 4096);
2545                         if (obj == NULL) {
2546                                 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2547                                 i915.semaphores = 0;
2548                         } else {
2549                                 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2550                                 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2551                                 if (ret != 0) {
2552                                         drm_gem_object_unreference(&obj->base);
2553                                         DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2554                                         i915.semaphores = 0;
2555                                 } else
2556                                         dev_priv->semaphore_obj = obj;
2557                         }
2558                 }
2559
2560                 ring->init_context = intel_rcs_ctx_init;
2561                 ring->add_request = gen6_add_request;
2562                 ring->flush = gen8_render_ring_flush;
2563                 ring->irq_get = gen8_ring_get_irq;
2564                 ring->irq_put = gen8_ring_put_irq;
2565                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2566                 ring->get_seqno = gen6_ring_get_seqno;
2567                 ring->set_seqno = ring_set_seqno;
2568                 if (i915_semaphore_is_enabled(dev)) {
2569                         WARN_ON(!dev_priv->semaphore_obj);
2570                         ring->semaphore.sync_to = gen8_ring_sync;
2571                         ring->semaphore.signal = gen8_rcs_signal;
2572                         GEN8_RING_SEMAPHORE_INIT;
2573                 }
2574         } else if (INTEL_INFO(dev)->gen >= 6) {
2575                 ring->add_request = gen6_add_request;
2576                 ring->flush = gen7_render_ring_flush;
2577                 if (INTEL_INFO(dev)->gen == 6)
2578                         ring->flush = gen6_render_ring_flush;
2579                 ring->irq_get = gen6_ring_get_irq;
2580                 ring->irq_put = gen6_ring_put_irq;
2581                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2582                 ring->get_seqno = gen6_ring_get_seqno;
2583                 ring->set_seqno = ring_set_seqno;
2584                 if (i915_semaphore_is_enabled(dev)) {
2585                         ring->semaphore.sync_to = gen6_ring_sync;
2586                         ring->semaphore.signal = gen6_signal;
2587                         /*
2588                          * This semaphore scheme is only used on pre-gen8
2589                          * platforms, and there is no VCS2 ring there, so the
2590                          * semaphore between RCS and VCS2 is initialized as
2591                          * INVALID. Gen8 initializes the semaphore between
2592                          * VCS2 and RCS later.
2593                          */
2594                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2595                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
2596                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
2597                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
2598                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2599                         ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2600                         ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
2601                         ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2602                         ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2603                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2604                 }
2605         } else if (IS_GEN5(dev)) {
2606                 ring->add_request = pc_render_add_request;
2607                 ring->flush = gen4_render_ring_flush;
2608                 ring->get_seqno = pc_render_get_seqno;
2609                 ring->set_seqno = pc_render_set_seqno;
2610                 ring->irq_get = gen5_ring_get_irq;
2611                 ring->irq_put = gen5_ring_put_irq;
2612                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
2613                                         GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
2614         } else {
2615                 ring->add_request = i9xx_add_request;
2616                 if (INTEL_INFO(dev)->gen < 4)
2617                         ring->flush = gen2_render_ring_flush;
2618                 else
2619                         ring->flush = gen4_render_ring_flush;
2620                 ring->get_seqno = ring_get_seqno;
2621                 ring->set_seqno = ring_set_seqno;
2622                 if (IS_GEN2(dev)) {
2623                         ring->irq_get = i8xx_ring_get_irq;
2624                         ring->irq_put = i8xx_ring_put_irq;
2625                 } else {
2626                         ring->irq_get = i9xx_ring_get_irq;
2627                         ring->irq_put = i9xx_ring_put_irq;
2628                 }
2629                 ring->irq_enable_mask = I915_USER_INTERRUPT;
2630         }
2631         ring->write_tail = ring_write_tail;
2632
2633         if (IS_HASWELL(dev))
2634                 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2635         else if (IS_GEN8(dev))
2636                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2637         else if (INTEL_INFO(dev)->gen >= 6)
2638                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2639         else if (INTEL_INFO(dev)->gen >= 4)
2640                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2641         else if (IS_I830(dev) || IS_845G(dev))
2642                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2643         else
2644                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2645         ring->init_hw = init_render_ring;
2646         ring->cleanup = render_ring_cleanup;
2647
2648         /* Workaround batchbuffer to combat CS tlb bug. */
2649         if (HAS_BROKEN_CS_TLB(dev)) {
2650                 obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
2651                 if (obj == NULL) {
2652                         DRM_ERROR("Failed to allocate batch bo\n");
2653                         return -ENOMEM;
2654                 }
2655
2656                 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
2657                 if (ret != 0) {
2658                         drm_gem_object_unreference(&obj->base);
2659                         DRM_ERROR("Failed to pin batch bo\n");
2660                         return ret;
2661                 }
2662
2663                 ring->scratch.obj = obj;
2664                 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
2665         }
2666
2667         ret = intel_init_ring_buffer(dev, ring);
2668         if (ret)
2669                 return ret;
2670
2671         if (INTEL_INFO(dev)->gen >= 5) {
2672                 ret = intel_init_pipe_control(ring);
2673                 if (ret)
2674                         return ret;
2675         }
2676
2677         return 0;
2678 }
2679
2680 int intel_init_bsd_ring_buffer(struct drm_device *dev)
2681 {
2682         struct drm_i915_private *dev_priv = dev->dev_private;
2683         struct intel_engine_cs *ring = &dev_priv->ring[VCS];
2684
2685         ring->name = "bsd ring";
2686         ring->id = VCS;
2687
2688         ring->write_tail = ring_write_tail;
2689         if (INTEL_INFO(dev)->gen >= 6) {
2690                 ring->mmio_base = GEN6_BSD_RING_BASE;
2691                 /* gen6 bsd needs a special wa for tail updates */
2692                 if (IS_GEN6(dev))
2693                         ring->write_tail = gen6_bsd_ring_write_tail;
2694                 ring->flush = gen6_bsd_ring_flush;
2695                 ring->add_request = gen6_add_request;
2696                 ring->get_seqno = gen6_ring_get_seqno;
2697                 ring->set_seqno = ring_set_seqno;
2698                 if (INTEL_INFO(dev)->gen >= 8) {
2699                         ring->irq_enable_mask =
2700                                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2701                         ring->irq_get = gen8_ring_get_irq;
2702                         ring->irq_put = gen8_ring_put_irq;
2703                         ring->dispatch_execbuffer =
2704                                 gen8_ring_dispatch_execbuffer;
2705                         if (i915_semaphore_is_enabled(dev)) {
2706                                 ring->semaphore.sync_to = gen8_ring_sync;
2707                                 ring->semaphore.signal = gen8_xcs_signal;
2708                                 GEN8_RING_SEMAPHORE_INIT;
2709                         }
2710                 } else {
2711                         ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2712                         ring->irq_get = gen6_ring_get_irq;
2713                         ring->irq_put = gen6_ring_put_irq;
2714                         ring->dispatch_execbuffer =
2715                                 gen6_ring_dispatch_execbuffer;
2716                         if (i915_semaphore_is_enabled(dev)) {
2717                                 ring->semaphore.sync_to = gen6_ring_sync;
2718                                 ring->semaphore.signal = gen6_signal;
2719                                 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2720                                 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2721                                 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2722                                 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2723                                 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2724                                 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2725                                 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2726                                 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2727                                 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2728                                 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2729                         }
2730                 }
2731         } else {
2732                 ring->mmio_base = BSD_RING_BASE;
2733                 ring->flush = bsd_ring_flush;
2734                 ring->add_request = i9xx_add_request;
2735                 ring->get_seqno = ring_get_seqno;
2736                 ring->set_seqno = ring_set_seqno;
2737                 if (IS_GEN5(dev)) {
2738                         ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2739                         ring->irq_get = gen5_ring_get_irq;
2740                         ring->irq_put = gen5_ring_put_irq;
2741                 } else {
2742                         ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2743                         ring->irq_get = i9xx_ring_get_irq;
2744                         ring->irq_put = i9xx_ring_put_irq;
2745                 }
2746                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2747         }
2748         ring->init_hw = init_ring_common;
2749
2750         return intel_init_ring_buffer(dev, ring);
2751 }
2752
2753 /**
2754  * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
2755  */
2756 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2757 {
2758         struct drm_i915_private *dev_priv = dev->dev_private;
2759         struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
2760
2761         ring->name = "bsd2 ring";
2762         ring->id = VCS2;
2763
2764         ring->write_tail = ring_write_tail;
2765         ring->mmio_base = GEN8_BSD2_RING_BASE;
2766         ring->flush = gen6_bsd_ring_flush;
2767         ring->add_request = gen6_add_request;
2768         ring->get_seqno = gen6_ring_get_seqno;
2769         ring->set_seqno = ring_set_seqno;
2770         ring->irq_enable_mask =
2771                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
2772         ring->irq_get = gen8_ring_get_irq;
2773         ring->irq_put = gen8_ring_put_irq;
2774         ring->dispatch_execbuffer =
2775                         gen8_ring_dispatch_execbuffer;
2776         if (i915_semaphore_is_enabled(dev)) {
2777                 ring->semaphore.sync_to = gen8_ring_sync;
2778                 ring->semaphore.signal = gen8_xcs_signal;
2779                 GEN8_RING_SEMAPHORE_INIT;
2780         }
2781         ring->init_hw = init_ring_common;
2782
2783         return intel_init_ring_buffer(dev, ring);
2784 }
2785
2786 int intel_init_blt_ring_buffer(struct drm_device *dev)
2787 {
2788         struct drm_i915_private *dev_priv = dev->dev_private;
2789         struct intel_engine_cs *ring = &dev_priv->ring[BCS];
2790
2791         ring->name = "blitter ring";
2792         ring->id = BCS;
2793
2794         ring->mmio_base = BLT_RING_BASE;
2795         ring->write_tail = ring_write_tail;
2796         ring->flush = gen6_ring_flush;
2797         ring->add_request = gen6_add_request;
2798         ring->get_seqno = gen6_ring_get_seqno;
2799         ring->set_seqno = ring_set_seqno;
2800         if (INTEL_INFO(dev)->gen >= 8) {
2801                 ring->irq_enable_mask =
2802                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2803                 ring->irq_get = gen8_ring_get_irq;
2804                 ring->irq_put = gen8_ring_put_irq;
2805                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2806                 if (i915_semaphore_is_enabled(dev)) {
2807                         ring->semaphore.sync_to = gen8_ring_sync;
2808                         ring->semaphore.signal = gen8_xcs_signal;
2809                         GEN8_RING_SEMAPHORE_INIT;
2810                 }
2811         } else {
2812                 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2813                 ring->irq_get = gen6_ring_get_irq;
2814                 ring->irq_put = gen6_ring_put_irq;
2815                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2816                 if (i915_semaphore_is_enabled(dev)) {
2817                         ring->semaphore.signal = gen6_signal;
2818                         ring->semaphore.sync_to = gen6_ring_sync;
2819                         /*
2820                          * This semaphore scheme is only used on pre-gen8
2821                          * platforms, and there is no VCS2 ring there, so the
2822                          * semaphore between BCS and VCS2 is initialized as
2823                          * INVALID. Gen8 initializes the semaphore between
2824                          * BCS and VCS2 later.
2825                          */
                        ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
                        ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
                        ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
                        ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
                        ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
                        ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
                        ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
                        ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
                        ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
                        ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
                }
        }
        ring->init_hw = init_ring_common;

        return intel_init_ring_buffer(dev, ring);
}

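/*
 * Set up the video enhancement (VECS) engine.  Gen8+ uses the shared
 * gen8 IRQ helpers, while pre-gen8 (Haswell) needs its own vebox IRQ
 * handlers driven off the PM user interrupt.
 */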
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[VECS];

        ring->name = "video enhancement ring";
        ring->id = VECS;

        ring->mmio_base = VEBOX_RING_BASE;
        ring->write_tail = ring_write_tail;
        ring->flush = gen6_ring_flush;
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
        ring->set_seqno = ring_set_seqno;

        if (INTEL_INFO(dev)->gen >= 8) {
                ring->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
                ring->irq_get = gen8_ring_get_irq;
                ring->irq_put = gen8_ring_put_irq;
                ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
                if (i915_semaphore_is_enabled(dev)) {
                        ring->semaphore.sync_to = gen8_ring_sync;
                        ring->semaphore.signal = gen8_xcs_signal;
                        GEN8_RING_SEMAPHORE_INIT;
                }
        } else {
                ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
                ring->irq_get = hsw_vebox_get_irq;
                ring->irq_put = hsw_vebox_put_irq;
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
                if (i915_semaphore_is_enabled(dev)) {
                        ring->semaphore.sync_to = gen6_ring_sync;
                        ring->semaphore.signal = gen6_signal;
                        ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
                        ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
                        ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
                        ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
                        ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
                        ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
                        ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
                        ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
                        ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
                        ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
                }
        }
        ring->init_hw = init_ring_common;

        return intel_init_ring_buffer(dev, ring);
}

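/*
 * Flush the GPU write domains for the engine backing @req, but only if
 * that engine has been flagged as having dirty caches since the last
 * flush.
 */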
int
intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *ring = req->ring;
        int ret;

        if (!ring->gpu_caches_dirty)
                return 0;

        ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;

        trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

        ring->gpu_caches_dirty = false;
        return 0;
}

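/*
 * Invalidate the GPU read domains for @req's engine before it samples
 * new state, also flushing any outstanding writes if the caches were
 * marked dirty.
 */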
int
intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *ring = req->ring;
        uint32_t flush_domains;
        int ret;

        flush_domains = 0;
        if (ring->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;

        ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;

        trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

        ring->gpu_caches_dirty = false;
        return 0;
}

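/*
 * Quiesce an engine prior to teardown: wait for it to go idle (a wait
 * failure is only reported if no GPU reset is already in progress) and
 * then stop the ring itself.
 */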
void
intel_stop_ring_buffer(struct intel_engine_cs *ring)
{
        int ret;

        if (!intel_ring_initialized(ring))
                return;

        ret = intel_ring_idle(ring);
        if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);

        stop_ring(ring);
}