[firefly-linux-kernel-4.4.55.git] drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include <drm/drmP.h>
31 #include "i915_drv.h"
32 #include <drm/i915_drm.h>
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35
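/*
 * ring_space() reports how many bytes can still be written before the
 * software TAIL catches up with the hardware HEAD.  The ring is a circular
 * buffer, so a negative head-minus-tail difference just means TAIL has
 * wrapped past HEAD and ring->size is added back.  I915_RING_FREE_SPACE
 * extra bytes are treated as already in use, presumably so a completely
 * full ring is never mistaken for an empty one (HEAD == TAIL means empty).
 */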
36 static inline int ring_space(struct intel_ring_buffer *ring)
37 {
38         int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
39         if (space < 0)
40                 space += ring->size;
41         return space;
42 }
43
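/*
 * __intel_ring_advance() folds the software TAIL back into the ring and then
 * publishes it to the hardware.  The MMIO tail write is skipped when this
 * ring is flagged in gpu_error.stop_rings, a debug facility used (for
 * example) to simulate GPU hangs without the hardware executing anything.
 */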
44 void __intel_ring_advance(struct intel_ring_buffer *ring)
45 {
46         struct drm_i915_private *dev_priv = ring->dev->dev_private;
47
48         ring->tail &= ring->size - 1;
49         if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
50                 return;
51         ring->write_tail(ring, ring->tail);
52 }
53
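/*
 * All of the flush and dispatch helpers in this file follow the same
 * three-step pattern: reserve space with intel_ring_begin(), write the
 * command dwords with intel_ring_emit(), then make them visible with
 * intel_ring_advance().  A minimal sketch of that pattern (it is exactly
 * what gen2_render_ring_flush() below does):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, cmd);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */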
54 static int
55 gen2_render_ring_flush(struct intel_ring_buffer *ring,
56                        u32      invalidate_domains,
57                        u32      flush_domains)
58 {
59         u32 cmd;
60         int ret;
61
62         cmd = MI_FLUSH;
63         if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
64                 cmd |= MI_NO_WRITE_FLUSH;
65
66         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
67                 cmd |= MI_READ_FLUSH;
68
69         ret = intel_ring_begin(ring, 2);
70         if (ret)
71                 return ret;
72
73         intel_ring_emit(ring, cmd);
74         intel_ring_emit(ring, MI_NOOP);
75         intel_ring_advance(ring);
76
77         return 0;
78 }
79
80 static int
81 gen4_render_ring_flush(struct intel_ring_buffer *ring,
82                        u32      invalidate_domains,
83                        u32      flush_domains)
84 {
85         struct drm_device *dev = ring->dev;
86         u32 cmd;
87         int ret;
88
89         /*
90          * read/write caches:
91          *
92          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
93          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
94          * also flushed at 2d versus 3d pipeline switches.
95          *
96          * read-only caches:
97          *
98          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
99          * MI_READ_FLUSH is set, and is always flushed on 965.
100          *
101          * I915_GEM_DOMAIN_COMMAND may not exist?
102          *
103          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
104          * invalidated when MI_EXE_FLUSH is set.
105          *
106          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
107          * invalidated with every MI_FLUSH.
108          *
109          * TLBs:
110          *
111          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
112          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
113          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
114          * are flushed at any MI_FLUSH.
115          */
116
117         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
118         if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
119                 cmd &= ~MI_NO_WRITE_FLUSH;
120         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
121                 cmd |= MI_EXE_FLUSH;
122
123         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
124             (IS_G4X(dev) || IS_GEN5(dev)))
125                 cmd |= MI_INVALIDATE_ISP;
126
127         ret = intel_ring_begin(ring, 2);
128         if (ret)
129                 return ret;
130
131         intel_ring_emit(ring, cmd);
132         intel_ring_emit(ring, MI_NOOP);
133         intel_ring_advance(ring);
134
135         return 0;
136 }
137
138 /**
139  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
140  * implementing two workarounds on gen6.  From section 1.4.7.1
141  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
142  *
143  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
144  * produced by non-pipelined state commands), software needs to first
145  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
146  * 0.
147  *
148  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
149  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
150  *
151  * And the workaround for these two requires this workaround first:
152  *
153  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
154  * BEFORE the pipe-control with a post-sync op and no write-cache
155  * flushes.
156  *
157  * And this last workaround is tricky because of the requirements on
158  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
159  * volume 2 part 1:
160  *
161  *     "1 of the following must also be set:
162  *      - Render Target Cache Flush Enable ([12] of DW1)
163  *      - Depth Cache Flush Enable ([0] of DW1)
164  *      - Stall at Pixel Scoreboard ([1] of DW1)
165  *      - Depth Stall ([13] of DW1)
166  *      - Post-Sync Operation ([13] of DW1)
167  *      - Notify Enable ([8] of DW1)"
168  *
169  * The cache flushes require the workaround flush that triggered this
170  * one, so we can't use it.  Depth stall would trigger the same.
171  * Post-sync nonzero is what triggered this second workaround, so we
172  * can't use that one either.  Notify enable is IRQs, which aren't
173  * really our business.  That leaves only stall at scoreboard.
174  */
175 static int
176 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
177 {
178         u32 scratch_addr = ring->scratch.gtt_offset + 128;
179         int ret;
180
181
182         ret = intel_ring_begin(ring, 6);
183         if (ret)
184                 return ret;
185
186         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
187         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
188                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
189         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
190         intel_ring_emit(ring, 0); /* low dword */
191         intel_ring_emit(ring, 0); /* high dword */
192         intel_ring_emit(ring, MI_NOOP);
193         intel_ring_advance(ring);
194
195         ret = intel_ring_begin(ring, 6);
196         if (ret)
197                 return ret;
198
199         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
200         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
201         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
202         intel_ring_emit(ring, 0);
203         intel_ring_emit(ring, 0);
204         intel_ring_emit(ring, MI_NOOP);
205         intel_ring_advance(ring);
206
207         return 0;
208 }
209
210 static int
211 gen6_render_ring_flush(struct intel_ring_buffer *ring,
212                          u32 invalidate_domains, u32 flush_domains)
213 {
214         u32 flags = 0;
215         u32 scratch_addr = ring->scratch.gtt_offset + 128;
216         int ret;
217
218         /* Force SNB workarounds for PIPE_CONTROL flushes */
219         ret = intel_emit_post_sync_nonzero_flush(ring);
220         if (ret)
221                 return ret;
222
223         /* Just flush everything.  Experiments have shown that reducing the
224          * number of bits based on the write domains has little performance
225          * impact.
226          */
227         if (flush_domains) {
228                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
229                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
230                 /*
231                  * Ensure that any following seqno writes only happen
232                  * when the render cache is indeed flushed.
233                  */
234                 flags |= PIPE_CONTROL_CS_STALL;
235         }
236         if (invalidate_domains) {
237                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
238                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
239                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
240                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
241                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
242                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
243                 /*
244                  * TLB invalidate requires a post-sync write.
245                  */
246                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
247         }
248
249         ret = intel_ring_begin(ring, 4);
250         if (ret)
251                 return ret;
252
253         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
254         intel_ring_emit(ring, flags);
255         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
256         intel_ring_emit(ring, 0);
257         intel_ring_advance(ring);
258
259         return 0;
260 }
261
262 static int
263 gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
264 {
265         int ret;
266
267         ret = intel_ring_begin(ring, 4);
268         if (ret)
269                 return ret;
270
271         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
272         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
273                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
274         intel_ring_emit(ring, 0);
275         intel_ring_emit(ring, 0);
276         intel_ring_advance(ring);
277
278         return 0;
279 }
280
281 static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
282 {
283         int ret;
284
285         if (!ring->fbc_dirty)
286                 return 0;
287
288         ret = intel_ring_begin(ring, 6);
289         if (ret)
290                 return ret;
291         /* WaFbcNukeOn3DBlt:ivb/hsw */
292         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
293         intel_ring_emit(ring, MSG_FBC_REND_STATE);
294         intel_ring_emit(ring, value);
295         intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
296         intel_ring_emit(ring, MSG_FBC_REND_STATE);
297         intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
298         intel_ring_advance(ring);
299
300         ring->fbc_dirty = false;
301         return 0;
302 }
303
304 static int
305 gen7_render_ring_flush(struct intel_ring_buffer *ring,
306                        u32 invalidate_domains, u32 flush_domains)
307 {
308         u32 flags = 0;
309         u32 scratch_addr = ring->scratch.gtt_offset + 128;
310         int ret;
311
312         /*
313          * Ensure that any following seqno writes only happen when the render
314          * cache is indeed flushed.
315          *
316          * Workaround: 4th PIPE_CONTROL command (except the ones with only
317          * read-cache invalidate bits set) must have the CS_STALL bit set. We
318          * don't try to be clever and just set it unconditionally.
319          */
320         flags |= PIPE_CONTROL_CS_STALL;
321
322         /* Just flush everything.  Experiments have shown that reducing the
323          * number of bits based on the write domains has little performance
324          * impact.
325          */
326         if (flush_domains) {
327                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
328                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
329         }
330         if (invalidate_domains) {
331                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
332                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
333                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
334                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
335                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
336                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
337                 /*
338                  * TLB invalidate requires a post-sync write.
339                  */
340                 flags |= PIPE_CONTROL_QW_WRITE;
341                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
342
343                 /* Workaround: we must issue a pipe_control with CS-stall bit
344                  * set before a pipe_control command that has the state cache
345                  * invalidate bit set. */
346                 gen7_render_ring_cs_stall_wa(ring);
347         }
348
349         ret = intel_ring_begin(ring, 4);
350         if (ret)
351                 return ret;
352
353         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
354         intel_ring_emit(ring, flags);
355         intel_ring_emit(ring, scratch_addr);
356         intel_ring_emit(ring, 0);
357         intel_ring_advance(ring);
358
359         if (!invalidate_domains && flush_domains)
360                 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
361
362         return 0;
363 }
364
365 static int
366 gen8_render_ring_flush(struct intel_ring_buffer *ring,
367                        u32 invalidate_domains, u32 flush_domains)
368 {
369         u32 flags = 0;
370         u32 scratch_addr = ring->scratch.gtt_offset + 128;
371         int ret;
372
373         flags |= PIPE_CONTROL_CS_STALL;
374
375         if (flush_domains) {
376                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
377                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
378         }
379         if (invalidate_domains) {
380                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
381                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
382                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
383                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
384                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
385                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
386                 flags |= PIPE_CONTROL_QW_WRITE;
387                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
388         }
389
390         ret = intel_ring_begin(ring, 6);
391         if (ret)
392                 return ret;
393
394         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
395         intel_ring_emit(ring, flags);
396         intel_ring_emit(ring, scratch_addr);
397         intel_ring_emit(ring, 0);
398         intel_ring_emit(ring, 0);
399         intel_ring_emit(ring, 0);
400         intel_ring_advance(ring);
401
402         return 0;
403
404 }
405
406 static void ring_write_tail(struct intel_ring_buffer *ring,
407                             u32 value)
408 {
409         struct drm_i915_private *dev_priv = ring->dev->dev_private;
410         I915_WRITE_TAIL(ring, value);
411 }
412
413 u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
414 {
415         struct drm_i915_private *dev_priv = ring->dev->dev_private;
416         u64 acthd;
417
418         if (INTEL_INFO(ring->dev)->gen >= 8)
419                 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
420                                          RING_ACTHD_UDW(ring->mmio_base));
421         else if (INTEL_INFO(ring->dev)->gen >= 4)
422                 acthd = I915_READ(RING_ACTHD(ring->mmio_base));
423         else
424                 acthd = I915_READ(ACTHD);
425
426         return acthd;
427 }
428
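/*
 * Rings that do not use a GEM-backed status page (see init_ring_common())
 * point the hardware at a physically addressed page instead.  The DMA bus
 * address is written into HWS_PGA; on gen4+ bits [35:32] of that address are
 * folded into bits [7:4] of the register value, which is what the
 * shift-by-28-and-mask below computes, presumably so bus addresses above
 * 4 GiB can be represented.
 */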
429 static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
430 {
431         struct drm_i915_private *dev_priv = ring->dev->dev_private;
432         u32 addr;
433
434         addr = dev_priv->status_page_dmah->busaddr;
435         if (INTEL_INFO(ring->dev)->gen >= 4)
436                 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
437         I915_WRITE(HWS_PGA, addr);
438 }
439
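/*
 * init_ring_common() brings a ring back to a known state: stop it (CTL, HEAD
 * and TAIL cleared), point the hardware at the status page, work around the
 * G45 failure to reset HEAD, then program START and CTL and wait for the
 * ring to report itself valid with HEAD back at zero.  Forcewake is held
 * across the whole sequence so the register writes actually reach the
 * hardware even if the GT would otherwise be asleep.
 */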
440 static int init_ring_common(struct intel_ring_buffer *ring)
441 {
442         struct drm_device *dev = ring->dev;
443         struct drm_i915_private *dev_priv = dev->dev_private;
444         struct drm_i915_gem_object *obj = ring->obj;
445         int ret = 0;
446         u32 head;
447
448         gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
449
450         /* Stop the ring if it's running. */
451         I915_WRITE_CTL(ring, 0);
452         I915_WRITE_HEAD(ring, 0);
453         ring->write_tail(ring, 0);
454         if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
455                 DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
456
457         if (I915_NEED_GFX_HWS(dev))
458                 intel_ring_setup_status_page(ring);
459         else
460                 ring_setup_phys_status_page(ring);
461
462         head = I915_READ_HEAD(ring) & HEAD_ADDR;
463
464         /* G45 ring initialization fails to reset head to zero */
465         if (head != 0) {
466                 DRM_DEBUG_KMS("%s head not reset to zero "
467                               "ctl %08x head %08x tail %08x start %08x\n",
468                               ring->name,
469                               I915_READ_CTL(ring),
470                               I915_READ_HEAD(ring),
471                               I915_READ_TAIL(ring),
472                               I915_READ_START(ring));
473
474                 I915_WRITE_HEAD(ring, 0);
475
476                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
477                         DRM_ERROR("failed to set %s head to zero "
478                                   "ctl %08x head %08x tail %08x start %08x\n",
479                                   ring->name,
480                                   I915_READ_CTL(ring),
481                                   I915_READ_HEAD(ring),
482                                   I915_READ_TAIL(ring),
483                                   I915_READ_START(ring));
484                 }
485         }
486
487         /* Initialize the ring. This must happen _after_ we've cleared the ring
488          * registers with the above sequence (the readback of the HEAD registers
489          * also enforces ordering), otherwise the hw might lose the new ring
490          * register values. */
491         I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
492         I915_WRITE_CTL(ring,
493                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
494                         | RING_VALID);
495
496         /* If the head is still not zero, the ring is dead */
497         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
498                      I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
499                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
500                 DRM_ERROR("%s initialization failed "
501                                 "ctl %08x head %08x tail %08x start %08x\n",
502                                 ring->name,
503                                 I915_READ_CTL(ring),
504                                 I915_READ_HEAD(ring),
505                                 I915_READ_TAIL(ring),
506                                 I915_READ_START(ring));
507                 ret = -EIO;
508                 goto out;
509         }
510
511         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
512                 i915_kernel_lost_context(ring->dev);
513         else {
514                 ring->head = I915_READ_HEAD(ring);
515                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
516                 ring->space = ring_space(ring);
517                 ring->last_retired_head = -1;
518         }
519
520         memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
521
522 out:
523         gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
524
525         return ret;
526 }
527
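/*
 * The pipe-control scratch page is a 4KiB, LLC-cached GEM object pinned into
 * the global GTT.  PIPE_CONTROL post-sync writes from the flush functions
 * above land in it, and on Ironlake the first dword doubles as the seqno
 * storage read back by pc_render_get_seqno().
 */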
528 static int
529 init_pipe_control(struct intel_ring_buffer *ring)
530 {
531         int ret;
532
533         if (ring->scratch.obj)
534                 return 0;
535
536         ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
537         if (ring->scratch.obj == NULL) {
538                 DRM_ERROR("Failed to allocate seqno page\n");
539                 ret = -ENOMEM;
540                 goto err;
541         }
542
543         ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
544         if (ret)
545                 goto err_unref;
546
547         ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
548         if (ret)
549                 goto err_unref;
550
551         ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
552         ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
553         if (ring->scratch.cpu_page == NULL) {
554                 ret = -ENOMEM;
555                 goto err_unpin;
556         }
557
558         DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
559                          ring->name, ring->scratch.gtt_offset);
560         return 0;
561
562 err_unpin:
563         i915_gem_object_ggtt_unpin(ring->scratch.obj);
564 err_unref:
565         drm_gem_object_unreference(&ring->scratch.obj->base);
566 err:
567         return ret;
568 }
569
570 static int init_render_ring(struct intel_ring_buffer *ring)
571 {
572         struct drm_device *dev = ring->dev;
573         struct drm_i915_private *dev_priv = dev->dev_private;
574         int ret = init_ring_common(ring);
575
576         /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
577         if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
578                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
579
580         /* We need to disable the AsyncFlip performance optimisations in order
581          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
582          * programmed to '1' on all products.
583          *
584          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
585          */
586         if (INTEL_INFO(dev)->gen >= 6)
587                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
588
589         /* Required for the hardware to program scanline values for waiting */
590         /* WaEnableFlushTlbInvalidationMode:snb */
591         if (INTEL_INFO(dev)->gen == 6)
592                 I915_WRITE(GFX_MODE,
593                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
594
595         /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
596         if (IS_GEN7(dev))
597                 I915_WRITE(GFX_MODE_GEN7,
598                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
599                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
600
601         if (INTEL_INFO(dev)->gen >= 5) {
602                 ret = init_pipe_control(ring);
603                 if (ret)
604                         return ret;
605         }
606
607         if (IS_GEN6(dev)) {
608                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
609                  * "If this bit is set, STCunit will have LRA as replacement
610                  *  policy. [...] This bit must be reset.  LRA replacement
611                  *  policy is not supported."
612                  */
613                 I915_WRITE(CACHE_MODE_0,
614                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
615
616                 /* This is not explicitly set for GEN6, so read the register.
617                  * see intel_ring_mi_set_context() for why we care.
618                  * TODO: consider explicitly setting the bit for GEN5
619                  */
620                 ring->itlb_before_ctx_switch =
621                         !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_EXPLICIT);
622         }
623
624         if (INTEL_INFO(dev)->gen >= 6)
625                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
626
627         if (HAS_L3_DPF(dev))
628                 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
629
630         return ret;
631 }
632
633 static void render_ring_cleanup(struct intel_ring_buffer *ring)
634 {
635         struct drm_device *dev = ring->dev;
636
637         if (ring->scratch.obj == NULL)
638                 return;
639
640         if (INTEL_INFO(dev)->gen >= 5) {
641                 kunmap(sg_page(ring->scratch.obj->pages->sgl));
642                 i915_gem_object_ggtt_unpin(ring->scratch.obj);
643         }
644
645         drm_gem_object_unreference(&ring->scratch.obj->base);
646         ring->scratch.obj = NULL;
647 }
648
649 static void
650 update_mboxes(struct intel_ring_buffer *ring,
651               u32 mmio_offset)
652 {
653 /* NB: In order to be able to do semaphore MBOX updates for varying number
654  * of rings, it's easiest if we round up each individual update to a
655  * multiple of 2 (since ring updates must always be a multiple of 2)
656  * even though the actual update only requires 3 dwords.
657  */
658 #define MBOX_UPDATE_DWORDS 4
659         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
660         intel_ring_emit(ring, mmio_offset);
661         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
662         intel_ring_emit(ring, MI_NOOP);
663 }
664
665 /**
666  * gen6_add_request - Update the semaphore mailbox registers
667  *
668  * @ring: ring that is adding a request
669  * @seqno: return seqno stuck into the ring
670  *
671  * Update the mailbox registers in the *other* rings with the current seqno.
672  * This acts like a signal in the canonical semaphore.
673  */
674 static int
675 gen6_add_request(struct intel_ring_buffer *ring)
676 {
677         struct drm_device *dev = ring->dev;
678         struct drm_i915_private *dev_priv = dev->dev_private;
679         struct intel_ring_buffer *useless;
680         int i, ret, num_dwords = 4;
681
682         if (i915_semaphore_is_enabled(dev))
683                 num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
684 #undef MBOX_UPDATE_DWORDS
685
686         ret = intel_ring_begin(ring, num_dwords);
687         if (ret)
688                 return ret;
689
690         if (i915_semaphore_is_enabled(dev)) {
691                 for_each_ring(useless, dev_priv, i) {
692                         u32 mbox_reg = ring->signal_mbox[i];
693                         if (mbox_reg != GEN6_NOSYNC)
694                                 update_mboxes(ring, mbox_reg);
695                 }
696         }
697
698         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
699         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
700         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
701         intel_ring_emit(ring, MI_USER_INTERRUPT);
702         __intel_ring_advance(ring);
703
704         return 0;
705 }
706
707 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
708                                               u32 seqno)
709 {
710         struct drm_i915_private *dev_priv = dev->dev_private;
711         return dev_priv->last_seqno < seqno;
712 }
713
714 /**
715  * gen6_ring_sync - sync the waiter to the signaller on seqno
716  *
717  * @waiter: ring that is waiting
718  * @signaller: ring which has, or will signal
719  * @seqno: seqno which the waiter will block on
720  */
721 static int
722 gen6_ring_sync(struct intel_ring_buffer *waiter,
723                struct intel_ring_buffer *signaller,
724                u32 seqno)
725 {
726         int ret;
727         u32 dw1 = MI_SEMAPHORE_MBOX |
728                   MI_SEMAPHORE_COMPARE |
729                   MI_SEMAPHORE_REGISTER;
730
731         /* Throughout all of the GEM code, seqno passed implies our current
732          * seqno is >= the last seqno executed. However for hardware the
733          * comparison is strictly greater than.
734          */
735         seqno -= 1;
736
737         WARN_ON(signaller->semaphore_register[waiter->id] ==
738                 MI_SEMAPHORE_SYNC_INVALID);
739
740         ret = intel_ring_begin(waiter, 4);
741         if (ret)
742                 return ret;
743
744         /* If seqno wrap happened, omit the wait with no-ops */
745         if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
746                 intel_ring_emit(waiter,
747                                 dw1 |
748                                 signaller->semaphore_register[waiter->id]);
749                 intel_ring_emit(waiter, seqno);
750                 intel_ring_emit(waiter, 0);
751                 intel_ring_emit(waiter, MI_NOOP);
752         } else {
753                 intel_ring_emit(waiter, MI_NOOP);
754                 intel_ring_emit(waiter, MI_NOOP);
755                 intel_ring_emit(waiter, MI_NOOP);
756                 intel_ring_emit(waiter, MI_NOOP);
757         }
758         intel_ring_advance(waiter);
759
760         return 0;
761 }
762
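/*
 * PIPE_CONTROL_FLUSH() emits one depth-stalled PIPE_CONTROL that posts a
 * qword write to the given scratch address.  pc_render_add_request() chains
 * six of these, each to a separate cacheline, purely to flush the write
 * buffers before the final PIPE_CONTROL that raises the notify interrupt.
 */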
763 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
764 do {                                                                    \
765         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
766                  PIPE_CONTROL_DEPTH_STALL);                             \
767         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
768         intel_ring_emit(ring__, 0);                                                     \
769         intel_ring_emit(ring__, 0);                                                     \
770 } while (0)
771
772 static int
773 pc_render_add_request(struct intel_ring_buffer *ring)
774 {
775         u32 scratch_addr = ring->scratch.gtt_offset + 128;
776         int ret;
777
778         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
779          * incoherent with writes to memory, i.e. completely fubar,
780          * so we need to use PIPE_NOTIFY instead.
781          *
782          * However, we also need to workaround the qword write
783          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
784          * memory before requesting an interrupt.
785          */
786         ret = intel_ring_begin(ring, 32);
787         if (ret)
788                 return ret;
789
790         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
791                         PIPE_CONTROL_WRITE_FLUSH |
792                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
793         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
794         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
795         intel_ring_emit(ring, 0);
796         PIPE_CONTROL_FLUSH(ring, scratch_addr);
797         scratch_addr += 128; /* write to separate cachelines */
798         PIPE_CONTROL_FLUSH(ring, scratch_addr);
799         scratch_addr += 128;
800         PIPE_CONTROL_FLUSH(ring, scratch_addr);
801         scratch_addr += 128;
802         PIPE_CONTROL_FLUSH(ring, scratch_addr);
803         scratch_addr += 128;
804         PIPE_CONTROL_FLUSH(ring, scratch_addr);
805         scratch_addr += 128;
806         PIPE_CONTROL_FLUSH(ring, scratch_addr);
807
808         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
809                         PIPE_CONTROL_WRITE_FLUSH |
810                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
811                         PIPE_CONTROL_NOTIFY);
812         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
813         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
814         intel_ring_emit(ring, 0);
815         __intel_ring_advance(ring);
816
817         return 0;
818 }
819
820 static u32
821 gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
822 {
823         /* Workaround to force correct ordering between irq and seqno writes on
824          * ivb (and maybe also on snb) by reading from a CS register (like
825          * ACTHD) before reading the status page. */
826         if (!lazy_coherency) {
827                 struct drm_i915_private *dev_priv = ring->dev->dev_private;
828                 POSTING_READ(RING_ACTHD(ring->mmio_base));
829         }
830
831         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
832 }
833
834 static u32
835 ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
836 {
837         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
838 }
839
840 static void
841 ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
842 {
843         intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
844 }
845
846 static u32
847 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
848 {
849         return ring->scratch.cpu_page[0];
850 }
851
852 static void
853 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
854 {
855         ring->scratch.cpu_page[0] = seqno;
856 }
857
858 static bool
859 gen5_ring_get_irq(struct intel_ring_buffer *ring)
860 {
861         struct drm_device *dev = ring->dev;
862         struct drm_i915_private *dev_priv = dev->dev_private;
863         unsigned long flags;
864
865         if (!dev->irq_enabled)
866                 return false;
867
868         spin_lock_irqsave(&dev_priv->irq_lock, flags);
869         if (ring->irq_refcount++ == 0)
870                 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
871         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
872
873         return true;
874 }
875
876 static void
877 gen5_ring_put_irq(struct intel_ring_buffer *ring)
878 {
879         struct drm_device *dev = ring->dev;
880         struct drm_i915_private *dev_priv = dev->dev_private;
881         unsigned long flags;
882
883         spin_lock_irqsave(&dev_priv->irq_lock, flags);
884         if (--ring->irq_refcount == 0)
885                 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
886         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
887 }
888
889 static bool
890 i9xx_ring_get_irq(struct intel_ring_buffer *ring)
891 {
892         struct drm_device *dev = ring->dev;
893         struct drm_i915_private *dev_priv = dev->dev_private;
894         unsigned long flags;
895
896         if (!dev->irq_enabled)
897                 return false;
898
899         spin_lock_irqsave(&dev_priv->irq_lock, flags);
900         if (ring->irq_refcount++ == 0) {
901                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
902                 I915_WRITE(IMR, dev_priv->irq_mask);
903                 POSTING_READ(IMR);
904         }
905         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
906
907         return true;
908 }
909
910 static void
911 i9xx_ring_put_irq(struct intel_ring_buffer *ring)
912 {
913         struct drm_device *dev = ring->dev;
914         struct drm_i915_private *dev_priv = dev->dev_private;
915         unsigned long flags;
916
917         spin_lock_irqsave(&dev_priv->irq_lock, flags);
918         if (--ring->irq_refcount == 0) {
919                 dev_priv->irq_mask |= ring->irq_enable_mask;
920                 I915_WRITE(IMR, dev_priv->irq_mask);
921                 POSTING_READ(IMR);
922         }
923         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
924 }
925
926 static bool
927 i8xx_ring_get_irq(struct intel_ring_buffer *ring)
928 {
929         struct drm_device *dev = ring->dev;
930         struct drm_i915_private *dev_priv = dev->dev_private;
931         unsigned long flags;
932
933         if (!dev->irq_enabled)
934                 return false;
935
936         spin_lock_irqsave(&dev_priv->irq_lock, flags);
937         if (ring->irq_refcount++ == 0) {
938                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
939                 I915_WRITE16(IMR, dev_priv->irq_mask);
940                 POSTING_READ16(IMR);
941         }
942         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
943
944         return true;
945 }
946
947 static void
948 i8xx_ring_put_irq(struct intel_ring_buffer *ring)
949 {
950         struct drm_device *dev = ring->dev;
951         struct drm_i915_private *dev_priv = dev->dev_private;
952         unsigned long flags;
953
954         spin_lock_irqsave(&dev_priv->irq_lock, flags);
955         if (--ring->irq_refcount == 0) {
956                 dev_priv->irq_mask |= ring->irq_enable_mask;
957                 I915_WRITE16(IMR, dev_priv->irq_mask);
958                 POSTING_READ16(IMR);
959         }
960         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
961 }
962
963 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
964 {
965         struct drm_device *dev = ring->dev;
966         struct drm_i915_private *dev_priv = ring->dev->dev_private;
967         u32 mmio = 0;
968
969         /* The ring status page addresses are no longer next to the rest of
970          * the ring registers as of gen7.
971          */
972         if (IS_GEN7(dev)) {
973                 switch (ring->id) {
974                 case RCS:
975                         mmio = RENDER_HWS_PGA_GEN7;
976                         break;
977                 case BCS:
978                         mmio = BLT_HWS_PGA_GEN7;
979                         break;
980                 case VCS:
981                         mmio = BSD_HWS_PGA_GEN7;
982                         break;
983                 case VECS:
984                         mmio = VEBOX_HWS_PGA_GEN7;
985                         break;
986                 }
987         } else if (IS_GEN6(ring->dev)) {
988                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
989         } else {
990                 /* XXX: gen8 returns to sanity */
991                 mmio = RING_HWS_PGA(ring->mmio_base);
992         }
993
994         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
995         POSTING_READ(mmio);
996
997         /*
998          * Flush the TLB for this page
999          *
1000          * FIXME: These two bits have disappeared on gen8, so a question
1001          * arises: do we still need this and if so how should we go about
1002          * invalidating the TLB?
1003          */
1004         if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
1005                 u32 reg = RING_INSTPM(ring->mmio_base);
1006
1007                 /* ring should be idle before issuing a sync flush */
1008                 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1009
1010                 I915_WRITE(reg,
1011                            _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
1012                                               INSTPM_SYNC_FLUSH));
1013                 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1014                              1000))
1015                         DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
1016                                   ring->name);
1017         }
1018 }
1019
1020 static int
1021 bsd_ring_flush(struct intel_ring_buffer *ring,
1022                u32     invalidate_domains,
1023                u32     flush_domains)
1024 {
1025         int ret;
1026
1027         ret = intel_ring_begin(ring, 2);
1028         if (ret)
1029                 return ret;
1030
1031         intel_ring_emit(ring, MI_FLUSH);
1032         intel_ring_emit(ring, MI_NOOP);
1033         intel_ring_advance(ring);
1034         return 0;
1035 }
1036
1037 static int
1038 i9xx_add_request(struct intel_ring_buffer *ring)
1039 {
1040         int ret;
1041
1042         ret = intel_ring_begin(ring, 4);
1043         if (ret)
1044                 return ret;
1045
1046         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1047         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1048         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
1049         intel_ring_emit(ring, MI_USER_INTERRUPT);
1050         __intel_ring_advance(ring);
1051
1052         return 0;
1053 }
1054
1055 static bool
1056 gen6_ring_get_irq(struct intel_ring_buffer *ring)
1057 {
1058         struct drm_device *dev = ring->dev;
1059         struct drm_i915_private *dev_priv = dev->dev_private;
1060         unsigned long flags;
1061
1062         if (!dev->irq_enabled)
1063                 return false;
1064
1065         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1066         if (ring->irq_refcount++ == 0) {
1067                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1068                         I915_WRITE_IMR(ring,
1069                                        ~(ring->irq_enable_mask |
1070                                          GT_PARITY_ERROR(dev)));
1071                 else
1072                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1073                 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1074         }
1075         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1076
1077         return true;
1078 }
1079
1080 static void
1081 gen6_ring_put_irq(struct intel_ring_buffer *ring)
1082 {
1083         struct drm_device *dev = ring->dev;
1084         struct drm_i915_private *dev_priv = dev->dev_private;
1085         unsigned long flags;
1086
1087         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1088         if (--ring->irq_refcount == 0) {
1089                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1090                         I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1091                 else
1092                         I915_WRITE_IMR(ring, ~0);
1093                 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1094         }
1095         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1096 }
1097
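/*
 * The Haswell video enhancement (VEBOX) ring appears to have its user
 * interrupt routed through the PM interrupt registers rather than the GT
 * ones, hence the snb_{enable,disable}_pm_irq() helpers here instead of the
 * ilk_*_gt_irq() calls used by the other rings.
 */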
1098 static bool
1099 hsw_vebox_get_irq(struct intel_ring_buffer *ring)
1100 {
1101         struct drm_device *dev = ring->dev;
1102         struct drm_i915_private *dev_priv = dev->dev_private;
1103         unsigned long flags;
1104
1105         if (!dev->irq_enabled)
1106                 return false;
1107
1108         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1109         if (ring->irq_refcount++ == 0) {
1110                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1111                 snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1112         }
1113         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1114
1115         return true;
1116 }
1117
1118 static void
1119 hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1120 {
1121         struct drm_device *dev = ring->dev;
1122         struct drm_i915_private *dev_priv = dev->dev_private;
1123         unsigned long flags;
1124
1125         if (!dev->irq_enabled)
1126                 return;
1127
1128         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1129         if (--ring->irq_refcount == 0) {
1130                 I915_WRITE_IMR(ring, ~0);
1131                 snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1132         }
1133         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1134 }
1135
1136 static bool
1137 gen8_ring_get_irq(struct intel_ring_buffer *ring)
1138 {
1139         struct drm_device *dev = ring->dev;
1140         struct drm_i915_private *dev_priv = dev->dev_private;
1141         unsigned long flags;
1142
1143         if (!dev->irq_enabled)
1144                 return false;
1145
1146         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1147         if (ring->irq_refcount++ == 0) {
1148                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1149                         I915_WRITE_IMR(ring,
1150                                        ~(ring->irq_enable_mask |
1151                                          GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1152                 } else {
1153                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1154                 }
1155                 POSTING_READ(RING_IMR(ring->mmio_base));
1156         }
1157         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1158
1159         return true;
1160 }
1161
1162 static void
1163 gen8_ring_put_irq(struct intel_ring_buffer *ring)
1164 {
1165         struct drm_device *dev = ring->dev;
1166         struct drm_i915_private *dev_priv = dev->dev_private;
1167         unsigned long flags;
1168
1169         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1170         if (--ring->irq_refcount == 0) {
1171                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1172                         I915_WRITE_IMR(ring,
1173                                        ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1174                 } else {
1175                         I915_WRITE_IMR(ring, ~0);
1176                 }
1177                 POSTING_READ(RING_IMR(ring->mmio_base));
1178         }
1179         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1180 }
1181
1182 static int
1183 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1184                          u32 offset, u32 length,
1185                          unsigned flags)
1186 {
1187         int ret;
1188
1189         ret = intel_ring_begin(ring, 2);
1190         if (ret)
1191                 return ret;
1192
1193         intel_ring_emit(ring,
1194                         MI_BATCH_BUFFER_START |
1195                         MI_BATCH_GTT |
1196                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1197         intel_ring_emit(ring, offset);
1198         intel_ring_advance(ring);
1199
1200         return 0;
1201 }
1202
1203 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1204 #define I830_BATCH_LIMIT (256*1024)
1205 static int
1206 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1207                                 u32 offset, u32 len,
1208                                 unsigned flags)
1209 {
1210         int ret;
1211
1212         if (flags & I915_DISPATCH_PINNED) {
1213                 ret = intel_ring_begin(ring, 4);
1214                 if (ret)
1215                         return ret;
1216
1217                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1218                 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1219                 intel_ring_emit(ring, offset + len - 8);
1220                 intel_ring_emit(ring, MI_NOOP);
1221                 intel_ring_advance(ring);
1222         } else {
1223                 u32 cs_offset = ring->scratch.gtt_offset;
1224
1225                 if (len > I830_BATCH_LIMIT)
1226                         return -ENOSPC;
1227
1228                 ret = intel_ring_begin(ring, 9+3);
1229                 if (ret)
1230                         return ret;
1231                 /* Blit the batch (which has now all relocs applied) to the stable batch
1232                  * scratch bo area (so that the CS never stumbles over its tlb
1233                  * invalidation bug) ... */
1234                 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
1235                                 XY_SRC_COPY_BLT_WRITE_ALPHA |
1236                                 XY_SRC_COPY_BLT_WRITE_RGB);
1237                 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
1238                 intel_ring_emit(ring, 0);
1239                 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1240                 intel_ring_emit(ring, cs_offset);
1241                 intel_ring_emit(ring, 0);
1242                 intel_ring_emit(ring, 4096);
1243                 intel_ring_emit(ring, offset);
1244                 intel_ring_emit(ring, MI_FLUSH);
1245
1246                 /* ... and execute it. */
1247                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1248                 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1249                 intel_ring_emit(ring, cs_offset + len - 8);
1250                 intel_ring_advance(ring);
1251         }
1252
1253         return 0;
1254 }
1255
1256 static int
1257 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1258                          u32 offset, u32 len,
1259                          unsigned flags)
1260 {
1261         int ret;
1262
1263         ret = intel_ring_begin(ring, 2);
1264         if (ret)
1265                 return ret;
1266
1267         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1268         intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1269         intel_ring_advance(ring);
1270
1271         return 0;
1272 }
1273
1274 static void cleanup_status_page(struct intel_ring_buffer *ring)
1275 {
1276         struct drm_i915_gem_object *obj;
1277
1278         obj = ring->status_page.obj;
1279         if (obj == NULL)
1280                 return;
1281
1282         kunmap(sg_page(obj->pages->sgl));
1283         i915_gem_object_ggtt_unpin(obj);
1284         drm_gem_object_unreference(&obj->base);
1285         ring->status_page.obj = NULL;
1286 }
1287
1288 static int init_status_page(struct intel_ring_buffer *ring)
1289 {
1290         struct drm_device *dev = ring->dev;
1291         struct drm_i915_gem_object *obj;
1292         int ret;
1293
1294         obj = i915_gem_alloc_object(dev, 4096);
1295         if (obj == NULL) {
1296                 DRM_ERROR("Failed to allocate status page\n");
1297                 ret = -ENOMEM;
1298                 goto err;
1299         }
1300
1301         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1302         if (ret)
1303                 goto err_unref;
1304
1305         ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
1306         if (ret)
1307                 goto err_unref;
1308
1309         ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1310         ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1311         if (ring->status_page.page_addr == NULL) {
1312                 ret = -ENOMEM;
1313                 goto err_unpin;
1314         }
1315         ring->status_page.obj = obj;
1316         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1317
1318         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1319                         ring->name, ring->status_page.gfx_addr);
1320
1321         return 0;
1322
1323 err_unpin:
1324         i915_gem_object_ggtt_unpin(obj);
1325 err_unref:
1326         drm_gem_object_unreference(&obj->base);
1327 err:
1328         return ret;
1329 }
1330
1331 static int init_phys_status_page(struct intel_ring_buffer *ring)
1332 {
1333         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1334
1335         if (!dev_priv->status_page_dmah) {
1336                 dev_priv->status_page_dmah =
1337                         drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1338                 if (!dev_priv->status_page_dmah)
1339                         return -ENOMEM;
1340         }
1341
1342         ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1343         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1344
1345         return 0;
1346 }
1347
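/*
 * Common ring construction: set up a status page (GEM-backed where the
 * platform needs one, physical otherwise), allocate the 32-page ring object
 * (preferring stolen memory when there is no LLC), pin it into the mappable
 * GTT and map it write-combined through the aperture, then run the ring's
 * init hook.  The i830/845G erratum handling afterwards shrinks the usable
 * size by two cachelines.
 */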
1348 static int intel_init_ring_buffer(struct drm_device *dev,
1349                                   struct intel_ring_buffer *ring)
1350 {
1351         struct drm_i915_gem_object *obj;
1352         struct drm_i915_private *dev_priv = dev->dev_private;
1353         int ret;
1354
1355         ring->dev = dev;
1356         INIT_LIST_HEAD(&ring->active_list);
1357         INIT_LIST_HEAD(&ring->request_list);
1358         ring->size = 32 * PAGE_SIZE;
1359         memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
1360
1361         init_waitqueue_head(&ring->irq_queue);
1362
1363         if (I915_NEED_GFX_HWS(dev)) {
1364                 ret = init_status_page(ring);
1365                 if (ret)
1366                         return ret;
1367         } else {
1368                 BUG_ON(ring->id != RCS);
1369                 ret = init_phys_status_page(ring);
1370                 if (ret)
1371                         return ret;
1372         }
1373
1374         obj = NULL;
1375         if (!HAS_LLC(dev))
1376                 obj = i915_gem_object_create_stolen(dev, ring->size);
1377         if (obj == NULL)
1378                 obj = i915_gem_alloc_object(dev, ring->size);
1379         if (obj == NULL) {
1380                 DRM_ERROR("Failed to allocate ringbuffer\n");
1381                 ret = -ENOMEM;
1382                 goto err_hws;
1383         }
1384
1385         ring->obj = obj;
1386
1387         ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1388         if (ret)
1389                 goto err_unref;
1390
1391         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1392         if (ret)
1393                 goto err_unpin;
1394
1395         ring->virtual_start =
1396                 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1397                            ring->size);
1398         if (ring->virtual_start == NULL) {
1399                 DRM_ERROR("Failed to map ringbuffer.\n");
1400                 ret = -EINVAL;
1401                 goto err_unpin;
1402         }
1403
1404         ret = ring->init(ring);
1405         if (ret)
1406                 goto err_unmap;
1407
1408         /* Workaround an erratum on the i830 which causes a hang if
1409          * the TAIL pointer points to within the last 2 cachelines
1410          * of the buffer.
1411          */
1412         ring->effective_size = ring->size;
1413         if (IS_I830(ring->dev) || IS_845G(ring->dev))
1414                 ring->effective_size -= 128;
1415
1416         i915_cmd_parser_init_ring(ring);
1417
1418         return 0;
1419
1420 err_unmap:
1421         iounmap(ring->virtual_start);
1422 err_unpin:
1423         i915_gem_object_ggtt_unpin(obj);
1424 err_unref:
1425         drm_gem_object_unreference(&obj->base);
1426         ring->obj = NULL;
1427 err_hws:
1428         cleanup_status_page(ring);
1429         return ret;
1430 }
1431
1432 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1433 {
1434         struct drm_i915_private *dev_priv;
1435         int ret;
1436
1437         if (ring->obj == NULL)
1438                 return;
1439
1440         /* Disable the ring buffer. The ring must be idle at this point */
1441         dev_priv = ring->dev->dev_private;
1442         ret = intel_ring_idle(ring);
1443         if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
1444                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1445                           ring->name, ret);
1446
1447         I915_WRITE_CTL(ring, 0);
1448
1449         iounmap(ring->virtual_start);
1450
1451         i915_gem_object_ggtt_unpin(ring->obj);
1452         drm_gem_object_unreference(&ring->obj->base);
1453         ring->obj = NULL;
1454         ring->preallocated_lazy_request = NULL;
1455         ring->outstanding_lazy_seqno = 0;
1456
1457         if (ring->cleanup)
1458                 ring->cleanup(ring);
1459
1460         cleanup_status_page(ring);
1461 }
1462
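/*
 * intel_ring_wait_request() tries to free @n bytes without polling the
 * hardware: it first reuses the head position recorded when requests were
 * last retired, then walks the outstanding requests for the oldest one whose
 * completion would leave enough room and sleeps on that seqno.  Returns
 * -ENOSPC if no request would ever free enough space, letting the caller
 * fall back to busy-waiting on the real ring registers.
 */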
1463 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1464 {
1465         struct drm_i915_gem_request *request;
1466         u32 seqno = 0, tail;
1467         int ret;
1468
1469         if (ring->last_retired_head != -1) {
1470                 ring->head = ring->last_retired_head;
1471                 ring->last_retired_head = -1;
1472
1473                 ring->space = ring_space(ring);
1474                 if (ring->space >= n)
1475                         return 0;
1476         }
1477
1478         list_for_each_entry(request, &ring->request_list, list) {
1479                 int space;
1480
1481                 if (request->tail == -1)
1482                         continue;
1483
1484                 space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
1485                 if (space < 0)
1486                         space += ring->size;
1487                 if (space >= n) {
1488                         seqno = request->seqno;
1489                         tail = request->tail;
1490                         break;
1491                 }
1492
1493                 /* Consume this request in case we need more space than
1494                  * is available, which prevents a race between updating
1495                  * last_retired_head and direct reads of I915_RING_HEAD.
1496                  * It also provides a nice sanity check.
1497                  */
1498                 request->tail = -1;
1499         }
1500
1501         if (seqno == 0)
1502                 return -ENOSPC;
1503
1504         ret = i915_wait_seqno(ring, seqno);
1505         if (ret)
1506                 return ret;
1507
1508         ring->head = tail;
1509         ring->space = ring_space(ring);
1510         if (WARN_ON(ring->space < n))
1511                 return -ENOSPC;
1512
1513         return 0;
1514 }
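
/*
 * A worked example of the free-space arithmetic used in
 * intel_ring_wait_request() above.  The numbers are illustrative only
 * and assume I915_RING_FREE_SPACE is 64 bytes:
 *
 *	ring->size = 4096, ring->tail = 3968, request->tail = 256
 *	space = 256 - (3968 + 64) = -3776
 *	space += ring->size  ->  space = 320
 *
 * i.e. once the GPU retires up to request->tail, 320 bytes become
 * usable; the modular add handles requests that have wrapped past the
 * end of the buffer.
 */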
1515
1516 static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1517 {
1518         struct drm_device *dev = ring->dev;
1519         struct drm_i915_private *dev_priv = dev->dev_private;
1520         unsigned long end;
1521         int ret;
1522
1523         ret = intel_ring_wait_request(ring, n);
1524         if (ret != -ENOSPC)
1525                 return ret;
1526
1527         /* force the tail write in case we have been skipping them */
1528         __intel_ring_advance(ring);
1529
1530         trace_i915_ring_wait_begin(ring);
1531         /* With GEM the hangcheck timer should kick us out of the loop;
1532          * leaving it early runs the risk of corrupting GEM state (due
1533          * to running on almost untested codepaths). But on resume
1534          * timers don't work yet, so prevent a complete hang in that
1535          * case by choosing an insanely large timeout. */
1536         end = jiffies + 60 * HZ;
1537
1538         do {
1539                 ring->head = I915_READ_HEAD(ring);
1540                 ring->space = ring_space(ring);
1541                 if (ring->space >= n) {
1542                         trace_i915_ring_wait_end(ring);
1543                         return 0;
1544                 }
1545
1546                 if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
1547                     dev->primary->master) {
1548                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1549                         if (master_priv->sarea_priv)
1550                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1551                 }
1552
1553                 msleep(1);
1554
1555                 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1556                                            dev_priv->mm.interruptible);
1557                 if (ret)
1558                         return ret;
1559         } while (!time_after(jiffies, end));
1560         trace_i915_ring_wait_end(ring);
1561         return -EBUSY;
1562 }
1563
1564 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1565 {
1566         uint32_t __iomem *virt;
1567         int rem = ring->size - ring->tail;
1568
1569         if (ring->space < rem) {
1570                 int ret = ring_wait_for_space(ring, rem);
1571                 if (ret)
1572                         return ret;
1573         }
1574
1575         virt = ring->virtual_start + ring->tail;
1576         rem /= 4;
1577         while (rem--)
1578                 iowrite32(MI_NOOP, virt++);
1579
1580         ring->tail = 0;
1581         ring->space = ring_space(ring);
1582
1583         return 0;
1584 }
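
/*
 * Sketch of a wrap, with made-up numbers: for ring->size = 4096 and
 * ring->tail = 4032, rem = 64 bytes, so sixteen MI_NOOPs pad out the
 * remainder of the buffer, the tail is reset to 0 and the free space
 * is recomputed from the current head.  The ring_wait_for_space(ring,
 * rem) call above guarantees those padding bytes will not overwrite
 * commands the GPU has yet to execute.
 */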
1585
1586 int intel_ring_idle(struct intel_ring_buffer *ring)
1587 {
1588         u32 seqno;
1589         int ret;
1590
1591         /* We need to add any requests required to flush the objects and ring */
1592         if (ring->outstanding_lazy_seqno) {
1593                 ret = i915_add_request(ring, NULL);
1594                 if (ret)
1595                         return ret;
1596         }
1597
1598         /* Wait upon the last request to be completed */
1599         if (list_empty(&ring->request_list))
1600                 return 0;
1601
1602         seqno = list_entry(ring->request_list.prev,
1603                            struct drm_i915_gem_request,
1604                            list)->seqno;
1605
1606         return i915_wait_seqno(ring, seqno);
1607 }
1608
1609 static int
1610 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1611 {
1612         if (ring->outstanding_lazy_seqno)
1613                 return 0;
1614
1615         if (ring->preallocated_lazy_request == NULL) {
1616                 struct drm_i915_gem_request *request;
1617
1618                 request = kmalloc(sizeof(*request), GFP_KERNEL);
1619                 if (request == NULL)
1620                         return -ENOMEM;
1621
1622                 ring->preallocated_lazy_request = request;
1623         }
1624
1625         return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1626 }
1627
1628 static int __intel_ring_prepare(struct intel_ring_buffer *ring,
1629                                 int bytes)
1630 {
1631         int ret;
1632
1633         if (unlikely(ring->tail + bytes > ring->effective_size)) {
1634                 ret = intel_wrap_ring_buffer(ring);
1635                 if (unlikely(ret))
1636                         return ret;
1637         }
1638
1639         if (unlikely(ring->space < bytes)) {
1640                 ret = ring_wait_for_space(ring, bytes);
1641                 if (unlikely(ret))
1642                         return ret;
1643         }
1644
1645         return 0;
1646 }
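
/*
 * Note on the ordering in __intel_ring_prepare() above: the wrap check
 * runs first because an emit must be contiguous in the ring.  If
 * tail + bytes would run past effective_size, the remainder of the
 * buffer is NOOP-padded via intel_wrap_ring_buffer() and only then is
 * the linear space at the new tail (now 0) checked.  As an illustrative
 * example, with effective_size = size = 4096, tail = 4088 and
 * bytes = 16, the 8 trailing bytes are padded and the 16 bytes are
 * checked against the space available at the start of the ring.
 */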
1647
1648 int intel_ring_begin(struct intel_ring_buffer *ring,
1649                      int num_dwords)
1650 {
1651         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1652         int ret;
1653
1654         ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1655                                    dev_priv->mm.interruptible);
1656         if (ret)
1657                 return ret;
1658
1659         ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
1660         if (ret)
1661                 return ret;
1662
1663         /* Preallocate the outstanding lazy request (olr) before touching the ring */
1664         ret = intel_ring_alloc_seqno(ring);
1665         if (ret)
1666                 return ret;
1667
1668         ring->space -= num_dwords * sizeof(uint32_t);
1669         return 0;
1670 }
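
/*
 * Typical emission pattern built on intel_ring_begin(), sketched after
 * the emitters elsewhere in this file (e.g. the dispatch_execbuffer
 * implementations below); the dword count reserved is expected to match
 * the number of intel_ring_emit() calls that follow:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */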
1671
1672 /* Align the ring tail to a cacheline boundary */
1673 int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
1674 {
1675         int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
1676         int ret;
1677
1678         if (num_dwords == 0)
1679                 return 0;
1680
1681         ret = intel_ring_begin(ring, num_dwords);
1682         if (ret)
1683                 return ret;
1684
1685         while (num_dwords--)
1686                 intel_ring_emit(ring, MI_NOOP);
1687
1688         intel_ring_advance(ring);
1689
1690         return 0;
1691 }
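
/*
 * Worked example for intel_ring_cacheline_align() above, with an
 * illustrative tail value: tail = 0x78 gives (tail & 63) = 56, so
 * num_dwords = (64 - 56) / 4 = 2 and two MI_NOOPs advance the tail to
 * the 0x80 cacheline boundary.  Note that an already-aligned tail
 * yields num_dwords = 16, i.e. a full cacheline of NOOP padding is
 * emitted in that case too.
 */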
1692
1693 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1694 {
1695         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1696
1697         BUG_ON(ring->outstanding_lazy_seqno);
1698
1699         if (INTEL_INFO(ring->dev)->gen >= 6) {
1700                 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1701                 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1702                 if (HAS_VEBOX(ring->dev))
1703                         I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
1704         }
1705
1706         ring->set_seqno(ring, seqno);
1707         ring->hangcheck.seqno = seqno;
1708 }
1709
1710 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1711                                      u32 value)
1712 {
1713         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1714
1715         /* Every tail move must follow the sequence below */
1716
1717         /* Disable notification that the ring is IDLE. The GT
1718          * will then assume that it is busy and bring it out of rc6.
1719          */
1720         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1721                    _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1722
1723         /* Clear the context id. Here be magic! */
1724         I915_WRITE64(GEN6_BSD_RNCID, 0x0);
1725
1726         /* Wait for the ring not to be idle, i.e. for it to wake up. */
1727         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1728                       GEN6_BSD_SLEEP_INDICATOR) == 0,
1729                      50))
1730                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1731
1732         /* Now that the ring is fully powered up, update the tail */
1733         I915_WRITE_TAIL(ring, value);
1734         POSTING_READ(RING_TAIL(ring->mmio_base));
1735
1736         /* Let the ring send IDLE messages to the GT again,
1737          * and so let it sleep to conserve power when idle.
1738          */
1739         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1740                    _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1741 }
1742
1743 static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1744                                u32 invalidate, u32 flush)
1745 {
1746         uint32_t cmd;
1747         int ret;
1748
1749         ret = intel_ring_begin(ring, 4);
1750         if (ret)
1751                 return ret;
1752
1753         cmd = MI_FLUSH_DW;
1754         if (INTEL_INFO(ring->dev)->gen >= 8)
1755                 cmd += 1;
1756         /*
1757          * Bspec vol 1c.5 - video engine command streamer:
1758          * "If ENABLED, all TLBs will be invalidated once the flush
1759          * operation is complete. This bit is only valid when the
1760          * Post-Sync Operation field is a value of 1h or 3h."
1761          */
1762         if (invalidate & I915_GEM_GPU_DOMAINS)
1763                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1764                         MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1765         intel_ring_emit(ring, cmd);
1766         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1767         if (INTEL_INFO(ring->dev)->gen >= 8) {
1768                 intel_ring_emit(ring, 0); /* upper addr */
1769                 intel_ring_emit(ring, 0); /* value */
1770         } else {
1771                 intel_ring_emit(ring, 0);
1772                 intel_ring_emit(ring, MI_NOOP);
1773         }
1774         intel_ring_advance(ring);
1775         return 0;
1776 }
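
/*
 * Shape of what gen6_bsd_ring_flush() above emits (a sketch of this
 * function's usage, not a Bspec description): the MI_FLUSH_DW command
 * dword, the post-sync address (the HWS scratch slot, with
 * MI_FLUSH_DW_USE_GTT selecting GTT space), then two further dwords:
 * 0 and an MI_NOOP on gen6/7, or the upper address bits and an
 * immediate value on gen8+.  The "cmd += 1" bumps the command's
 * dword-length field to cover that extra address dword on gen8.
 */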
1777
1778 static int
1779 gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1780                               u32 offset, u32 len,
1781                               unsigned flags)
1782 {
1783         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1784         bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
1785                 !(flags & I915_DISPATCH_SECURE);
1786         int ret;
1787
1788         ret = intel_ring_begin(ring, 4);
1789         if (ret)
1790                 return ret;
1791
1792         /* FIXME(BDW): Address space and security selectors. */
1793         intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
1794         intel_ring_emit(ring, offset);
1795         intel_ring_emit(ring, 0);
1796         intel_ring_emit(ring, MI_NOOP);
1797         intel_ring_advance(ring);
1798
1799         return 0;
1800 }
1801
1802 static int
1803 hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1804                               u32 offset, u32 len,
1805                               unsigned flags)
1806 {
1807         int ret;
1808
1809         ret = intel_ring_begin(ring, 2);
1810         if (ret)
1811                 return ret;
1812
1813         intel_ring_emit(ring,
1814                         MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
1815                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
1816         /* bits 0-7 are the length on GEN6+ */
1817         intel_ring_emit(ring, offset);
1818         intel_ring_advance(ring);
1819
1820         return 0;
1821 }
1822
1823 static int
1824 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1825                               u32 offset, u32 len,
1826                               unsigned flags)
1827 {
1828         int ret;
1829
1830         ret = intel_ring_begin(ring, 2);
1831         if (ret)
1832                 return ret;
1833
1834         intel_ring_emit(ring,
1835                         MI_BATCH_BUFFER_START |
1836                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1837         /* bits 0-7 are the length on GEN6+ */
1838         intel_ring_emit(ring, offset);
1839         intel_ring_advance(ring);
1840
1841         return 0;
1842 }
1843
1844 /* Blitter support (SandyBridge+) */
1845
1846 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1847                            u32 invalidate, u32 flush)
1848 {
1849         struct drm_device *dev = ring->dev;
1850         uint32_t cmd;
1851         int ret;
1852
1853         ret = intel_ring_begin(ring, 4);
1854         if (ret)
1855                 return ret;
1856
1857         cmd = MI_FLUSH_DW;
1858         if (INTEL_INFO(ring->dev)->gen >= 8)
1859                 cmd += 1;
1860         /*
1861          * Bspec vol 1c.3 - blitter engine command streamer:
1862          * "If ENABLED, all TLBs will be invalidated once the flush
1863          * operation is complete. This bit is only valid when the
1864          * Post-Sync Operation field is a value of 1h or 3h."
1865          */
1866         if (invalidate & I915_GEM_DOMAIN_RENDER)
1867                 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
1868                         MI_FLUSH_DW_OP_STOREDW;
1869         intel_ring_emit(ring, cmd);
1870         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1871         if (INTEL_INFO(ring->dev)->gen >= 8) {
1872                 intel_ring_emit(ring, 0); /* upper addr */
1873                 intel_ring_emit(ring, 0); /* value */
1874         } else {
1875                 intel_ring_emit(ring, 0);
1876                 intel_ring_emit(ring, MI_NOOP);
1877         }
1878         intel_ring_advance(ring);
1879
1880         if (IS_GEN7(dev) && !invalidate && flush)
1881                 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
1882
1883         return 0;
1884 }
1885
1886 int intel_init_render_ring_buffer(struct drm_device *dev)
1887 {
1888         struct drm_i915_private *dev_priv = dev->dev_private;
1889         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1890
1891         ring->name = "render ring";
1892         ring->id = RCS;
1893         ring->mmio_base = RENDER_RING_BASE;
1894
1895         if (INTEL_INFO(dev)->gen >= 6) {
1896                 ring->add_request = gen6_add_request;
1897                 ring->flush = gen7_render_ring_flush;
1898                 if (INTEL_INFO(dev)->gen == 6)
1899                         ring->flush = gen6_render_ring_flush;
1900                 if (INTEL_INFO(dev)->gen >= 8) {
1901                         ring->flush = gen8_render_ring_flush;
1902                         ring->irq_get = gen8_ring_get_irq;
1903                         ring->irq_put = gen8_ring_put_irq;
1904                 } else {
1905                         ring->irq_get = gen6_ring_get_irq;
1906                         ring->irq_put = gen6_ring_put_irq;
1907                 }
1908                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1909                 ring->get_seqno = gen6_ring_get_seqno;
1910                 ring->set_seqno = ring_set_seqno;
1911                 ring->sync_to = gen6_ring_sync;
1912                 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1913                 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
1914                 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
1915                 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
1916                 ring->signal_mbox[RCS] = GEN6_NOSYNC;
1917                 ring->signal_mbox[VCS] = GEN6_VRSYNC;
1918                 ring->signal_mbox[BCS] = GEN6_BRSYNC;
1919                 ring->signal_mbox[VECS] = GEN6_VERSYNC;
1920         } else if (IS_GEN5(dev)) {
1921                 ring->add_request = pc_render_add_request;
1922                 ring->flush = gen4_render_ring_flush;
1923                 ring->get_seqno = pc_render_get_seqno;
1924                 ring->set_seqno = pc_render_set_seqno;
1925                 ring->irq_get = gen5_ring_get_irq;
1926                 ring->irq_put = gen5_ring_put_irq;
1927                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
1928                                         GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
1929         } else {
1930                 ring->add_request = i9xx_add_request;
1931                 if (INTEL_INFO(dev)->gen < 4)
1932                         ring->flush = gen2_render_ring_flush;
1933                 else
1934                         ring->flush = gen4_render_ring_flush;
1935                 ring->get_seqno = ring_get_seqno;
1936                 ring->set_seqno = ring_set_seqno;
1937                 if (IS_GEN2(dev)) {
1938                         ring->irq_get = i8xx_ring_get_irq;
1939                         ring->irq_put = i8xx_ring_put_irq;
1940                 } else {
1941                         ring->irq_get = i9xx_ring_get_irq;
1942                         ring->irq_put = i9xx_ring_put_irq;
1943                 }
1944                 ring->irq_enable_mask = I915_USER_INTERRUPT;
1945         }
1946         ring->write_tail = ring_write_tail;
1947         if (IS_HASWELL(dev))
1948                 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1949         else if (IS_GEN8(dev))
1950                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
1951         else if (INTEL_INFO(dev)->gen >= 6)
1952                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1953         else if (INTEL_INFO(dev)->gen >= 4)
1954                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1955         else if (IS_I830(dev) || IS_845G(dev))
1956                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1957         else
1958                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1959         ring->init = init_render_ring;
1960         ring->cleanup = render_ring_cleanup;
1961
1962         /* Workaround batchbuffer to combat CS tlb bug. */
1963         if (HAS_BROKEN_CS_TLB(dev)) {
1964                 struct drm_i915_gem_object *obj;
1965                 int ret;
1966
1967                 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
1968                 if (obj == NULL) {
1969                         DRM_ERROR("Failed to allocate batch bo\n");
1970                         return -ENOMEM;
1971                 }
1972
1973                 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
1974                 if (ret != 0) {
1975                         drm_gem_object_unreference(&obj->base);
1976                         DRM_ERROR("Failed to pin batch bo\n");
1977                         return ret;
1978                 }
1979
1980                 ring->scratch.obj = obj;
1981                 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
1982         }
1983
1984         return intel_init_ring_buffer(dev, ring);
1985 }
1986
1987 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1988 {
1989         struct drm_i915_private *dev_priv = dev->dev_private;
1990         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1991         int ret;
1992
1993         ring->name = "render ring";
1994         ring->id = RCS;
1995         ring->mmio_base = RENDER_RING_BASE;
1996
1997         if (INTEL_INFO(dev)->gen >= 6) {
1998                 /* non-kms not supported on gen6+ */
1999                 return -ENODEV;
2000         }
2001
2002         /* Note: gem is not supported on gen5/ilk without kms (the corresponding
2003          * gem_init ioctl returns -ENODEV). Hence we do not need to set up
2004          * the special gen5 functions. */
2005         ring->add_request = i9xx_add_request;
2006         if (INTEL_INFO(dev)->gen < 4)
2007                 ring->flush = gen2_render_ring_flush;
2008         else
2009                 ring->flush = gen4_render_ring_flush;
2010         ring->get_seqno = ring_get_seqno;
2011         ring->set_seqno = ring_set_seqno;
2012         if (IS_GEN2(dev)) {
2013                 ring->irq_get = i8xx_ring_get_irq;
2014                 ring->irq_put = i8xx_ring_put_irq;
2015         } else {
2016                 ring->irq_get = i9xx_ring_get_irq;
2017                 ring->irq_put = i9xx_ring_put_irq;
2018         }
2019         ring->irq_enable_mask = I915_USER_INTERRUPT;
2020         ring->write_tail = ring_write_tail;
2021         if (INTEL_INFO(dev)->gen >= 4)
2022                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2023         else if (IS_I830(dev) || IS_845G(dev))
2024                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2025         else
2026                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2027         ring->init = init_render_ring;
2028         ring->cleanup = render_ring_cleanup;
2029
2030         ring->dev = dev;
2031         INIT_LIST_HEAD(&ring->active_list);
2032         INIT_LIST_HEAD(&ring->request_list);
2033
2034         ring->size = size;
2035         ring->effective_size = ring->size;
2036         if (IS_I830(ring->dev) || IS_845G(ring->dev))
2037                 ring->effective_size -= 128;
2038
2039         ring->virtual_start = ioremap_wc(start, size);
2040         if (ring->virtual_start == NULL) {
2041                 DRM_ERROR("cannot ioremap virtual address for"
2042                           " ring buffer\n");
2043                 return -ENOMEM;
2044         }
2045
2046         if (!I915_NEED_GFX_HWS(dev)) {
2047                 ret = init_phys_status_page(ring);
2048                 if (ret)
2049                         return ret;
2050         }
2051
2052         return 0;
2053 }
2054
2055 int intel_init_bsd_ring_buffer(struct drm_device *dev)
2056 {
2057         struct drm_i915_private *dev_priv = dev->dev_private;
2058         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
2059
2060         ring->name = "bsd ring";
2061         ring->id = VCS;
2062
2063         ring->write_tail = ring_write_tail;
2064         if (INTEL_INFO(dev)->gen >= 6) {
2065                 ring->mmio_base = GEN6_BSD_RING_BASE;
2066                 /* gen6 bsd needs a special workaround for tail updates */
2067                 if (IS_GEN6(dev))
2068                         ring->write_tail = gen6_bsd_ring_write_tail;
2069                 ring->flush = gen6_bsd_ring_flush;
2070                 ring->add_request = gen6_add_request;
2071                 ring->get_seqno = gen6_ring_get_seqno;
2072                 ring->set_seqno = ring_set_seqno;
2073                 if (INTEL_INFO(dev)->gen >= 8) {
2074                         ring->irq_enable_mask =
2075                                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2076                         ring->irq_get = gen8_ring_get_irq;
2077                         ring->irq_put = gen8_ring_put_irq;
2078                         ring->dispatch_execbuffer =
2079                                 gen8_ring_dispatch_execbuffer;
2080                 } else {
2081                         ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2082                         ring->irq_get = gen6_ring_get_irq;
2083                         ring->irq_put = gen6_ring_put_irq;
2084                         ring->dispatch_execbuffer =
2085                                 gen6_ring_dispatch_execbuffer;
2086                 }
2087                 ring->sync_to = gen6_ring_sync;
2088                 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
2089                 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2090                 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
2091                 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
2092                 ring->signal_mbox[RCS] = GEN6_RVSYNC;
2093                 ring->signal_mbox[VCS] = GEN6_NOSYNC;
2094                 ring->signal_mbox[BCS] = GEN6_BVSYNC;
2095                 ring->signal_mbox[VECS] = GEN6_VEVSYNC;
2096         } else {
2097                 ring->mmio_base = BSD_RING_BASE;
2098                 ring->flush = bsd_ring_flush;
2099                 ring->add_request = i9xx_add_request;
2100                 ring->get_seqno = ring_get_seqno;
2101                 ring->set_seqno = ring_set_seqno;
2102                 if (IS_GEN5(dev)) {
2103                         ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2104                         ring->irq_get = gen5_ring_get_irq;
2105                         ring->irq_put = gen5_ring_put_irq;
2106                 } else {
2107                         ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2108                         ring->irq_get = i9xx_ring_get_irq;
2109                         ring->irq_put = i9xx_ring_put_irq;
2110                 }
2111                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2112         }
2113         ring->init = init_ring_common;
2114
2115         return intel_init_ring_buffer(dev, ring);
2116 }
2117
2118 int intel_init_blt_ring_buffer(struct drm_device *dev)
2119 {
2120         struct drm_i915_private *dev_priv = dev->dev_private;
2121         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
2122
2123         ring->name = "blitter ring";
2124         ring->id = BCS;
2125
2126         ring->mmio_base = BLT_RING_BASE;
2127         ring->write_tail = ring_write_tail;
2128         ring->flush = gen6_ring_flush;
2129         ring->add_request = gen6_add_request;
2130         ring->get_seqno = gen6_ring_get_seqno;
2131         ring->set_seqno = ring_set_seqno;
2132         if (INTEL_INFO(dev)->gen >= 8) {
2133                 ring->irq_enable_mask =
2134                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2135                 ring->irq_get = gen8_ring_get_irq;
2136                 ring->irq_put = gen8_ring_put_irq;
2137                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2138         } else {
2139                 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2140                 ring->irq_get = gen6_ring_get_irq;
2141                 ring->irq_put = gen6_ring_put_irq;
2142                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2143         }
2144         ring->sync_to = gen6_ring_sync;
2145         ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
2146         ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
2147         ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2148         ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
2149         ring->signal_mbox[RCS] = GEN6_RBSYNC;
2150         ring->signal_mbox[VCS] = GEN6_VBSYNC;
2151         ring->signal_mbox[BCS] = GEN6_NOSYNC;
2152         ring->signal_mbox[VECS] = GEN6_VEBSYNC;
2153         ring->init = init_ring_common;
2154
2155         return intel_init_ring_buffer(dev, ring);
2156 }
2157
2158 int intel_init_vebox_ring_buffer(struct drm_device *dev)
2159 {
2160         struct drm_i915_private *dev_priv = dev->dev_private;
2161         struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
2162
2163         ring->name = "video enhancement ring";
2164         ring->id = VECS;
2165
2166         ring->mmio_base = VEBOX_RING_BASE;
2167         ring->write_tail = ring_write_tail;
2168         ring->flush = gen6_ring_flush;
2169         ring->add_request = gen6_add_request;
2170         ring->get_seqno = gen6_ring_get_seqno;
2171         ring->set_seqno = ring_set_seqno;
2172
2173         if (INTEL_INFO(dev)->gen >= 8) {
2174                 ring->irq_enable_mask =
2175                         GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2176                 ring->irq_get = gen8_ring_get_irq;
2177                 ring->irq_put = gen8_ring_put_irq;
2178                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2179         } else {
2180                 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2181                 ring->irq_get = hsw_vebox_get_irq;
2182                 ring->irq_put = hsw_vebox_put_irq;
2183                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2184         }
2185         ring->sync_to = gen6_ring_sync;
2186         ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
2187         ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
2188         ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
2189         ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2190         ring->signal_mbox[RCS] = GEN6_RVESYNC;
2191         ring->signal_mbox[VCS] = GEN6_VVESYNC;
2192         ring->signal_mbox[BCS] = GEN6_BVESYNC;
2193         ring->signal_mbox[VECS] = GEN6_NOSYNC;
2194         ring->init = init_ring_common;
2195
2196         return intel_init_ring_buffer(dev, ring);
2197 }
2198
2199 int
2200 intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
2201 {
2202         int ret;
2203
2204         if (!ring->gpu_caches_dirty)
2205                 return 0;
2206
2207         ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
2208         if (ret)
2209                 return ret;
2210
2211         trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
2212
2213         ring->gpu_caches_dirty = false;
2214         return 0;
2215 }
2216
2217 int
2218 intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
2219 {
2220         uint32_t flush_domains;
2221         int ret;
2222
2223         flush_domains = 0;
2224         if (ring->gpu_caches_dirty)
2225                 flush_domains = I915_GEM_GPU_DOMAINS;
2226
2227         ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2228         if (ret)
2229                 return ret;
2230
2231         trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2232
2233         ring->gpu_caches_dirty = false;
2234         return 0;
2235 }