/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
- WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
- DISABLE_PIXEL_MASK_CAMMING);
+ /*
+ * The WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be
+ * set, but we do that in the per-context batch buffer, as there is
+ * an issue with this register not being restored on context restore.
+ */
}
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||

WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+ if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+ INTEL_REVID(dev) == SKL_REVID_D0)
+ /* WaBarrierPerformanceFixDisable:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE |
+ HDC_BARRIER_PERFORMANCE_DISABLE);
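+ /*
+ * HDC_CHICKEN0 is a masked register: WA_SET_BIT_MASKED() above
+ * uses _MASKED_BIT_ENABLE(), so the mask bits and the enable
+ * bits for both workaround fields land in a single write.
+ */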
+
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
/*
* Use Force Non-Coherent whenever executing a 3D context. This
unsigned space;
int ret;
- /* The whole point of reserving space is to not wait! */
- WARN_ON(ringbuf->reserved_in_use);
-
if (intel_ring_space(ringbuf) >= n)
return 0;
+ /* The whole point of reserving space is to not wait! */
+ WARN_ON(ringbuf->reserved_in_use);
+
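+ /*
+ * Not enough space is free yet: walk the outstanding requests
+ * oldest-first and pick the first one whose retirement would
+ * free at least n bytes, then wait for it to complete.
+ */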
list_for_each_entry(request, &ring->request_list, list) {
space = __intel_ring_space(request->postfix, ringbuf->tail,
ringbuf->size);
return 0;
}
-static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
uint32_t __iomem *virt;
- struct intel_ringbuffer *ringbuf = ring->buffer;
int rem = ringbuf->size - ringbuf->tail;
- /* Can't wrap if space has already been reserved! */
- WARN_ON(ringbuf->reserved_in_use);
-
- if (ringbuf->space < rem) {
- int ret = ring_wait_for_space(ring, rem);
- if (ret)
- return ret;
- }
-
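/* Overwrite the stale tail of the ring with MI_NOOPs, then restart at zero. */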
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
	iowrite32(MI_NOOP, virt++);

ringbuf->tail = 0;
intel_ring_update_space(ringbuf);
-
- return 0;
}
int intel_ring_idle(struct intel_engine_cs *ring)
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
{
WARN_ON(!ringbuf->reserved_in_use);
- WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
- "request reserved size too small: %d vs %d!\n",
- ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+ if (ringbuf->tail > ringbuf->reserved_tail) {
+ WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+ "request reserved size too small: %d vs %d!\n",
+ ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+ } else {
+ /*
+ * The ring was wrapped while the reserved space was in use.
+ * That means that some unknown amount of the ring tail was
+ * no-op filled and skipped. Thus simply adding the ring size
+ * to the tail and doing the above space check will not work.
+ * Rather than attempt to track how much tail was skipped,
+ * it is much simpler to say that also skipping the sanity
+ * check every once in a while is not a big issue.
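+ *
+ * Illustrative numbers (assumed): a request reserved at
+ * reserved_tail = 32000 in a 32768 byte ring that then wraps
+ * can leave tail at e.g. 256, below reserved_tail, so the
+ * check above has nothing meaningful to compare against.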
+ */
+ }
ringbuf->reserved_size = 0;
ringbuf->reserved_in_use = false;
static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
- int ret;
-
- /*
- * Add on the reserved size to the request to make sure that after
- * the intended commands have been emitted, there is guaranteed to
- * still be enough free space to send them to the hardware.
- */
- if (!ringbuf->reserved_in_use)
- bytes += ringbuf->reserved_size;
+ int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ int remain_actual = ringbuf->size - ringbuf->tail;
+ int ret, total_bytes, wait_bytes = 0;
+ bool need_wrap = false;
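+ /*
+ * remain_usable measures up to effective_size, which is at most
+ * the real size (some platforms keep the last bytes of the ring
+ * unused); remain_actual measures to the true end of the ring,
+ * which is what gets NOOP-filled on a wrap.
+ */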
- if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
- ret = intel_wrap_ring_buffer(ring);
- if (unlikely(ret))
- return ret;
-
- if(ringbuf->reserved_size) {
- uint32_t size = ringbuf->reserved_size;
+ if (ringbuf->reserved_in_use)
+ total_bytes = bytes;
+ else
+ total_bytes = bytes + ringbuf->reserved_size;
- intel_ring_reserved_space_cancel(ringbuf);
- intel_ring_reserved_space_reserve(ringbuf, size);
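+ /*
+ * total_bytes is what must fit in front of tail: while the
+ * reservation is being consumed only the request itself counts,
+ * otherwise the reserved size must still fit on top of the
+ * bytes about to be emitted.
+ */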
+ if (unlikely(bytes > remain_usable)) {
+ /*
+ * Not enough space for the basic request, so we need to flush
+ * out the remainder and then wait for base + reserved.
+ */
+ wait_bytes = remain_actual + total_bytes;
+ need_wrap = true;
+ } else {
+ if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end, so we only need to wait for the
+ * reserved size after flushing out the remainder.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
+ need_wrap = true;
+ } else if (total_bytes > ringbuf->space) {
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
}
}
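+ /*
+ * Worked example (values assumed for illustration): with
+ * size = 4096, effective_size = 4064, tail = 4032, bytes = 64
+ * and reserved_size = 160 (not in use), remain_usable = 32,
+ * remain_actual = 64 and total_bytes = 224; bytes exceeds
+ * remain_usable, so wait_bytes = 64 + 224 = 288 and the ring
+ * is wrapped once the wait completes.
+ */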
- if (unlikely(ringbuf->space < bytes)) {
- ret = ring_wait_for_space(ring, bytes);
+ if (wait_bytes) {
+ ret = ring_wait_for_space(ring, wait_bytes);
if (unlikely(ret))
return ret;
+
+ if (need_wrap)
+ __wrap_ring_buffer(ringbuf);
}
return 0;
return ret;
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0));
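+ /*
+ * The extra bit emitted above enables the resource streamer
+ * for this batch only when userspace requested it via the
+ * I915_DISPATCH_RS dispatch flag.
+ */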
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
+ 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);