drm/ttm: allow fence to be added as shared
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1321491cf4992a7adc170ee957ac1b2be19285b3..cd517ab936085c55eda7c14324c5604e35ec4883 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -133,13 +133,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                 * the buffers used for read only, which doubles the range
                 * to 0 to 31. 32 is reserved for the kernel driver.
                 */
-               priority = (r->flags & 0xf) * 2 + !!r->write_domain;
+               priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+                          + !!r->write_domain;
 
                /* the first reloc of an UVD job is the msg and that must be in
-                  VRAM, also but everything into VRAM on AGP cards to avoid
-                  image corruptions */
+                  VRAM, also put everything into VRAM on AGP cards and older
+                  IGP chips to avoid image corruptions */
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-                   (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+                   (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
+                    p->rdev->family == CHIP_RS780 ||
+                    p->rdev->family == CHIP_RS880)) {
+
                        /* TODO: is this still needed for NI+ ? */
                        p->relocs[i].prefered_domains =
                                RADEON_GEM_DOMAIN_VRAM;
@@ -179,6 +183,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                }
 
                p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
+               p->relocs[i].tv.shared = false;
                p->relocs[i].handle = r->handle;
 
                radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
@@ -249,11 +254,17 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
        int i;
 
        for (i = 0; i < p->nrelocs; i++) {
+               struct reservation_object *resv;
+               struct fence *fence;
+
                if (!p->relocs[i].robj)
                        continue;
 
+               resv = p->relocs[i].robj->tbo.resv;
+               fence = reservation_object_get_excl(resv);
+
                radeon_semaphore_sync_to(p->ib.semaphore,
-                                        p->relocs[i].robj->tbo.sync_obj);
+                                        (struct radeon_fence *)fence);
        }
 }
 
@@ -423,7 +434,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 
                ttm_eu_fence_buffer_objects(&parser->ticket,
                                            &parser->validated,
-                                           parser->ib.fence);
+                                           &parser->ib.fence->base);
        } else if (backoff) {
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);
@@ -471,7 +482,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
                radeon_vce_note_usage(rdev);
 
        radeon_cs_sync_rings(parser);
-       r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+       r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        if (r) {
                DRM_ERROR("Failed to schedule IB !\n");
        }
@@ -562,9 +573,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
        if ((rdev->family >= CHIP_TAHITI) &&
            (parser->chunk_const_ib_idx != -1)) {
-               r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+               r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
        } else {
-               r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+               r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        }
 
 out:
@@ -649,6 +660,13 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                up_read(&rdev->exclusive_lock);
                return -EBUSY;
        }
+       if (rdev->in_reset) {
+               up_read(&rdev->exclusive_lock);
+               r = radeon_gpu_reset(rdev);
+               if (!r)
+                       r = -EAGAIN;
+               return r;
+       }
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
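
For context on what the commit title means for this file: with the new per-buffer
tv.shared flag, ttm_eu_fence_buffer_objects() can attach the command-submission
fence to each buffer's reservation object either as the exclusive fence or as a
shared one. The sketch below illustrates that decision against the
reservation_object/fence API of this kernel generation; the function name
example_fence_buffer_objects is made up for the illustration and is not the
upstream TTM body.

#include <linux/list.h>
#include <linux/fence.h>
#include <linux/reservation.h>
#include <drm/ttm/ttm_execbuf_util.h>

/*
 * Illustration only: attach one fence to every validated buffer, honouring
 * the per-buffer "shared" flag on struct ttm_validate_buffer.  A shared
 * fence slot must have been reserved earlier; the real code does that
 * during reservation via reservation_object_reserve_shared().
 */
static void example_fence_buffer_objects(struct list_head *list,
					 struct fence *fence)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct reservation_object *resv = entry->bo->resv;

		if (entry->shared)
			/* reader: other rings may keep accessing the BO */
			reservation_object_add_shared_fence(resv, fence);
		else
			/* writer: everybody must wait for this fence */
			reservation_object_add_excl_fence(resv, fence);
	}
}

In the radeon path above every reloc sets tv.shared = false for now, so
&parser->ib.fence->base ends up as the exclusive fence on each validated BO,
preserving the semantics of the old per-BO sync_obj pointer that
radeon_cs_sync_rings() used to read.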