drm/ttm: allow fence to be added as shared
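Thread the new shared-fence infrastructure through radeon's VM code:
every validation-list entry now states explicitly whether its fence is
to be added as shared or exclusive (the VM buffers stay exclusive),
ttm_eu_reserve_buffers() grows a third argument, the TTM fencing helper
now takes the common struct fence embedded in radeon_fence, and the
per-BO sync_obj pointer is replaced by the exclusive fence kept in the
buffer's reservation object. BOs backed by read-only pages additionally
lose RADEON_VM_PAGE_WRITEABLE, so they can never be mapped writeable
into a GPU VM.
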
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 088ffdc2f577c06519e2a31bc90bbd402c940125..1cce4468cd7567513a659bc8b2125639a45f775d 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -143,6 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
        list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].tv.bo = &vm->page_directory->tbo;
+       list[0].tv.shared = false;
        list[0].tiling_flags = 0;
        list[0].handle = 0;
        list_add(&list[0].tv.head, head);
@@ -156,6 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
                list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].tv.bo = &list[idx].robj->tbo;
+               list[idx].tv.shared = false;
                list[idx].tiling_flags = 0;
                list[idx].handle = 0;
                list_add(&list[idx++].tv.head, head);
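
Both entries built by radeon_vm_get_bos() are marked tv.shared = false:
the page directory and page tables are written by the GPU, so their
fences must stay exclusive. A minimal sketch of how the fencing helper
presumably consumes the flag (an assumption drawn from this patch, not
a copy of the TTM implementation):

        static void fence_validated_list(struct list_head *list,
                                         struct fence *fence)
        {
                struct ttm_validate_buffer *entry;

                list_for_each_entry(entry, list, head) {
                        struct ttm_buffer_object *bo = entry->bo;

                        if (entry->shared)
                                /* reader: goes into the shared slots */
                                reservation_object_add_shared_fence(bo->resv,
                                                                    fence);
                        else
                                /* writer: replaces the exclusive fence */
                                reservation_object_add_excl_fence(bo->resv,
                                                                  fence);
                }
        }
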
@@ -395,11 +397,12 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 
         memset(&tv, 0, sizeof(tv));
         tv.bo = &bo->tbo;
+       tv.shared = false;
 
         INIT_LIST_HEAD(&head);
         list_add(&tv.head, &head);
 
-        r = ttm_eu_reserve_buffers(&ticket, &head);
+        r = ttm_eu_reserve_buffers(&ticket, &head, true);
         if (r)
                return r;
 
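
ttm_eu_reserve_buffers() now takes a third parameter; the true passed
here is read as "wait interruptibly for the reservations" (an
assumption from the call site, in line with the other TTM execbuf
helpers). A sketch of the whole reserve/submit/fence cycle under that
assumption, with radeon_do_work() as a hypothetical stand-in for
building and scheduling an indirect buffer:

        static int reserve_submit_fence(struct radeon_device *rdev,
                                        struct ttm_validate_buffer *tv)
        {
                struct ww_acquire_ctx ticket;
                struct list_head head;
                struct fence *fence;
                int r;

                INIT_LIST_HEAD(&head);
                list_add(&tv->head, &head);

                /* third argument: interruptible wait (see above) */
                r = ttm_eu_reserve_buffers(&ticket, &head, true);
                if (r)
                        return r;

                /* hypothetical: builds and schedules an IB, returns
                 * the fence it produced */
                r = radeon_do_work(rdev, &fence);
                if (r) {
                        /* the error path must drop the reservations */
                        ttm_eu_backoff_reservation(&ticket, &head);
                        return r;
                }

                /* attach the fence to all reserved BOs, then unreserve */
                ttm_eu_fence_buffer_objects(&ticket, &head, fence);
                return 0;
        }
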
@@ -424,7 +427,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
        if (r)
                 goto error;
 
-       ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
+       ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
        radeon_ib_free(rdev, &ib);
 
        return 0;
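
radeon_fence now embeds the cross-driver struct fence as its base
member, and the TTM helper only deals in that base type. Getting back
to the driver fence is the usual container_of() pattern; a sketch (the
radeon driver's own helper may additionally verify the fence ops before
trusting the conversion):

        static inline struct radeon_fence *to_radeon_fence(struct fence *f)
        {
                return container_of(f, struct radeon_fence, base);
        }
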
@@ -693,8 +696,14 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
                                    incr, R600_PTE_VALID);
 
        if (ib.length_dw != 0) {
+               struct fence *fence;
+
                radeon_asic_vm_pad_ib(rdev, &ib);
-               radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
+
+               fence = reservation_object_get_excl(pd->tbo.resv);
+               radeon_semaphore_sync_to(ib.semaphore,
+                                        (struct radeon_fence *)fence);
+
                radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
                WARN_ON(ib.length_dw > ndw);
                r = radeon_ib_schedule(rdev, &ib, NULL, false);
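
With sync_obj gone, the fence to wait for is read from the page
directory's reservation object. reservation_object_get_excl() requires
the reservation to be held, which is the case here because the VM BOs
were reserved via the list built by radeon_vm_get_bos(). Spelled out,
with the assumption that a NULL exclusive fence (idle BO) is treated by
radeon_semaphore_sync_to() as "nothing to wait for":

        struct fence *excl = reservation_object_get_excl(pd->tbo.resv);

        radeon_semaphore_sync_to(ib.semaphore,
                                 (struct radeon_fence *)excl);

The raw cast is equivalent to container_of() only as long as base is
the first member of struct radeon_fence. The same substitution repeats
for the individual page tables in radeon_vm_update_ptes() below.
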
@@ -820,8 +829,11 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
                unsigned nptes;
                uint64_t pte;
+               struct fence *fence;
 
-               radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
+               fence = reservation_object_get_excl(pt->tbo.resv);
+               radeon_semaphore_sync_to(ib->semaphore,
+                                        (struct radeon_fence *)fence);
 
                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
@@ -892,6 +904,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
        bo_va->flags &= ~RADEON_VM_PAGE_VALID;
        bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
        bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
+       if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
+               bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
+
        if (mem) {
                addr = mem->start << PAGE_SHIFT;
                if (mem->mem_type != TTM_PL_SYSTEM) {
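
Clearing RADEON_VM_PAGE_WRITEABLE for BOs whose TTM pages are read-only
(the read-only userptr case) guarantees the GPU can never map them
writeable. A hedged condensation of how the VM flags are assumed to
translate into hardware PTE bits further down the update path (the
driver's real radeon_vm_page_flags() also covers the system and snooped
cases):

        static uint32_t vm_flags_to_pte(uint32_t flags)
        {
                uint32_t hw_flags = 0;

                if (flags & RADEON_VM_PAGE_VALID)
                        hw_flags |= R600_PTE_VALID;
                if (flags & RADEON_VM_PAGE_READABLE)
                        hw_flags |= R600_PTE_READABLE;
                /* never set for pages made read-only above */
                if (flags & RADEON_VM_PAGE_WRITEABLE)
                        hw_flags |= R600_PTE_WRITEABLE;

                return hw_flags;
        }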