struct amdgpu_cs_parser *sched_job)
{
amdgpu_bo_fence(sched_job->job_param.vm.bo,
- sched_job->ibs[sched_job->num_ibs -1].fence, true);
+ &sched_job->ibs[sched_job->num_ibs -1].fence->base, true);
return 0;
}
r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
if (r)
goto error_free;
- amdgpu_bo_fence(bo, ib->fence, true);
+ amdgpu_bo_fence(bo, &ib->fence->base, true);
}
error_free:
amdgpu_ib_free(adev, ib);
return r;
}
- amdgpu_bo_fence(pd, ib->fence, true);
+ amdgpu_bo_fence(pd, &ib->fence->base, true);
}
}
*/
static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
uint64_t start, uint64_t end,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
unsigned i;
static int amdgpu_vm_bo_update_mapping_run_job(
struct amdgpu_cs_parser *sched_job)
{
- struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence;
+ struct fence **fence = sched_job->job_param.vm_mapping.fence;
amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
sched_job->job_param.vm_mapping.start,
sched_job->job_param.vm_mapping.last + 1,
- sched_job->ibs[sched_job->num_ibs -1].fence);
+ &sched_job->ibs[sched_job->num_ibs -1].fence->base);
if (fence) {
- amdgpu_fence_unref(fence);
- *fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence);
+ fence_put(*fence);
+ *fence = fence_get(&sched_job->ibs[sched_job->num_ibs -1].fence->base);
}
return 0;
}
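
The hunks above swap the driver-specific amdgpu_fence_ref()/amdgpu_fence_unref() calls for fence_get()/fence_put() on the generic fence object embedded in the amdgpu fence, passing &...->base wherever only the common type is needed. A minimal standalone sketch of that embed-a-refcounted-base pattern, in plain userspace C with hypothetical type names (not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct base_fence {                     /* stand-in for struct fence */
	unsigned int refcount;
};

struct driver_fence {                   /* stand-in for struct amdgpu_fence */
	struct base_fence base;         /* generic part, handed out as &f->base */
	unsigned int seq;
};

static struct base_fence *base_get(struct base_fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void base_put(struct base_fence *f)
{
	/* base is the first member, so freeing through it frees the container */
	if (f && --f->refcount == 0)
		free(f);
}

int main(void)
{
	struct driver_fence *df = calloc(1, sizeof(*df));
	struct base_fence *last = NULL; /* plays the role of *fence above */

	if (!df)
		return 1;
	df->base.refcount = 1;          /* creator's reference */
	df->seq = 42;

	/* same shape as: fence_put(*fence); *fence = fence_get(&ib->fence->base); */
	base_put(last);
	last = base_get(&df->base);

	printf("seq %u, refcount %u\n", df->seq, last->refcount);

	base_put(&df->base);            /* creator drops its reference */
	base_put(last);                 /* last reference, fence is freed */
	return 0;
}
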
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t addr, uint32_t gtt_flags,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
unsigned nptes, ncmds, ndw;
}
amdgpu_vm_fence_pts(vm, mapping->it.start,
- mapping->it.last + 1, ib->fence);
+ mapping->it.last + 1, &ib->fence->base);
if (fence) {
- amdgpu_fence_unref(fence);
- *fence = amdgpu_fence_ref(ib->fence);
+ fence_put(*fence);
+ *fence = fence_get(&ib->fence->base);
}
amdgpu_ib_free(adev, ib);
addr = 0;
}
- if (addr == bo_va->addr)
- return 0;
-
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- list_for_each_entry(mapping, &bo_va->mappings, list) {
+ spin_lock(&vm->status_lock);
+ if (!list_empty(&bo_va->vm_status))
+ list_splice_init(&bo_va->valids, &bo_va->invalids);
+ spin_unlock(&vm->status_lock);
+
+ list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
flags, &bo_va->last_pt_update);
if (r)
return r;
}
- bo_va->addr = addr;
spin_lock(&vm->status_lock);
list_del_init(&bo_va->vm_status);
+ if (!mem)
+ list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
return 0;
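
What replaces the old single bo_va->addr check is explicit list bookkeeping: each bo_va keeps valids/invalids mapping lists, and its vm_status node is queued on the per-VM invalidated or cleared list. The list_empty(&bo_va->vm_status) test only works as an "is this bo_va currently tracked?" check because nodes always leave those lists through list_del_init(), which relinks them to themselves, and list_splice_init() is what requeues every valid mapping for update once the BO has moved. A standalone sketch of those two idioms, re-implemented in plain C rather than using the kernel's <linux/list.h>:

#include <stdio.h>

struct node { struct node *prev, *next; };

static void node_init(struct node *n)        { n->prev = n->next = n; }
static int  node_empty(const struct node *h) { return h->next == h; }

static void node_add(struct node *n, struct node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void node_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	node_init(n);                  /* empty again, so node_empty() is true */
}

/* move everything on "from" to the front of "to", leaving "from" empty */
static void node_splice_init(struct node *from, struct node *to)
{
	if (node_empty(from))
		return;
	from->next->prev = to;
	from->prev->next = to->next;
	to->next->prev = from->prev;
	to->next = from->next;
	node_init(from);
}

int main(void)
{
	struct node invalidated, valids, invalids, vm_status, m1, m2;

	node_init(&invalidated); node_init(&valids); node_init(&invalids);
	node_init(&vm_status);   node_init(&m1);     node_init(&m2);

	node_add(&m1, &valids);             /* one mapping already valid   */
	node_add(&m2, &invalids);           /* one mapping awaiting update */
	node_add(&vm_status, &invalidated); /* the BO moved                */

	/* mirrors the update path: if the bo_va is tracked as invalidated,
	 * requeue even its valid mappings, then drop it from the list */
	if (!node_empty(&vm_status))
		node_splice_init(&valids, &invalids);
	node_del_init(&vm_status);

	printf("valids empty: %d, vm_status tracked: %d\n",
	       node_empty(&valids), !node_empty(&vm_status));
	return 0;
}
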
spin_unlock(&vm->status_lock);
if (bo_va)
- r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+ r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
return r;
}
}
bo_va->vm = vm;
bo_va->bo = bo;
- bo_va->addr = 0;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->mappings);
+ INIT_LIST_HEAD(&bo_va->valids);
+ INIT_LIST_HEAD(&bo_va->invalids);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->mappings);
+ list_add(&mapping->list, &bo_va->invalids);
interval_tree_insert(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_map(bo_va, mapping);
- bo_va->addr = 0;
-
/* Make sure the page tables are allocated */
saddr >>= amdgpu_vm_block_size;
eaddr >>= amdgpu_vm_block_size;
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->vm;
+ bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- list_for_each_entry(mapping, &bo_va->mappings, list) {
+ list_for_each_entry(mapping, &bo_va->valids, list) {
if (mapping->it.start == saddr)
break;
}
- if (&mapping->list == &bo_va->mappings) {
- amdgpu_bo_unreserve(bo_va->bo);
- return -ENOENT;
+ if (&mapping->list == &bo_va->valids) {
+ valid = false;
+
+ list_for_each_entry(mapping, &bo_va->invalids, list) {
+ if (mapping->it.start == saddr)
+ break;
+ }
+
+ if (&mapping->list == &bo_va->invalids) {
+ amdgpu_bo_unreserve(bo_va->bo);
+ return -ENOENT;
+ }
}
mutex_lock(&vm->mutex);
interval_tree_remove(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (bo_va->addr) {
- /* clear the old address */
+ if (valid)
list_add(&mapping->list, &vm->freed);
- } else {
+ else
kfree(mapping);
- }
mutex_unlock(&vm->mutex);
amdgpu_bo_unreserve(bo_va->bo);
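
Unmap now has to search two lists: valids first, then invalids, remembering where the hit came from. A mapping that may already be written into the page tables is deferred onto the VM's freed list so its PTEs can be cleared later, while one that never reached the page tables is freed immediately. A standalone sketch of that fallback-search-and-deferred-free control flow, using hypothetical names rather than the driver's structures:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
	unsigned long start;
	struct mapping *next;
};

static struct mapping *find_mapping(struct mapping *head, unsigned long saddr,
				    struct mapping **prev)
{
	*prev = NULL;
	for (struct mapping *m = head; m; *prev = m, m = m->next)
		if (m->start == saddr)
			return m;
	return NULL;
}

static void unlink_mapping(struct mapping **head, struct mapping *prev,
			   struct mapping *m)
{
	if (prev)
		prev->next = m->next;
	else
		*head = m->next;
	m->next = NULL;
}

/* returns 0 on success, -1 if no mapping starts at saddr (like -ENOENT) */
static int bo_unmap(struct mapping **valids, struct mapping **invalids,
		    struct mapping **freed, unsigned long saddr)
{
	struct mapping *prev, *m;
	bool valid = true;

	m = find_mapping(*valids, saddr, &prev);
	if (m) {
		unlink_mapping(valids, prev, m);
	} else {
		valid = false;
		m = find_mapping(*invalids, saddr, &prev);
		if (!m)
			return -1;
		unlink_mapping(invalids, prev, m);
	}

	if (valid) {
		/* PTEs may still point at the BO: defer until they are cleared */
		m->next = *freed;
		*freed = m;
	} else {
		/* never reached the page tables: nothing to clear */
		free(m);
	}
	return 0;
}

int main(void)
{
	struct mapping *valids = NULL, *invalids = NULL, *freed = NULL;
	struct mapping *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->start = 0x1000; a->next = valids;   valids = a;
	b->start = 0x2000; b->next = invalids; invalids = b;

	printf("unmap 0x1000: %d (deferred: %d)\n",
	       bo_unmap(&valids, &invalids, &freed, 0x1000), freed != NULL);
	printf("unmap 0x2000: %d\n", bo_unmap(&valids, &invalids, &freed, 0x2000));
	printf("unmap 0x3000: %d\n", bo_unmap(&valids, &invalids, &freed, 0x3000));
	free(freed);
	return 0;
}
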
list_del(&bo_va->vm_status);
spin_unlock(&vm->status_lock);
- list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
+ list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (bo_va->addr)
- list_add(&mapping->list, &vm->freed);
- else
- kfree(mapping);
+ list_add(&mapping->list, &vm->freed);
+ }
+ list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
+ list_del(&mapping->list);
+ interval_tree_remove(&mapping->it, &vm->va);
+ kfree(mapping);
}
- amdgpu_fence_unref(&bo_va->last_pt_update);
+
+ fence_put(bo_va->last_pt_update);
kfree(bo_va);
mutex_unlock(&vm->mutex);
struct amdgpu_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->addr) {
- spin_lock(&bo_va->vm->status_lock);
- list_del(&bo_va->vm_status);
+ spin_lock(&bo_va->vm->status_lock);
+ if (list_empty(&bo_va->vm_status))
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- spin_unlock(&bo_va->vm->status_lock);
- }
+ spin_unlock(&bo_va->vm->status_lock);
}
}
vm->va = RB_ROOT;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
+ INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
pd_size = amdgpu_vm_directory_size(adev);