/**
* amdgpu_vm_grab_id - allocate the next free VMID
*
- * @ring: ring we want to submit job to
* @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
*
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
*
- * Global and local mutex must be locked!
+ * Global mutex must be locked!
*/
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
- struct amdgpu_vm *vm)
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync)
{
struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
/* check if the id is still valid */
if (vm_id->id && vm_id->last_id_use &&
vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
- return NULL;
+ return 0;
/* we definitely need to flush */
vm_id->pd_gpu_addr = ~0ll;
/* found a free one */
vm_id->id = i;
trace_amdgpu_vm_grab_id(i, ring->idx);
- return NULL;
+ return 0;
}
if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
for (i = 0; i < 2; ++i) {
if (choices[i]) {
+ struct amdgpu_fence *fence;
+
+ fence = adev->vm_manager.active[choices[i]];
vm_id->id = choices[i];
+
trace_amdgpu_vm_grab_id(choices[i], ring->idx);
- return adev->vm_manager.active[choices[i]];
+ return amdgpu_sync_fence(ring->adev, sync, &fence->base);
}
}
/* should never happen */
BUG();
- return NULL;
+ return -EINVAL;
}
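With the new signature, the caller hands in the sync object that collects any fences the VMID allocation depends on and checks an error code instead of handling a returned fence itself. A minimal sketch of the new calling convention, assuming a caller that holds the global mutex and already owns an amdgpu_sync; the surrounding caller code is illustrative and not taken from this patch:

	/* before: fence = amdgpu_vm_grab_id(ring, vm); caller synced to it by hand */
	r = amdgpu_vm_grab_id(vm, ring, sync);
	if (r)
		return r;
	/* any dependency fences are now queued on 'sync' for the job to wait on */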
/**
}
}
+/* scheduler free_job callback: release the job's IBs and the IB array */
+static int amdgpu_vm_free_job(struct amdgpu_cs_parser *sched_job)
+{
+ int i;
+
+ for (i = 0; i < sched_job->num_ibs; i++)
+ amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+ kfree(sched_job->ibs);
+ return 0;
+}
+
+/* scheduler run_job callback: fence the page-table BO with the job's last IB fence */
+static int amdgpu_vm_run_job(struct amdgpu_cs_parser *sched_job)
+{
+ amdgpu_bo_fence(sched_job->job_param.vm.bo,
+ &sched_job->ibs[sched_job->num_ibs - 1].fence->base, true);
+ return 0;
+}
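The two helpers above follow the scheduler's split-callback pattern: run_job does the post-emission bookkeeping (here, fencing the page directory/table BO) and free_job releases the job's resources once it is retired. A tiny stand-alone C model of that pattern, purely illustrative and independent of the real amd_sched types:

	#include <stdio.h>
	#include <stdlib.h>

	struct toy_job {
		int (*run_job)(struct toy_job *job);   /* post-emit work, e.g. fencing a BO */
		int (*free_job)(struct toy_job *job);  /* release the job's resources */
		void *payload;
	};

	static int toy_run(struct toy_job *job)
	{
		printf("run_job: post-emit work on payload %p\n", job->payload);
		return 0;
	}

	static int toy_free(struct toy_job *job)
	{
		free(job->payload);
		job->payload = NULL;
		return 0;
	}

	int main(void)
	{
		struct toy_job job = { toy_run, toy_free, malloc(16) };

		/* a real scheduler invokes these asynchronously around IB emission */
		job.run_job(&job);
		job.free_job(&job);
		return 0;
	}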
+
/**
* amdgpu_vm_clear_bo - initially clear the page dir/table
*
struct amdgpu_bo *bo)
{
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
- struct amdgpu_ib ib;
+ struct amdgpu_cs_parser *sched_job = NULL;
+ struct amdgpu_ib *ib;
unsigned entries;
uint64_t addr;
int r;
addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
- r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
- if (r)
- goto error_unreserve;
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib) {
+ r = -ENOMEM;
+ goto error_unreserve;
+ }
- ib.length_dw = 0;
-
- amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
- amdgpu_vm_pad_ib(adev, &ib);
- WARN_ON(ib.length_dw > 64);
-
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+ r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
if (r)
goto error_free;
- amdgpu_bo_fence(bo, ib.fence, true);
+ ib->length_dw = 0;
+
+ amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
+ amdgpu_vm_pad_ib(adev, ib);
+ WARN_ON(ib->length_dw > 64);
+
+ if (amdgpu_enable_scheduler) {
+ uint64_t v_seq;
+
+ sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
+ adev->kernel_ctx, ib, 1);
+ if (!sched_job) {
+ r = -ENOMEM;
+ goto error_free;
+ }
+ sched_job->job_param.vm.bo = bo;
+ sched_job->run_job = amdgpu_vm_run_job;
+ sched_job->free_job = amdgpu_vm_free_job;
+ v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+ ib->sequence = v_seq;
+ amd_sched_push_job(ring->scheduler,
+ &adev->kernel_ctx->rings[ring->idx].c_entity,
+ sched_job);
+ r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
+ v_seq,
+ false,
+ -1);
+ if (r)
+ DRM_ERROR("emit timeout\n");
+
+ amdgpu_bo_unreserve(bo);
+ return 0;
+ } else {
+ r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
+ if (r)
+ goto error_free;
+ amdgpu_bo_fence(bo, &ib->fence->base, true);
+ }
error_free:
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
error_unreserve:
amdgpu_bo_unreserve(bo);
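The create-parser / push-job / wait-for-emit sequence used above reappears almost verbatim in amdgpu_vm_update_page_directory() and amdgpu_vm_bo_update_mapping() below. A hypothetical helper factoring out the common steps might look like this sketch; the name and signature are invented for illustration, while the calls in the body are exactly the ones this patch already makes:

	static int amdgpu_vm_push_job_and_wait(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring,
					       struct amdgpu_cs_parser *sched_job,
					       struct amdgpu_ib *ib)
	{
		uint64_t v_seq;
		int r;

		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
		ib->sequence = v_seq;
		amd_sched_push_job(ring->scheduler,
				   &adev->kernel_ctx->rings[ring->idx].c_entity,
				   sched_job);
		r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
					v_seq, false, -1);
		if (r)
			DRM_ERROR("emit timeout\n");
		return r;
	}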
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
- struct amdgpu_ib ib;
+ struct amdgpu_ib *ib;
+ struct amdgpu_cs_parser *sched_job = NULL;
+
int r;
/* padding, etc. */
if (ndw > 0xfffff)
return -ENOMEM;
- r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
+ return -ENOMEM;
+
+ r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
- if (r)
- return r;
+ if (r) {
+ kfree(ib);
+ return r;
+ }
- ib.length_dw = 0;
+ ib->length_dw = 0;
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
((last_pt + incr * count) != pt)) {
if (count) {
- amdgpu_vm_update_pages(adev, &ib, last_pde,
+ amdgpu_vm_update_pages(adev, ib, last_pde,
last_pt, count, incr,
AMDGPU_PTE_VALID, 0);
}
}
if (count)
- amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
+ amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
incr, AMDGPU_PTE_VALID, 0);
- if (ib.length_dw != 0) {
- amdgpu_vm_pad_ib(adev, &ib);
- amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
- WARN_ON(ib.length_dw > ndw);
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
- if (r) {
- amdgpu_ib_free(adev, &ib);
- return r;
+ if (ib->length_dw != 0) {
+ amdgpu_vm_pad_ib(adev, ib);
+ amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+ WARN_ON(ib->length_dw > ndw);
+
+ if (amdgpu_enable_scheduler) {
+ uint64_t v_seq;
+
+ sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
+ adev->kernel_ctx,
+ ib, 1);
+ if (!sched_job)
+ goto error_free;
+ sched_job->job_param.vm.bo = pd;
+ sched_job->run_job = amdgpu_vm_run_job;
+ sched_job->free_job = amdgpu_vm_free_job;
+ v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+ ib->sequence = v_seq;
+ amd_sched_push_job(ring->scheduler,
+ &adev->kernel_ctx->rings[ring->idx].c_entity,
+ sched_job);
+ r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
+ v_seq,
+ false,
+ -1);
+ if (r)
+ DRM_ERROR("emit timeout\n");
+ } else {
+ r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
+ if (r) {
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ return r;
+ }
+ amdgpu_bo_fence(pd, &ib->fence->base, true);
}
- amdgpu_bo_fence(pd, ib.fence, true);
}
- amdgpu_ib_free(adev, &ib);
+
+ if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ }
return 0;
+
+error_free:
+ if (sched_job)
+ kfree(sched_job);
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ return -ENOMEM;
}
/**
*/
static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
uint64_t start, uint64_t end,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
unsigned i;
amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
}
+/* scheduler run_job callback: fence the updated page tables and publish the IB fence */
+static int amdgpu_vm_bo_update_mapping_run_job(
+ struct amdgpu_cs_parser *sched_job)
+{
+ struct fence **fence = sched_job->job_param.vm_mapping.fence;
+
+ amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
+ sched_job->job_param.vm_mapping.start,
+ sched_job->job_param.vm_mapping.last + 1,
+ &sched_job->ibs[sched_job->num_ibs - 1].fence->base);
+ if (fence) {
+ fence_put(*fence);
+ *fence = fence_get(&sched_job->ibs[sched_job->num_ibs - 1].fence->base);
+ }
+ return 0;
+}
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t addr, uint32_t gtt_flags,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
unsigned nptes, ncmds, ndw;
uint32_t flags = gtt_flags;
- struct amdgpu_ib ib;
+ struct amdgpu_ib *ib;
+ struct amdgpu_cs_parser *sched_job = NULL;
int r;
/* normally, bo_va->flags only contains READABLE and WRITEABLE bit go here
if (ndw > 0xfffff)
return -ENOMEM;
- r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
- if (r)
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
+ return -ENOMEM;
+
+ r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
+ if (r) {
+ kfree(ib);
return r;
- ib.length_dw = 0;
+ }
+
+ ib->length_dw = 0;
if (!(flags & AMDGPU_PTE_VALID)) {
unsigned i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_fence *f = vm->ids[i].last_id_use;
- r = amdgpu_sync_fence(adev, &ib.sync, &f->base);
+ r = amdgpu_sync_fence(adev, &ib->sync, &f->base);
- if (r)
- return r;
+ if (r) {
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ return r;
+ }
}
}
- r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
+ r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
mapping->it.last + 1, addr + mapping->offset,
flags, gtt_flags);
if (r) {
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
return r;
}
- amdgpu_vm_pad_ib(adev, &ib);
- WARN_ON(ib.length_dw > ndw);
+ amdgpu_vm_pad_ib(adev, ib);
+ WARN_ON(ib->length_dw > ndw);
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
- if (r) {
- amdgpu_ib_free(adev, &ib);
- return r;
- }
- amdgpu_vm_fence_pts(vm, mapping->it.start,
- mapping->it.last + 1, ib.fence);
- if (fence) {
- amdgpu_fence_unref(fence);
- *fence = amdgpu_fence_ref(ib.fence);
- }
- amdgpu_ib_free(adev, &ib);
+ if (amdgpu_enable_scheduler) {
+ uint64_t v_seq;
+
+ sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
+ adev->kernel_ctx, ib, 1);
+ if (!sched_job)
+ goto error_free;
+ sched_job->job_param.vm_mapping.vm = vm;
+ sched_job->job_param.vm_mapping.start = mapping->it.start;
+ sched_job->job_param.vm_mapping.last = mapping->it.last;
+ sched_job->job_param.vm_mapping.fence = fence;
+ sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
+ sched_job->free_job = amdgpu_vm_free_job;
+ v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+ ib->sequence = v_seq;
+ amd_sched_push_job(ring->scheduler,
+ &adev->kernel_ctx->rings[ring->idx].c_entity,
+ sched_job);
+ r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
+ v_seq,
+ false,
+ -1);
+ if (r)
+ DRM_ERROR("emit timeout\n");
+ } else {
+ r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
+ if (r) {
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ return r;
+ }
+ amdgpu_vm_fence_pts(vm, mapping->it.start,
+ mapping->it.last + 1, &ib->fence->base);
+ if (fence) {
+ fence_put(*fence);
+ *fence = fence_get(&ib->fence->base);
+ }
+
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ }
return 0;
+
+error_free:
+ if (sched_job)
+ kfree(sched_job);
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ return -ENOMEM;
}
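The fence published through **fence is now a plain struct fence, so reference handling switches from amdgpu_fence_ref()/amdgpu_fence_unref() to the generic fence_get()/fence_put() pair, as seen both here and in the run_job callback above. A tiny hypothetical wrapper (name invented for illustration) spelling out the put-then-get swap:

	static void amdgpu_vm_fence_replace(struct fence **dst, struct fence *src)
	{
		/* drop the reference held so far, take a new one on the fresh fence */
		fence_put(*dst);
		*dst = fence_get(src);
	}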
/**
addr = 0;
}
- if (addr == bo_va->addr)
- return 0;
-
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- list_for_each_entry(mapping, &bo_va->mappings, list) {
+ spin_lock(&vm->status_lock);
+ if (!list_empty(&bo_va->vm_status))
+ list_splice_init(&bo_va->valids, &bo_va->invalids);
+ spin_unlock(&vm->status_lock);
+
+ list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
flags, &bo_va->last_pt_update);
if (r)
return r;
}
- bo_va->addr = addr;
spin_lock(&vm->status_lock);
list_del_init(&bo_va->vm_status);
+ if (!mem)
+ list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
return 0;
spin_unlock(&vm->status_lock);
if (bo_va)
- r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+ r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
return r;
}
}
bo_va->vm = vm;
bo_va->bo = bo;
- bo_va->addr = 0;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->mappings);
+ INIT_LIST_HEAD(&bo_va->valids);
+ INIT_LIST_HEAD(&bo_va->invalids);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->mappings);
+ list_add(&mapping->list, &bo_va->invalids);
interval_tree_insert(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_map(bo_va, mapping);
- bo_va->addr = 0;
-
/* Make sure the page tables are allocated */
saddr >>= amdgpu_vm_block_size;
eaddr >>= amdgpu_vm_block_size;
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->vm;
+ bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- list_for_each_entry(mapping, &bo_va->mappings, list) {
+ list_for_each_entry(mapping, &bo_va->valids, list) {
if (mapping->it.start == saddr)
break;
}
- if (&mapping->list == &bo_va->mappings) {
- amdgpu_bo_unreserve(bo_va->bo);
- return -ENOENT;
+ if (&mapping->list == &bo_va->valids) {
+ valid = false;
+
+ list_for_each_entry(mapping, &bo_va->invalids, list) {
+ if (mapping->it.start == saddr)
+ break;
+ }
+
+ if (&mapping->list == &bo_va->invalids) {
+ amdgpu_bo_unreserve(bo_va->bo);
+ return -ENOENT;
+ }
}
mutex_lock(&vm->mutex);
interval_tree_remove(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (bo_va->addr) {
- /* clear the old address */
+ if (valid)
list_add(&mapping->list, &vm->freed);
- } else {
+ else
kfree(mapping);
- }
mutex_unlock(&vm->mutex);
amdgpu_bo_unreserve(bo_va->bo);
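Since a mapping may now live on either list, the unmap path searches valids first and falls back to invalids, remembering where it was found so it knows whether live page-table entries still need to be cleared (mapping moves to vm->freed) or the mapping can simply be dropped. A hypothetical lookup helper expressing that search, for illustration only and not part of this patch:

	static struct amdgpu_bo_va_mapping *
	amdgpu_vm_find_mapping(struct amdgpu_bo_va *bo_va, uint64_t saddr, bool *valid)
	{
		struct amdgpu_bo_va_mapping *mapping;

		list_for_each_entry(mapping, &bo_va->valids, list) {
			if (mapping->it.start == saddr) {
				*valid = true;
				return mapping;
			}
		}

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr) {
				*valid = false;
				return mapping;
			}
		}

		return NULL;
	}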
list_del(&bo_va->vm_status);
spin_unlock(&vm->status_lock);
- list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
+ list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (bo_va->addr)
- list_add(&mapping->list, &vm->freed);
- else
- kfree(mapping);
+ list_add(&mapping->list, &vm->freed);
}
- amdgpu_fence_unref(&bo_va->last_pt_update);
+ list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
+ list_del(&mapping->list);
+ interval_tree_remove(&mapping->it, &vm->va);
+ kfree(mapping);
+ }
+
+ fence_put(bo_va->last_pt_update);
kfree(bo_va);
mutex_unlock(&vm->mutex);
struct amdgpu_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->addr) {
- spin_lock(&bo_va->vm->status_lock);
- list_del(&bo_va->vm_status);
+ spin_lock(&bo_va->vm->status_lock);
+ if (list_empty(&bo_va->vm_status))
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- spin_unlock(&bo_va->vm->status_lock);
- }
+ spin_unlock(&bo_va->vm->status_lock);
}
}
vm->va = RB_ROOT;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
+ INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
pd_size = amdgpu_vm_directory_size(adev);