/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"
/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older ASICs, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
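
/*
 * Editor's sketch of how the helpers below fit together during command
 * submission; the real call sites live in radeon_cs.c, so treat this as
 * an illustration rather than additional driver code:
 *
 *	mutex_lock(&rdev->vm_manager.lock);
 *	mutex_lock(&vm->mutex);
 *	radeon_vm_alloc_pt(rdev, vm);		back the VM with a directory
 *	radeon_vm_bo_update(rdev, vm, bo, mem);	write PDEs/PTEs per mapping
 *	fence = radeon_vm_grab_id(rdev, vm, ring);	pick a VMID
 *	... sync to @fence, flush the VM and submit the IB, then ...
 *	radeon_vm_fence(rdev, vm, ib_fence);	protect tables and id
 *	radeon_vm_add_to_lru(rdev, vm);
 *	mutex_unlock(&vm->mutex);
 *	mutex_unlock(&rdev->vm_manager.lock);
 */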
/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}
/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
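
/*
 * Worked example (editor's note): assuming RADEON_VM_BLOCK_SIZE is 9
 * (512 PTEs per page table) and 4 KiB GPU pages, a 1 GiB VM address
 * space gives max_pfn = 0x40000, so
 *
 *	radeon_vm_num_pdes()       = 0x40000 >> 9 = 512 entries
 *	radeon_vm_directory_size() = 512 * 8      = 4 KiB (page aligned)
 */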
/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	struct radeon_vm *vm;
	struct radeon_bo_va *bo_va;
	unsigned size;
	int r;

	if (!rdev->vm_manager.enabled) {
		/* allocate enough for 2 full VM pts */
		size = radeon_vm_directory_size(rdev);
		size += rdev->vm_manager.max_pfn * 8;
		size *= 2;
		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
					      RADEON_GPU_PAGE_ALIGN(size),
					      RADEON_VM_PTB_ALIGN_SIZE,
					      RADEON_GEM_DOMAIN_VRAM);
		if (r) {
			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
				(rdev->vm_manager.max_pfn * 8) >> 10);
			return r;
		}

		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;

		r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
		if (r)
			return r;
	}

	/* restore page table */
	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
		if (vm->page_directory == NULL)
			continue;

		list_for_each_entry(bo_va, &vm->va, vm_list) {
			bo_va->valid = false;
		}
	}
	return 0;
}
/**
 * radeon_vm_free_pt - free the page table for a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm to unbind
 *
 * Free the page table of a specific vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_free_pt(struct radeon_device *rdev,
			      struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int i;

	if (!vm->page_directory)
		return;

	list_del_init(&vm->list);
	radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);

	list_for_each_entry(bo_va, &vm->va, vm_list) {
		bo_va->valid = false;
	}

	if (vm->page_tables == NULL)
		return;

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);

	kfree(vm->page_tables);
}
/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	struct radeon_vm *vm, *tmp;
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	mutex_lock(&rdev->vm_manager.lock);
	/* free all allocated page tables */
	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
		mutex_lock(&vm->mutex);
		radeon_vm_free_pt(rdev, vm);
		mutex_unlock(&vm->mutex);
	}
	for (i = 0; i < RADEON_NUM_VM; ++i) {
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	}
	radeon_asic_vm_fini(rdev);
	mutex_unlock(&rdev->vm_manager.lock);

	radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
	rdev->vm_manager.enabled = false;
}
/**
 * radeon_vm_evict - evict page table to make room for new one
 *
 * @rdev: radeon_device pointer
 * @vm: VM we want to allocate something for
 *
 * Evict a VM from the lru, making sure that it isn't @vm (cayman+).
 * Returns 0 for success, -ENOMEM for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_vm *vm_evict;

	if (list_empty(&rdev->vm_manager.lru_vm))
		return -ENOMEM;

	vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
				    struct radeon_vm, list);
	if (vm_evict == vm)
		return -ENOMEM;

	mutex_lock(&vm_evict->mutex);
	radeon_vm_free_pt(rdev, vm_evict);
	mutex_unlock(&vm_evict->mutex);
	return 0;
}
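
/*
 * Note that eviction only reclaims sa_bo memory; the evicted VM itself
 * stays valid. It loses its page directory and tables here, and will
 * rebuild them through radeon_vm_alloc_pt() (re-validating its mappings,
 * since radeon_vm_free_pt() cleared bo_va->valid) the next time it is
 * used.
 */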
/**
 * radeon_vm_alloc_pt - allocates a page table for a VM
 *
 * @rdev: radeon_device pointer
 * @vm: vm to bind
 *
 * Allocate a page table for the requested vm (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
	unsigned pd_size, pd_entries, pts_size;
	struct radeon_ib ib;
	int r;

	if (vm->page_directory != NULL) {
		return 0;
	}

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

retry:
	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
			     &vm->page_directory, pd_size,
			     RADEON_VM_PTB_ALIGN_SIZE, false);
	if (r == -ENOMEM) {
		r = radeon_vm_evict(rdev, vm);
		if (r)
			return r;
		goto retry;
	} else if (r) {
		return r;
	}

	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);

	/* Initially clear the page directory */
	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
			  NULL, pd_entries * 2 + 64);
	if (r) {
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return r;
	}
	ib.length_dw = 0;

	radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
				0, pd_entries, 0, 0);

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	/* allocate page table array */
	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return -ENOMEM;
	}

	return 0;
}
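
/*
 * Editor's note: the "pd_entries * 2 + 64" budget handed to
 * radeon_ib_get() above sizes the IB for the set_page packets that
 * clear every directory entry, plus slack for packet headers and NOP
 * padding, mirroring the more detailed ndw accounting done in
 * radeon_vm_bo_update().
 */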
/**
 * radeon_vm_add_to_lru - add a VM's page table to the LRU list
 *
 * @rdev: radeon_device pointer
 * @vm: vm to add to LRU
 *
 * Add the allocated page table to the LRU list (cayman+).
 *
 * Global mutex must be locked!
 */
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
	list_del_init(&vm->list);
	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
}
/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
		return NULL;

	/* we definitely need to flush */
	radeon_fence_unref(&vm->last_flush);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm->id = i;
			trace_radeon_vm_grab_id(vm->id, ring);
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm->id = choices[i];
			trace_radeon_vm_grab_id(vm->id, ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}
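
/*
 * Editor's example of the selection above: if VMIDs 3 and 5 are busy,
 * id 3 last fenced on @ring and id 5 on another ring, then
 * choices[0] == 3 and choices[1] == 5. Id 3 is preferred, and the
 * returned fence is what the caller has to sync to before reusing the
 * id; a same-ring fence makes that sync implicit in ring ordering.
 */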
/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->last_id_use);
	vm->last_id_use = radeon_fence_ref(fence);
}
/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}
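
/*
 * Typical usage (editor's sketch, caller must hold the reservation):
 *
 *	if (radeon_bo_reserve(bo, false) == 0) {
 *		struct radeon_bo_va *bo_va = radeon_vm_bo_find(vm, bo);
 *		... use bo_va, may be NULL if no mapping exists ...
 *		radeon_bo_unreserve(bo);
 *	}
 */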
/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns the newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->soffset = 0;
	bo_va->eoffset = 0;
	bo_va->flags = 0;
	bo_va->valid = false;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);

	mutex_lock(&vm->mutex);
	list_add(&bo_va->vm_list, &vm->va);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}
/**
 * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	uint64_t eoffset, last_offset = 0;
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_bo_va *tmp;
	struct list_head *head;
	unsigned last_pfn;

	if (soffset) {
		/* make sure object fit at this offset */
		eoffset = soffset + size;
		if (soffset >= eoffset) {
			return -EINVAL;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn > rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			return -EINVAL;
		}
	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	head = &vm->va;
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va == tmp) {
			/* skip over currently modified bo */
			continue;
		}

		if (soffset >= last_offset && eoffset <= tmp->soffset) {
			/* bo can be added before this one */
			break;
		}
		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	}

	bo_va->soffset = soffset;
	bo_va->eoffset = eoffset;
	bo_va->flags = flags;
	bo_va->valid = false;
	list_move(&bo_va->vm_list, head);

	mutex_unlock(&vm->mutex);
	return 0;
}
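
/*
 * Editor's sketch of creating a mapping with the two calls above, much
 * like the GEM VA ioctl does; the offset and flags are made-up values:
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (r == 0) {
 *		bo_va = radeon_vm_bo_add(rdev, vm, bo);
 *		if (bo_va)
 *			r = radeon_vm_bo_set_addr(rdev, bo_va, 0x100000,
 *						  RADEON_VM_PAGE_READABLE |
 *						  RADEON_VM_PAGE_WRITEABLE |
 *						  RADEON_VM_PAGE_SNOOPED);
 *		radeon_bo_unreserve(bo);
 *	}
 */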
/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}
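
/*
 * Worked example (editor's note, 4 KiB CPU pages): for addr 0x12345
 * the lookup slot is 0x12345 >> 12 = 0x12 and the offset bits 0x345
 * survive the OR, so the result is pages_addr[0x12] | 0x345. The OR
 * is what keeps sub-page offsets intact when the GPU page size is
 * smaller than the CPU page size.
 */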
/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}
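
/*
 * Example (editor's note): a cached system memory mapping requested as
 * RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SYSTEM |
 * RADEON_VM_PAGE_SNOOPED becomes R600_PTE_VALID | R600_PTE_READABLE |
 * R600_PTE_SYSTEM | R600_PTE_SNOOPED. SNOOPED is only honored together
 * with SYSTEM, matching the if above.
 */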
/**
 * radeon_vm_update_pdes - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_update_pdes(struct radeon_device *rdev,
				 struct radeon_vm *vm,
				 struct radeon_ib *ib,
				 uint64_t start, uint64_t end)
{
	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0;
	uint64_t pt_idx;
	int r;

	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;

	/* walk over the address space and update the page directory */
	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
		uint64_t pde, pt;

		if (vm->page_tables[pt_idx])
			continue;

retry:
		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
				     &vm->page_tables[pt_idx],
				     RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, false);
		if (r == -ENOMEM) {
			r = radeon_vm_evict(rdev, vm);
			if (r)
				return r;
			goto retry;
		} else if (r) {
			return r;
		}

		pde = vm->pd_gpu_addr + pt_idx * 8;
		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);

		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {
			if (count) {
				radeon_asic_vm_set_page(rdev, ib, last_pde,
							last_pt, count, incr,
							R600_PTE_VALID);

				count *= RADEON_VM_PTE_COUNT;
				radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
							count, 0, 0);
			}
			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count) {
		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
					incr, R600_PTE_VALID);

		count *= RADEON_VM_PTE_COUNT;
		radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
					count, 0, 0);
	}

	return 0;
}
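
/*
 * Editor's example of the index math above (assuming 4 KiB GPU pages
 * and RADEON_VM_BLOCK_SIZE == 9): a GPU VA of 8 MiB is pfn 0x800,
 * which belongs to page table 0x800 >> 9 = 4, so its PDE sits at
 * pd_gpu_addr + 4 * 8.
 */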
/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
				  struct radeon_vm *vm,
				  struct radeon_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	start = start / RADEON_GPU_PAGE_SIZE;
	end = end / RADEON_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
		unsigned nptes;
		uint64_t pte;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {
			if (count) {
				radeon_asic_vm_set_page(rdev, ib, last_pte,
							last_dst, count,
							RADEON_GPU_PAGE_SIZE,
							flags);
			}
			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_asic_vm_set_page(rdev, ib, last_pte,
					last_dst, count,
					RADEON_GPU_PAGE_SIZE, flags);
	}
}
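
/*
 * Editor's example of the splitting above (512 PTEs per table): a
 * request for pfns [510, 1030) is walked as 510-511 in table 0,
 * 512-1023 in table 1 and 1024-1029 in table 2. Chunks whose page
 * tables happen to sit back to back in the sa_bo pool are coalesced
 * into a single set_page call by the count/last_pte tracking.
 */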
/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and global and local mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_vm *vm,
			struct radeon_bo *bo,
			struct ttm_mem_reg *mem)
{
	struct radeon_ib ib;
	struct radeon_bo_va *bo_va;
	unsigned nptes, npdes, ndw;
	uint64_t addr;
	int r;

	/* nothing to do if vm isn't bound */
	if (vm->page_directory == NULL)
		return 0;

	bo_va = radeon_vm_bo_find(vm, bo);
	if (bo_va == NULL) {
		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
		return -EINVAL;
	}

	if (!bo_va->soffset) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo, vm);
		return -EINVAL;
	}

	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
		return 0;

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
			bo_va->valid = true;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
		bo_va->valid = false;
	}

	trace_radeon_vm_bo_update(bo_va);

	nptes = radeon_bo_ngpu_pages(bo);

	/* assume two extra pdes in case the mapping overlaps the borders */
	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;

	/* padding, etc. */
	ndw = 64;

	if (RADEON_VM_BLOCK_SIZE > 11)
		/* reserve space for one header for every 2k dwords */
		ndw += (nptes >> 11) * 4;
	else
		/* reserve space for one header for
		   every (1 << BLOCK_SIZE) entries */
		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;

	/* reserve space for pte addresses */
	ndw += nptes * 2;

	/* reserve space for one header for every 2k dwords */
	ndw += (npdes >> 11) * 4;

	/* reserve space for pde addresses */
	ndw += npdes * 2;

	/* reserve space for clearing new page tables */
	ndw += npdes * 2 * RADEON_VM_PTE_COUNT;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}

	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
			      addr, radeon_vm_page_flags(bo_va->flags));

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	return 0;
}
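
/*
 * Worked ndw budget (editor's note, assuming RADEON_VM_BLOCK_SIZE == 9,
 * so RADEON_VM_PTE_COUNT == 512): a 1 MiB BO has nptes = 256 and
 * npdes = (256 >> 9) + 2 = 2, giving
 *
 *	ndw = 64			padding
 *	    + (256 >> 9) * 4 = 0	pte headers
 *	    + 256 * 2 = 512		pte addresses
 *	    + (2 >> 11) * 4 = 0		pde headers
 *	    + 2 * 2 = 4			pde addresses
 *	    + 2 * 2 * 512 = 2048	clearing new page tables
 *
 * for a total of 2628 dwords, i.e. a roughly 10.5 KiB IB.
 */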
/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
 * remove the ptes for @bo_va in the page table.
 * Returns 0 for success.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_bo_va *bo_va)
{
	int r = 0;

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&bo_va->vm->mutex);
	if (bo_va->soffset) {
		r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
	}
	mutex_unlock(&rdev->vm_manager.lock);
	list_del(&bo_va->vm_list);
	mutex_unlock(&bo_va->vm->mutex);
	list_del(&bo_va->bo_list);

	kfree(bo_va);
	return r;
}
/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}
/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	vm->id = 0;
	vm->fence = NULL;
	vm->last_flush = NULL;
	vm->last_id_use = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->list);
	INIT_LIST_HEAD(&vm->va);
}
/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int r;

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	radeon_vm_free_pt(rdev, vm);
	mutex_unlock(&rdev->vm_manager.lock);

	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
		}
		kfree(bo_va);
	}
	radeon_fence_unref(&vm->fence);
	radeon_fence_unref(&vm->last_flush);
	radeon_fence_unref(&vm->last_id_use);
	mutex_unlock(&vm->mutex);
}
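
/*
 * Editor's summary of a VM's lifetime in terms of this file:
 *
 *	radeon_vm_init(rdev, vm);
 *	bo_va = radeon_vm_bo_add(rdev, vm, bo);		once per mapping
 *	radeon_vm_bo_set_addr(rdev, bo_va, va, flags);
 *	... per CS: alloc_pt, bo_update, grab_id, fence, add_to_lru ...
 *	radeon_vm_bo_rmv(rdev, bo_va);
 *	radeon_vm_fini(rdev, vm);
 */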