2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
29 #include <drm/radeon_drm.h>
31 #include "radeon_trace.h"
35 * GPUVM is similar to the legacy gart on older asics; however,
36 * rather than there being a single global gart table
37 * for the entire GPU, there are multiple VM page tables active
38 * at any given time. The VM page tables can contain a mix of
39 * vram pages and system memory pages, and system memory pages
40 * can be mapped as snooped (cached system pages) or unsnooped
41 * (uncached system pages).
42 * Each VM has an ID associated with it and there is a page table
43 * associated with each VMID. When executing a command buffer,
44 * the kernel tells the ring what VMID to use for that command
45 * buffer. VMIDs are allocated dynamically as commands are submitted.
46 * The userspace drivers maintain their own address space and the kernel
47 * sets up their page tables accordingly when they submit their
48 * command buffers and a VMID is assigned.
49 * Cayman/Trinity support up to 8 active VMs at any given time;
50 * SI supports 16.
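 *
 * Illustrative sketch (not driver code, values assumed): with this two level
 * layout a GPU virtual address is resolved by splitting the page frame
 * number between the page directory and one page table:
 *
 *	pfn     = va >> 12                         - 4KB GPU pages
 *	pde_idx = pfn >> radeon_vm_block_size      - selects the page table
 *	pte_idx = pfn & (RADEON_VM_PTE_COUNT - 1)  - entry within that table
 *	offset  = va & 0xfff                       - byte within the page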
54 * radeon_vm_num_pdes - return the number of page directory entries
56 * @rdev: radeon_device pointer
58 * Calculate the number of page directory entries (cayman+).
60 static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
62 return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
66 * radeon_vm_directory_size - returns the size of the page directory in bytes
68 * @rdev: radeon_device pointer
70 * Calculate the size of the page directory in bytes (cayman+).
72 static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
74 return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
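/*
 * Worked example (illustrative, values assumed): with max_pfn covering a
 * 4GB address space of 4KB pages (1 << 20 pfns) and radeon_vm_block_size = 9
 * (512 PTEs per page table):
 *
 *	num_pdes = (1 << 20) >> 9                  -> 2048 page tables
 *	pd_size  = RADEON_GPU_PAGE_ALIGN(2048 * 8) -> 16384 bytes (16KB)
 *
 * i.e. one 8-byte PDE per page table, padded up to GPU page alignment.
 */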
78 * radeon_vm_manager_init - init the vm manager
80 * @rdev: radeon_device pointer
82 * Init the vm manager (cayman+).
83 * Returns 0 for success, error for failure.
85 int radeon_vm_manager_init(struct radeon_device *rdev)
89 if (!rdev->vm_manager.enabled) {
90 r = radeon_asic_vm_init(rdev);
94 rdev->vm_manager.enabled = true;
100 * radeon_vm_manager_fini - tear down the vm manager
102 * @rdev: radeon_device pointer
104 * Tear down the VM manager (cayman+).
106 void radeon_vm_manager_fini(struct radeon_device *rdev)
110 if (!rdev->vm_manager.enabled)
113 for (i = 0; i < RADEON_NUM_VM; ++i)
114 radeon_fence_unref(&rdev->vm_manager.active[i]);
115 radeon_asic_vm_fini(rdev);
116 rdev->vm_manager.enabled = false;
120 * radeon_vm_get_bos - add the vm BOs to a validation list
122 * @vm: vm providing the BOs
123 * @head: head of validation list
125 * Add the page directory to the list of BOs to
126 * validate for command submission (cayman+).
128 struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
129 struct radeon_vm *vm,
130 struct list_head *head)
132 struct radeon_cs_reloc *list;
135 list = kmalloc_array(vm->max_pde_used + 2,
136 sizeof(struct radeon_cs_reloc), GFP_KERNEL);
140 /* add the vm page table to the list */
142 list[0].robj = vm->page_directory;
143 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
144 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
145 list[0].tv.bo = &vm->page_directory->tbo;
146 list[0].tiling_flags = 0;
148 list_add(&list[0].tv.head, head);
150 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
151 if (!vm->page_tables[i].bo)
154 list[idx].gobj = NULL;
155 list[idx].robj = vm->page_tables[i].bo;
156 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
157 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
158 list[idx].tv.bo = &list[idx].robj->tbo;
159 list[idx].tiling_flags = 0;
160 list[idx].handle = 0;
161 list_add(&list[idx++].tv.head, head);
168 * radeon_vm_grab_id - allocate the next free VMID
170 * @rdev: radeon_device pointer
171 * @vm: vm to allocate id for
172 * @ring: ring we want to submit job to
174 * Allocate an id for the vm (cayman+).
175 * Returns the fence we need to sync to (if any).
177 * Global and local mutex must be locked!
179 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
180 struct radeon_vm *vm, int ring)
182 struct radeon_fence *best[RADEON_NUM_RINGS] = {};
183 unsigned choices[2] = {};
186 /* check if the id is still valid */
187 if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
190 /* we definitely need to flush */
191 radeon_fence_unref(&vm->last_flush);
193 /* skip over VMID 0, since it is the system VM */
194 for (i = 1; i < rdev->vm_manager.nvm; ++i) {
195 struct radeon_fence *fence = rdev->vm_manager.active[i];
198 /* found a free one */
200 trace_radeon_vm_grab_id(vm->id, ring);
204 if (radeon_fence_is_earlier(fence, best[fence->ring])) {
205 best[fence->ring] = fence;
206 choices[fence->ring == ring ? 0 : 1] = i;
210 for (i = 0; i < 2; ++i) {
213 trace_radeon_vm_grab_id(vm->id, ring);
214 return rdev->vm_manager.active[choices[i]];
218 /* should never happen */
224 * radeon_vm_flush - hardware flush the vm
226 * @rdev: radeon_device pointer
227 * @vm: vm we want to flush
228 * @ring: ring to use for flush
230 * Flush the vm (cayman+).
232 * Global and local mutex must be locked!
234 void radeon_vm_flush(struct radeon_device *rdev,
235 struct radeon_vm *vm,
238 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
240 /* if we can't remember our last VM flush then flush now! */
241 if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
242 trace_radeon_vm_flush(pd_addr, ring, vm->id);
243 vm->pd_gpu_addr = pd_addr;
244 radeon_ring_vm_flush(rdev, ring, vm);
249 * radeon_vm_fence - remember fence for vm
251 * @rdev: radeon_device pointer
252 * @vm: vm we want to fence
253 * @fence: fence to remember
255 * Fence the vm (cayman+).
256 * Set the fence used to protect page table and id.
258 * Global and local mutex must be locked!
260 void radeon_vm_fence(struct radeon_device *rdev,
261 struct radeon_vm *vm,
262 struct radeon_fence *fence)
264 radeon_fence_unref(&vm->fence);
265 vm->fence = radeon_fence_ref(fence);
267 radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
268 rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
270 radeon_fence_unref(&vm->last_id_use);
271 vm->last_id_use = radeon_fence_ref(fence);
273 /* we just flushed the VM, remember that */
275 vm->last_flush = radeon_fence_ref(fence);
279 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
282 * @bo: requested buffer object
284 * Find @bo inside the requested vm (cayman+).
285 * Search inside the @bo's vm list for the requested vm.
286 * Returns the found bo_va or NULL if none is found.
288 * Object has to be reserved!
290 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
291 struct radeon_bo *bo)
293 struct radeon_bo_va *bo_va;
295 list_for_each_entry(bo_va, &bo->va, bo_list) {
296 if (bo_va->vm == vm) {
304 * radeon_vm_bo_add - add a bo to a specific vm
306 * @rdev: radeon_device pointer
308 * @bo: radeon buffer object
310 * Add @bo into the requested vm (cayman+).
311 * Add @bo to the list of bos associated with the vm
312 * Returns newly added bo_va or NULL for failure
314 * Object has to be reserved!
316 struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
317 struct radeon_vm *vm,
318 struct radeon_bo *bo)
320 struct radeon_bo_va *bo_va;
322 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
332 bo_va->ref_count = 1;
333 INIT_LIST_HEAD(&bo_va->bo_list);
334 INIT_LIST_HEAD(&bo_va->vm_status);
336 mutex_lock(&vm->mutex);
337 list_add_tail(&bo_va->bo_list, &bo->va);
338 mutex_unlock(&vm->mutex);
344 * radeon_vm_set_pages - helper to call the right asic function
346 * @rdev: radeon_device pointer
347 * @ib: indirect buffer to fill with commands
348 * @pe: addr of the page entry
349 * @addr: dst addr to write into pe
350 * @count: number of page entries to update
351 * @incr: increase next addr by incr bytes
352 * @flags: hw access flags
354 * Traces the parameters and calls the right asic functions
355 * to set up the page table using the DMA.
357 static void radeon_vm_set_pages(struct radeon_device *rdev,
358 struct radeon_ib *ib,
360 uint64_t addr, unsigned count,
361 uint32_t incr, uint32_t flags)
363 trace_radeon_vm_set_page(pe, addr, count, incr, flags);
365 if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
366 uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
367 radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
369 } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
370 radeon_asic_vm_write_pages(rdev, ib, pe, addr,
374 radeon_asic_vm_set_pages(rdev, ib, pe, addr,
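/*
 * Illustrative example (values assumed): for a GART backed mapping the copy
 * source is the matching slot in the GART table, one 8-byte entry per 4KB
 * page, e.g. for addr = 0x20000:
 *
 *	src = rdev->gart.table_addr + (0x20000 >> 12) * 8  -> table_addr + 0x100
 *
 * System pages and very short runs are instead written out entry by entry,
 * while long runs of VRAM pages are generated as a linear pattern
 * (base address plus a multiple of incr).
 */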
380 * radeon_vm_clear_bo - initially clear the page dir/table
382 * @rdev: radeon_device pointer
385 static int radeon_vm_clear_bo(struct radeon_device *rdev,
386 struct radeon_bo *bo)
388 struct ttm_validate_buffer tv;
389 struct ww_acquire_ctx ticket;
390 struct list_head head;
396 memset(&tv, 0, sizeof(tv));
399 INIT_LIST_HEAD(&head);
400 list_add(&tv.head, &head);
402 r = ttm_eu_reserve_buffers(&ticket, &head);
406 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
410 addr = radeon_bo_gpu_offset(bo);
411 entries = radeon_bo_size(bo) / 8;
413 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
414 NULL, entries * 2 + 64);
420 radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
421 radeon_asic_vm_pad_ib(rdev, &ib);
423 r = radeon_ib_schedule(rdev, &ib, NULL);
427 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
428 radeon_ib_free(rdev, &ib);
433 ttm_eu_backoff_reservation(&ticket, &head);
438 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
440 * @rdev: radeon_device pointer
441 * @bo_va: bo_va to store the address
442 * @soffset: requested offset of the buffer in the VM address space
443 * @flags: attributes of pages (read/write/valid/etc.)
445 * Set offset of @bo_va (cayman+).
446 * Validate and set the offset requested within the vm address space.
447 * Returns 0 for success, error for failure.
449 * Object has to be reserved!
451 int radeon_vm_bo_set_addr(struct radeon_device *rdev,
452 struct radeon_bo_va *bo_va,
456 uint64_t size = radeon_bo_size(bo_va->bo);
457 struct radeon_vm *vm = bo_va->vm;
458 unsigned last_pfn, pt_idx;
463 /* make sure object fits at this offset */
464 eoffset = soffset + size;
465 if (soffset >= eoffset) {
469 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
470 if (last_pfn > rdev->vm_manager.max_pfn) {
471 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
472 last_pfn, rdev->vm_manager.max_pfn);
477 eoffset = last_pfn = 0;
480 mutex_lock(&vm->mutex);
481 if (bo_va->it.start || bo_va->it.last) {
483 /* add a clone of the bo_va to clear the old address */
484 struct radeon_bo_va *tmp;
485 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
486 tmp->it.start = bo_va->it.start;
487 tmp->it.last = bo_va->it.last;
489 tmp->addr = bo_va->addr;
490 tmp->bo = radeon_bo_ref(bo_va->bo);
491 list_add(&tmp->vm_status, &vm->freed);
494 interval_tree_remove(&bo_va->it, &vm->va);
499 soffset /= RADEON_GPU_PAGE_SIZE;
500 eoffset /= RADEON_GPU_PAGE_SIZE;
501 if (soffset || eoffset) {
502 struct interval_tree_node *it;
503 it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
505 struct radeon_bo_va *tmp;
506 tmp = container_of(it, struct radeon_bo_va, it);
507 /* bo and tmp overlap, invalid offset */
508 dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
509 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
510 soffset, tmp->bo, tmp->it.start, tmp->it.last);
511 mutex_unlock(&vm->mutex);
514 bo_va->it.start = soffset;
515 bo_va->it.last = eoffset - 1;
516 interval_tree_insert(&bo_va->it, &vm->va);
519 bo_va->flags = flags;
522 soffset >>= radeon_vm_block_size;
523 eoffset >>= radeon_vm_block_size;
525 BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
527 if (eoffset > vm->max_pde_used)
528 vm->max_pde_used = eoffset;
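/* Illustrative example (assumed radeon_vm_block_size = 9): a mapping
 * covering pfns 496..1039 shifts down to PDE indices 496 >> 9 = 0 through
 * 1040 >> 9 = 2, so page tables 0, 1 and 2 are allocated below and
 * max_pde_used grows to at least 2. */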
530 radeon_bo_unreserve(bo_va->bo);
532 /* walk over the address space and allocate the page tables */
533 for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
534 struct radeon_bo *pt;
536 if (vm->page_tables[pt_idx].bo)
539 /* drop mutex to allocate and clear page table */
540 mutex_unlock(&vm->mutex);
542 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
543 RADEON_GPU_PAGE_SIZE, true,
544 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
548 r = radeon_vm_clear_bo(rdev, pt);
550 radeon_bo_unref(&pt);
551 radeon_bo_reserve(bo_va->bo, false);
555 /* acquire mutex again */
556 mutex_lock(&vm->mutex);
557 if (vm->page_tables[pt_idx].bo) {
558 /* someone else allocated the pt in the meantime */
559 mutex_unlock(&vm->mutex);
560 radeon_bo_unref(&pt);
561 mutex_lock(&vm->mutex);
565 vm->page_tables[pt_idx].addr = 0;
566 vm->page_tables[pt_idx].bo = pt;
569 mutex_unlock(&vm->mutex);
570 return radeon_bo_reserve(bo_va->bo, false);
574 * radeon_vm_map_gart - get the physical address of a gart page
576 * @rdev: radeon_device pointer
577 * @addr: the unmapped addr
579 * Look up the physical address of the page that the pte resolves to.
581 * Returns the physical address of the page.
583 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
587 /* page table offset */
588 result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
590 /* in case cpu page size != gpu page size */
591 result |= addr & (~PAGE_MASK);
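/*
 * Worked example (illustrative, 4KB CPU pages assumed): for addr = 0x12345
 * the lookup uses pages_addr[0x12] and keeps the in-page offset:
 *
 *	result  = rdev->gart.pages_addr[0x12345 >> PAGE_SHIFT]  - page DMA addr
 *	result |= 0x12345 & ~PAGE_MASK                          - adds 0x345
 */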
597 * radeon_vm_page_flags - translate page flags to what the hw uses
599 * @flags: flags coming from userspace
601 * Translate the flags the userspace ABI uses to hw flags.
603 static uint32_t radeon_vm_page_flags(uint32_t flags)
605 uint32_t hw_flags = 0;
606 hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
607 hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
608 hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
609 if (flags & RADEON_VM_PAGE_SYSTEM) {
610 hw_flags |= R600_PTE_SYSTEM;
611 hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
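/*
 * Illustrative example: a request for RADEON_VM_PAGE_VALID |
 * RADEON_VM_PAGE_SYSTEM | RADEON_VM_PAGE_SNOOPED translates to
 * R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; READABLE and
 * WRITEABLE map one to one, and SNOOPED is only honored together with SYSTEM.
 */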
617 * radeon_vm_update_page_directory - make sure that page directory is valid
619 * @rdev: radeon_device pointer
621 * @start: start of GPU address range
622 * @end: end of GPU address range
624 * Allocates new page tables if necessary
625 * and updates the page directory (cayman+).
626 * Returns 0 for success, error for failure.
628 * Global and local mutex must be locked!
630 int radeon_vm_update_page_directory(struct radeon_device *rdev,
631 struct radeon_vm *vm)
633 struct radeon_bo *pd = vm->page_directory;
634 uint64_t pd_addr = radeon_bo_gpu_offset(pd);
635 uint32_t incr = RADEON_VM_PTE_COUNT * 8;
636 uint64_t last_pde = ~0, last_pt = ~0;
637 unsigned count = 0, pt_idx, ndw;
644 /* assume the worst case */
645 ndw += vm->max_pde_used * 16;
647 /* update too big for an IB */
651 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
656 /* walk over the address space and update the page directory */
657 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
658 struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
664 pt = radeon_bo_gpu_offset(bo);
665 if (vm->page_tables[pt_idx].addr == pt)
667 vm->page_tables[pt_idx].addr = pt;
669 pde = pd_addr + pt_idx * 8;
670 if (((last_pde + 8 * count) != pde) ||
671 ((last_pt + incr * count) != pt)) {
674 radeon_vm_set_pages(rdev, &ib, last_pde,
675 last_pt, count, incr,
688 radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
689 incr, R600_PTE_VALID);
691 if (ib.length_dw != 0) {
692 radeon_asic_vm_pad_ib(rdev, &ib);
693 radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
694 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
695 r = radeon_ib_schedule(rdev, &ib, NULL);
697 radeon_ib_free(rdev, &ib);
700 radeon_fence_unref(&vm->fence);
701 vm->fence = radeon_fence_ref(ib.fence);
702 radeon_fence_unref(&vm->last_flush);
704 radeon_ib_free(rdev, &ib);
710 * radeon_vm_frag_ptes - add fragment information to PTEs
712 * @rdev: radeon_device pointer
713 * @ib: IB for the update
714 * @pe_start: first PTE to handle
715 * @pe_end: last PTE to handle
716 * @addr: addr those PTEs should point to
717 * @flags: hw mapping flags
719 * Global and local mutex must be locked!
721 static void radeon_vm_frag_ptes(struct radeon_device *rdev,
722 struct radeon_ib *ib,
723 uint64_t pe_start, uint64_t pe_end,
724 uint64_t addr, uint32_t flags)
727 * The MC L1 TLB supports variable sized pages, based on a fragment
728 * field in the PTE. When this field is set to a non-zero value, page
729 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
730 * flags are considered valid for all PTEs within the fragment range
731 * and corresponding mappings are assumed to be physically contiguous.
733 * The L1 TLB can store a single PTE for the whole fragment,
734 * significantly increasing the space available for translation
735 * caching. This leads to large improvements in throughput when the
736 * TLB is under pressure.
738 * The L2 TLB distributes small and large fragments into two
739 * asymmetric partitions. The large fragment cache is significantly
740 * larger. Thus, we try to use large fragments wherever possible.
741 * Userspace can support this by aligning virtual base address and
742 * allocation size to the fragment size.
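 *
 * Worked example (illustrative, SI-style 64KB fragments assumed, i.e.
 * 16 PTEs or 0x80 bytes of PTE space): a PTE range of
 * pe_start = 0x1030, pe_end = 0x1250 rounds to
 * frag_start = ALIGN(0x1030, 0x80) = 0x1080 and
 * frag_end = 0x1250 & ~0x7f = 0x1200, so the head and tail get plain 4KB
 * PTEs while the aligned middle part gets the fragment flag.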
745 /* NI is optimized for 256KB fragments, SI and newer for 64KB */
746 uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
747 R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
748 uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
750 uint64_t frag_start = ALIGN(pe_start, frag_align);
751 uint64_t frag_end = pe_end & ~(frag_align - 1);
755 /* system pages are not contiguous */
756 if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
757 (frag_start >= frag_end)) {
759 count = (pe_end - pe_start) / 8;
760 radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
761 RADEON_GPU_PAGE_SIZE, flags);
765 /* handle the 4K area at the beginning */
766 if (pe_start != frag_start) {
767 count = (frag_start - pe_start) / 8;
768 radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
769 RADEON_GPU_PAGE_SIZE, flags);
770 addr += RADEON_GPU_PAGE_SIZE * count;
773 /* handle the area in the middle */
774 count = (frag_end - frag_start) / 8;
775 radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
776 RADEON_GPU_PAGE_SIZE, flags | frag_flags);
778 /* handle the 4K area at the end */
779 if (frag_end != pe_end) {
780 addr += RADEON_GPU_PAGE_SIZE * count;
781 count = (pe_end - frag_end) / 8;
782 radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
783 RADEON_GPU_PAGE_SIZE, flags);
788 * radeon_vm_update_ptes - make sure that page tables are valid
790 * @rdev: radeon_device pointer
792 * @start: start of GPU address range
793 * @end: end of GPU address range
794 * @dst: destination address to map to
795 * @flags: mapping flags
797 * Update the page tables in the range @start - @end (cayman+).
799 * Global and local mutex must be locked!
801 static void radeon_vm_update_ptes(struct radeon_device *rdev,
802 struct radeon_vm *vm,
803 struct radeon_ib *ib,
804 uint64_t start, uint64_t end,
805 uint64_t dst, uint32_t flags)
807 uint64_t mask = RADEON_VM_PTE_COUNT - 1;
808 uint64_t last_pte = ~0, last_dst = ~0;
812 /* walk over the address space and update the page tables */
813 for (addr = start; addr < end; ) {
814 uint64_t pt_idx = addr >> radeon_vm_block_size;
815 struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
819 radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
821 if ((addr & ~mask) == (end & ~mask))
824 nptes = RADEON_VM_PTE_COUNT - (addr & mask);
826 pte = radeon_bo_gpu_offset(pt);
827 pte += (addr & mask) * 8;
829 if ((last_pte + 8 * count) != pte) {
832 radeon_vm_frag_ptes(rdev, ib, last_pte,
833 last_pte + 8 * count,
845 dst += nptes * RADEON_GPU_PAGE_SIZE;
849 radeon_vm_frag_ptes(rdev, ib, last_pte,
850 last_pte + 8 * count,
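/*
 * Illustrative example (assumed radeon_vm_block_size = 9, 512 PTEs per
 * table): for addr = 0x3fe (in pages) the walk selects page table
 * 0x3fe >> 9 = 1, starts at entry 0x3fe & 0x1ff = 0x1fe and, if the range
 * continues past this table, writes nptes = 512 - 0x1fe = 2 entries before
 * stepping into page table 2.
 */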
856 * radeon_vm_bo_update - map a bo into the vm page table
858 * @rdev: radeon_device pointer
860 * @bo: radeon buffer object
863 * Fill in the page table entries for @bo (cayman+).
864 * Returns 0 for success, -EINVAL for failure.
866 * Object has to be reserved and mutex must be locked!
868 int radeon_vm_bo_update(struct radeon_device *rdev,
869 struct radeon_bo_va *bo_va,
870 struct ttm_mem_reg *mem)
872 struct radeon_vm *vm = bo_va->vm;
878 if (!bo_va->it.start) {
879 dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
884 list_del_init(&bo_va->vm_status);
886 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
887 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
888 bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
890 addr = mem->start << PAGE_SHIFT;
891 if (mem->mem_type != TTM_PL_SYSTEM) {
892 bo_va->flags |= RADEON_VM_PAGE_VALID;
894 if (mem->mem_type == TTM_PL_TT) {
895 bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
896 if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
897 bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
900 addr += rdev->vm_manager.vram_base_offset;
906 if (addr == bo_va->addr)
910 trace_radeon_vm_bo_update(bo_va);
912 nptes = bo_va->it.last - bo_va->it.start + 1;
917 if (radeon_vm_block_size > 11)
918 /* reserve space for one header for every 2k dwords */
919 ndw += (nptes >> 11) * 4;
921 /* reserve space for one header for
922 every (1 << BLOCK_SIZE) entries */
923 ndw += (nptes >> radeon_vm_block_size) * 4;
925 /* reserve space for pte addresses */
928 /* update too big for an IB */
932 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
937 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
938 bo_va->it.last + 1, addr,
939 radeon_vm_page_flags(bo_va->flags));
941 radeon_asic_vm_pad_ib(rdev, &ib);
942 radeon_semaphore_sync_to(ib.semaphore, vm->fence);
943 r = radeon_ib_schedule(rdev, &ib, NULL);
945 radeon_ib_free(rdev, &ib);
948 radeon_fence_unref(&vm->fence);
949 vm->fence = radeon_fence_ref(ib.fence);
950 radeon_ib_free(rdev, &ib);
951 radeon_fence_unref(&vm->last_flush);
957 * radeon_vm_clear_freed - clear freed BOs in the PT
959 * @rdev: radeon_device pointer
962 * Make sure all freed BOs are cleared in the PT.
963 * Returns 0 for success.
965 * PTs have to be reserved and mutex must be locked!
967 int radeon_vm_clear_freed(struct radeon_device *rdev,
968 struct radeon_vm *vm)
970 struct radeon_bo_va *bo_va, *tmp;
973 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
974 r = radeon_vm_bo_update(rdev, bo_va, NULL);
975 radeon_bo_unref(&bo_va->bo);
985 * radeon_vm_clear_invalids - clear invalidated BOs in the PT
987 * @rdev: radeon_device pointer
990 * Make sure all invalidated BOs are cleared in the PT.
991 * Returns 0 for success.
993 * PTs have to be reserved and mutex must be locked!
995 int radeon_vm_clear_invalids(struct radeon_device *rdev,
996 struct radeon_vm *vm)
998 struct radeon_bo_va *bo_va, *tmp;
1001 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
1002 r = radeon_vm_bo_update(rdev, bo_va, NULL);
1010 * radeon_vm_bo_rmv - remove a bo from a specific vm
1012 * @rdev: radeon_device pointer
1013 * @bo_va: requested bo_va
1015 * Remove @bo_va->bo from the requested vm (cayman+).
1017 * Object has to be reserved!
1019 void radeon_vm_bo_rmv(struct radeon_device *rdev,
1020 struct radeon_bo_va *bo_va)
1022 struct radeon_vm *vm = bo_va->vm;
1024 list_del(&bo_va->bo_list);
1026 mutex_lock(&vm->mutex);
1027 interval_tree_remove(&bo_va->it, &vm->va);
1028 list_del(&bo_va->vm_status);
1031 bo_va->bo = radeon_bo_ref(bo_va->bo);
1032 list_add(&bo_va->vm_status, &vm->freed);
1037 mutex_unlock(&vm->mutex);
1041 * radeon_vm_bo_invalidate - mark the bo as invalid
1043 * @rdev: radeon_device pointer
1045 * @bo: radeon buffer object
1047 * Mark @bo as invalid (cayman+).
1049 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1050 struct radeon_bo *bo)
1052 struct radeon_bo_va *bo_va;
1054 list_for_each_entry(bo_va, &bo->va, bo_list) {
1056 mutex_lock(&bo_va->vm->mutex);
1057 list_del(&bo_va->vm_status);
1058 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1059 mutex_unlock(&bo_va->vm->mutex);
1065 * radeon_vm_init - initialize a vm instance
1067 * @rdev: radeon_device pointer
1070 * Init @vm fields (cayman+).
1072 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1074 const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
1075 RADEON_VM_PTE_COUNT * 8);
1076 unsigned pd_size, pd_entries, pts_size;
1080 vm->ib_bo_va = NULL;
1082 vm->last_flush = NULL;
1083 vm->last_id_use = NULL;
1084 mutex_init(&vm->mutex);
1086 INIT_LIST_HEAD(&vm->invalidated);
1087 INIT_LIST_HEAD(&vm->freed);
1089 pd_size = radeon_vm_directory_size(rdev);
1090 pd_entries = radeon_vm_num_pdes(rdev);
1092 /* allocate page table array */
1093 pts_size = pd_entries * sizeof(struct radeon_vm_pt);
1094 vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
1095 if (vm->page_tables == NULL) {
1096 DRM_ERROR("Cannot allocate memory for page table array\n");
1100 r = radeon_bo_create(rdev, pd_size, align, true,
1101 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
1102 &vm->page_directory);
1106 r = radeon_vm_clear_bo(rdev, vm->page_directory);
1108 radeon_bo_unref(&vm->page_directory);
1109 vm->page_directory = NULL;
1117 * radeon_vm_fini - tear down a vm instance
1119 * @rdev: radeon_device pointer
1122 * Tear down @vm (cayman+).
1123 * Unbind the VM and remove all bos from the vm bo list
1125 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1127 struct radeon_bo_va *bo_va, *tmp;
1130 if (!RB_EMPTY_ROOT(&vm->va)) {
1131 dev_err(rdev->dev, "still active bo inside vm\n");
1133 rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
1134 interval_tree_remove(&bo_va->it, &vm->va);
1135 r = radeon_bo_reserve(bo_va->bo, false);
1137 list_del_init(&bo_va->bo_list);
1138 radeon_bo_unreserve(bo_va->bo);
1142 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
1143 radeon_bo_unref(&bo_va->bo);
1147 for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
1148 radeon_bo_unref(&vm->page_tables[i].bo);
1149 kfree(vm->page_tables);
1151 radeon_bo_unref(&vm->page_directory);
1153 radeon_fence_unref(&vm->fence);
1154 radeon_fence_unref(&vm->last_flush);
1155 radeon_fence_unref(&vm->last_id_use);
1157 mutex_destroy(&vm->mutex);