/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <core/gpuobj.h>
#include <core/mm.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
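
/* The walks below share a common scheme: "pte" indexes into the current
 * page table, "max" is the number of PTEs that table holds for the page
 * size selected by the vma, and whenever a run of PTEs reaches the end
 * of a table the walk bumps "pde" and restarts at PTE zero in the next
 * table.
 *
 * Map the memory regions backing "node" into the address range held by
 * "vma", starting "delta" bytes into that range.
 */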
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_vmmgr *vmm = vm->vmm;
        struct nouveau_mm_node *r;
        int big = vma->node->type != vmm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vmm->pgt_bits - bits);
        u32 end, len;

        delta = 0;
        list_for_each_entry(r, &node->regions, rl_entry) {
                u64 phys = (u64)r->offset << 12;
                u32 num  = r->length >> bits;

                while (num) {
                        struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                        /* clamp the run at the end of the current table */
                        end = (pte + num);
                        if (unlikely(end >= max))
                                end = max;
                        len = end - pte;

                        vmm->map(vma, pgt, node, pte, len, phys, delta);

                        num -= len;
                        pte += len;
                        if (unlikely(end >= max)) {
                                phys += len << (bits + 12);
                                pde++;
                                pte = 0;
                        }

                        delta += (u64)len << vma->node->type;
                }
        }

        vmm->flush(vm);
}
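
/* Convenience wrapper: map the whole node at the start of the vma. */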
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
        nouveau_vm_map_at(vma, 0, node);
}
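
/* Scatter-gather variant of the above.  Addresses are handed to the
 * backend one PTE at a time since a single sg entry may cross a
 * page-table boundary; the remainder loop finishes such an entry in
 * the next table after the walk has stepped the PDE.
 */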
void
nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
                        struct nouveau_mem *mem)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_vmmgr *vmm = vm->vmm;
        int big = vma->node->type != vmm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vmm->pgt_bits - bits);
        unsigned m, sglen;
        u32 end, len;
        int i;
        struct scatterlist *sg;

        for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
                sglen = sg_dma_len(sg) >> PAGE_SHIFT;

                end = pte + sglen;
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                for (m = 0; m < len; m++) {
                        dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

                        vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
                        num--;
                        pte++;

                        if (num == 0)
                                goto finish;
                }
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
                if (m < sglen) {
                        /* the sg entry continues in the next page table */
                        pgt = vm->pgt[pde].obj[big];
                        for (; m < sglen; m++) {
                                dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

                                vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
                                num--;
                                pte++;
                                if (num == 0)
                                        goto finish;
                        }
                }
        }
finish:
        vmm->flush(vm);
}
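
/* Page-list variant: mem->pages is a flat array of DMA addresses, so
 * whole boundary-clamped runs can be handed to the backend, advancing
 * the list pointer past each run.
 */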
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
                  struct nouveau_mem *mem)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_vmmgr *vmm = vm->vmm;
        dma_addr_t *list = mem->pages;
        int big = vma->node->type != vmm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vmm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vmm->map_sg(vma, pgt, mem, pte, len, list);

                num  -= len;
                pte  += len;
                list += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vmm->flush(vm);
}
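
/* Clear the PTEs covering "length" bytes starting "delta" bytes into
 * the vma, using the same boundary-clamped walk as the map paths.
 */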
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_vmmgr *vmm = vm->vmm;
        int big = vma->node->type != vmm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vmm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vmm->unmap(pgt, pte, len);

                num -= len;
                pte += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vmm->flush(vm);
}
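
/* Unmap the entire vma. */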
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
        nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}
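
/* Drop one reference on each page table in [fpde, lpde]; tables that
 * reach zero have their PDEs cleared in every linked page directory.
 * The mm mutex is dropped around the final gpuobj unref, presumably
 * because object destruction may sleep or take other locks.
 */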
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
        struct nouveau_vmmgr *vmm = vm->vmm;
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_vm_pgt *vpgt;
        struct nouveau_gpuobj *pgt;
        u32 pde;

        for (pde = fpde; pde <= lpde; pde++) {
                vpgt = &vm->pgt[pde - vm->fpde];
                if (--vpgt->refcount[big])
                        continue;

                pgt = vpgt->obj[big];
                vpgt->obj[big] = NULL;

                list_for_each_entry(vpgd, &vm->pgd_list, head) {
                        vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
                }

                mutex_unlock(&vm->mm.mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&vm->mm.mutex);
        }
}
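
/* Allocate the page table for "pde" and write it into every linked
 * page directory.  Called with the mm mutex held; the mutex is dropped
 * around the allocation itself, so a racing thread may fill the PDE
 * first.
 */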
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
        struct nouveau_vmmgr *vmm = vm->vmm;
        struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_gpuobj *pgt;
        int big = (type != vmm->spg_shift);
        u32 pgt_size;
        int ret;

        pgt_size  = (1 << (vmm->pgt_bits + 12)) >> type;
        pgt_size *= 8;

        mutex_unlock(&vm->mm.mutex);
        ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
        mutex_lock(&vm->mm.mutex);
        if (unlikely(ret))
                return ret;

        /* someone beat us to filling the PDE while we didn't have the lock */
        if (unlikely(vpgt->refcount[big]++)) {
                mutex_unlock(&vm->mm.mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&vm->mm.mutex);
                return 0;
        }

        vpgt->obj[big] = pgt;
        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
        }

        return 0;
}
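
/* Allocate a chunk of address space from the vm, taking references on
 * (and allocating, where needed) the page tables that cover it.  On
 * failure, any tables referenced so far are released again.
 */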
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
               u32 access, struct nouveau_vma *vma)
{
        struct nouveau_vmmgr *vmm = vm->vmm;
        u32 align = (1 << page_shift) >> 12;
        u32 msize = size >> 12;
        u32 fpde, lpde, pde;
        int ret;

        mutex_lock(&vm->mm.mutex);
        ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align,
                              &vma->node);
        if (unlikely(ret != 0)) {
                mutex_unlock(&vm->mm.mutex);
                return ret;
        }

        fpde = (vma->node->offset >> vmm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

        for (pde = fpde; pde <= lpde; pde++) {
                struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
                int big = (vma->node->type != vmm->spg_shift);

                if (likely(vpgt->refcount[big])) {
                        vpgt->refcount[big]++;
                        continue;
                }

                ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
                if (ret) {
                        if (pde != fpde)
                                nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
                        nouveau_mm_free(&vm->mm, &vma->node);
                        mutex_unlock(&vm->mm.mutex);
                        return ret;
                }
        }
        mutex_unlock(&vm->mm.mutex);

        vma->vm     = vm;
        vma->offset = (u64)vma->node->offset << 12;
        vma->access = access;
        return 0;
}
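
/* Return a vma's address space to the allocator and drop the
 * page-table references taken by nouveau_vm_get().
 */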
void
nouveau_vm_put(struct nouveau_vma *vma)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_vmmgr *vmm = vm->vmm;
        u32 fpde, lpde;

        if (unlikely(vma->node == NULL))
                return;
        fpde = (vma->node->offset >> vmm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

        mutex_lock(&vm->mm.mutex);
        nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
        nouveau_mm_free(&vm->mm, &vma->node);
        mutex_unlock(&vm->mm.mutex);
}
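
/* Create a vm spanning [offset, offset + length), with allocations
 * managed from mm_offset upwards in "block"-byte granularity.
 */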
int
nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
                  u64 mm_offset, u32 block, struct nouveau_vm **pvm)
{
        struct nouveau_vm *vm;
        u64 mm_length = (offset + length) - mm_offset;
        int ret;

        vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        INIT_LIST_HEAD(&vm->pgd_list);
        vm->vmm = vmm;
        vm->refcount = 1;
        vm->fpde = offset >> (vmm->pgt_bits + 12);
        vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);

        vm->pgt  = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
        if (!vm->pgt) {
                kfree(vm);
                return -ENOMEM;
        }

        ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
                              block >> 12);
        if (ret) {
                kfree(vm->pgt);
                kfree(vm);
                return ret;
        }

        return 0;
}
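
/* Instantiate a vm via the device's vmmgr backend. */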
int
nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
               u64 mm_offset, struct nouveau_vm **pvm)
{
        struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
        return vmm->create(vmm, offset, length, mm_offset, pvm);
}
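
/* Link a page directory into the vm, populating its PDEs for every
 * page table that already exists.
 */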
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
        struct nouveau_vmmgr *vmm = vm->vmm;
        struct nouveau_vm_pgd *vpgd;
        int i;

        if (!pgd)
                return 0;

        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
        if (!vpgd)
                return -ENOMEM;

        nouveau_gpuobj_ref(pgd, &vpgd->obj);

        mutex_lock(&vm->mm.mutex);
        for (i = vm->fpde; i <= vm->lpde; i++)
                vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
        list_add(&vpgd->head, &vm->pgd_list);
        mutex_unlock(&vm->mm.mutex);
        return 0;
}
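
/* Remove a page directory from the vm and drop the reference taken by
 * nouveau_vm_link().
 */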
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
        struct nouveau_vm_pgd *vpgd, *tmp;
        struct nouveau_gpuobj *pgd = NULL;

        if (!mpgd)
                return;

        mutex_lock(&vm->mm.mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                if (vpgd->obj == mpgd) {
                        pgd = vpgd->obj;
                        list_del(&vpgd->head);
                        kfree(vpgd);
                        break;
                }
        }
        mutex_unlock(&vm->mm.mutex);

        nouveau_gpuobj_ref(NULL, &pgd);
}
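
/* Final teardown, called once the last vm reference is dropped. */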
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
        struct nouveau_vm_pgd *vpgd, *tmp;

        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                nouveau_vm_unlink(vm, vpgd->obj);
        }

        nouveau_mm_fini(&vm->mm);
        kfree(vm->pgt);
        kfree(vm);
}
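
/* Exchange the vm reference held in *ptr for a reference on "ref",
 * linking/unlinking "pgd" as appropriate and destroying the old vm if
 * its refcount hits zero.
 */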
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
               struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm *vm;
        int ret;

        vm = ref;
        if (vm) {
                ret = nouveau_vm_link(vm, pgd);
                if (ret)
                        return ret;

                vm->refcount++;
        }

        vm = *ptr;
        *ptr = ref;

        if (vm) {
                nouveau_vm_unlink(vm, pgd);

                if (--vm->refcount == 0)
                        nouveau_vm_del(vm);
        }

        return 0;
}