/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <subdev/mmu.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>

#include <core/gpuobj.h>
/* Map from compressed to corresponding uncompressed storage type.
 * The value 0xff represents an invalid storage type.
 */
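/* The index is a PTE storage type (kind); the value is the uncompressed
 * kind it can be demoted to.  gf100_vm_map_sg() below uses this to squash
 * compressed kinds for system-memory mappings, presumably because those
 * have no compression tag backing in VRAM.
 */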
const u8 gf100_pte_storage_type_map[256] =
{
	0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
	0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
	0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
	0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
	0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
	0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
	0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
	0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
	0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
	0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
	0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
	0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
	0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
	0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
	0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
	0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};
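/* Write one page directory entry.  Each PDE is two 32-bit words: from the
 * code below, word 0 takes pgt[1] and word 1 takes pgt[0] (presumably the
 * large-page and small-page page tables respectively), each encoded as
 * (address >> 8) | 1 when the table exists and left as 0 otherwise.
 */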
static void
gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
{
	u32 pde[2] = { 0, 0 };

	if (pgt[0])
		pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
	if (pgt[1])
		pde[0] = 0x00000001 | (pgt[1]->addr >> 8);

	nv_wo32(pgd, (index * 8) + 0, pde[0]);
	nv_wo32(pgd, (index * 8) + 4, pde[1]);
}
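/* Pack a physical address and flags into the 64-bit PTE format used here:
 * the address is shifted down by 8, bit 0 marks the entry valid, bit 1 is
 * set for NV_MEM_ACCESS_SYS mappings, bits 32-35 carry the target
 * (aperture) and bits 36 and up the storage type.
 */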
static inline u64
gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
{
	phys >>= 8;
	phys |= 0x00000001; /* present */
	if (vma->access & NV_MEM_ACCESS_SYS)
		phys |= 0x00000002;
	phys |= ((u64)target  << 32);
	phys |= ((u64)memtype << 36);
	return phys;
}
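/* Map a physically contiguous (VRAM) allocation: write cnt PTEs starting
 * at index pte, stepping phys by the page size implied by the VMA node's
 * page shift (minus 8, since the address is already shifted down by 8).
 * If the memory carries compression tags, the tag index is packed into
 * the upper PTE bits and the matching LTC tag RAM is cleared; delta >> 17
 * appears to convert the byte offset into 128KiB tag units.
 */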
static void
gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
	     struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
	u64 next = 1 << (vma->node->type - 8);

	phys  = gf100_vm_addr(vma, phys, mem->memtype, 0);
	pte <<= 3;

	if (mem->tag) {
		struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu);
		u32 tag = mem->tag->offset + (delta >> 17);
		phys |= (u64)tag << (32 + 12);
		next |= (u64)1   << (32 + 12);
		ltc->tags_clear(ltc, tag, cnt);
	}

	while (cnt--) {
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		phys += next;
		pte  += 8;
	}
}
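/* Map system-memory pages from a list of DMA addresses, one PTE per page.
 * Target 5 vs 7 selects snooped vs non-snooped system memory (going by
 * NV_MEM_ACCESS_NOSNOOP), and compressed storage types are demoted to
 * their uncompressed equivalents via the table above.
 */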
static void
gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
		struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
	/* compressed storage types are invalid for system memory */
	u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];

	pte <<= 3;
	while (cnt--) {
		u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
}
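/* Invalidate cnt PTEs starting at index pte by zeroing both words. */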
static void
gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, 0x00000000);
		nv_wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
}
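/* Flush the GPU's VM TLBs for every page directory attached to this VM by
 * poking the flush registers: 0x100cb8 takes the page directory base
 * address >> 8 and 0x100cbc triggers the flush.  The waits on 0x100c80
 * appear to guard against an (undocumented) flush queue filling up.
 */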
static void
gf100_vm_flush(struct nvkm_vm *vm)
{
	struct nvkm_mmu *mmu = (void *)vm->mmu;
	struct nvkm_device *device = mmu->subdev.device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_vm_pgd *vpgd;
	u32 type;

	bar->flush(bar);

	type = 0x00000001; /* PAGE_ALL */
	if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
		type |= 0x00000004; /* HUB_ONLY */

	mutex_lock(&nv_subdev(mmu)->mutex);
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		/* looks like maybe a "free flush slots" counter, the
		 * faster you write to 0x100cbc the more it decreases
		 */
		if (!nv_wait_ne(mmu, 0x100c80, 0x00ff0000, 0x00000000)) {
			nv_error(mmu, "vm timeout 0: 0x%08x %d\n",
				 nvkm_rd32(device, 0x100c80), type);
		}

		nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
		nvkm_wr32(device, 0x100cbc, 0x80000000 | type);

		/* wait for flush to be queued? */
		if (!nv_wait(mmu, 0x100c80, 0x00008000, 0x00008000)) {
			nv_error(mmu, "vm timeout 1: 0x%08x %d\n",
				 nvkm_rd32(device, 0x100c80), type);
		}
	}
	mutex_unlock(&nv_subdev(mmu)->mutex);
}
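/* Instantiate an address space; 4096 is the block size handed to the
 * common VM allocator (presumably the minimum allocation granularity).
 */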
static int
gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
		struct nvkm_vm **pvm)
{
	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm);
}
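/* Constructor: set up a 40-bit virtual address space and hook up the
 * gf100 page table handlers above.
 */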
static int
gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nvkm_mmu *mmu;
	int ret;

	ret = nvkm_mmu_create(parent, engine, oclass, "VM", "mmu", &mmu);
	*pobject = nv_object(mmu);
	if (ret)
		return ret;

	mmu->limit = 1ULL << 40;
	mmu->dma_bits = 40;
	mmu->pgt_bits  = 27 - 12;
	mmu->spg_shift = 12;
	mmu->lpg_shift = 17;
	mmu->create = gf100_vm_create;
	mmu->map_pgt = gf100_vm_map_pgt;
	mmu->map = gf100_vm_map;
	mmu->map_sg = gf100_vm_map_sg;
	mmu->unmap = gf100_vm_unmap;
	mmu->flush = gf100_vm_flush;
	return 0;
}
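/* Subdev class description: binds the constructor above to the MMU subdev
 * for the GF100 (0xc0) generation, with the default dtor/init/fini hooks.
 */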
struct nvkm_oclass
gf100_mmu_oclass = {
	.handle = NV_SUBDEV(MMU, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_mmu_ctor,
		.dtor = _nvkm_mmu_dtor,
		.init = _nvkm_mmu_init,
		.fini = _nvkm_mmu_fini,
	},
};