/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>

#include <nvif/if900d.h>
#include <nvif/unpack.h>
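
/* A PTE here is a single 64-bit value: the physical address is stored
 * shifted right by 8, and gf100_vmm_valid() packs the remaining
 * attributes (privileged, read-only, volatile, aperture, kind, and
 * compression tags) into map->type for the helpers below to OR in.
 */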
static inline void
gf100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 base = (addr >> 8) | map->type;
	u64 data = base;

	if (map->ctag && !(map->next & (1ULL << 44))) {
		while (ptes--) {
			data = base | ((map->ctag >> 1) << 44);
			if (!(map->ctag++ & 1))
				data |= BIT_ULL(60);

			VMM_WO064(pt, vmm, ptei++ * 8, data);
			base += map->next;
		}
	} else {
		map->type += ptes * map->ctag;

		while (ptes--) {
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			data += map->next;
		}
	}
}

void
gf100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}

void
gf100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	if (map->page->shift == PAGE_SHIFT) {
		/* Fast path: one DMA address per PTE. */
		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
		nvkm_kmap(pt->memory);
		while (ptes--) {
			const u64 data = (*map->dma++ >> 8) | map->type;
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			map->type += map->ctag;
		}
		nvkm_done(pt->memory);
		return;
	}

	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}

void
gf100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}

void
gf100_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	/* Clear PTEs by filling them with zeroes. */
	VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
}

const struct nvkm_vmm_desc_func
gf100_vmm_pgt = {
	.unmap = gf100_vmm_pgt_unmap,
	.mem = gf100_vmm_pgt_mem,
	.dma = gf100_vmm_pgt_dma,
	.sgl = gf100_vmm_pgt_sgl,
};
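
/* Each 64-bit PDE appears to hold both of a directory entry's page
 * tables: pgt->pt[0] in the low 32 bits and pgt->pt[1] in the high
 * 32 bits, each tagged with its memory target and, for host memory,
 * a volatile bit.
 */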
void
gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
	struct nvkm_mmu_pt *pd = pgd->pt[0];
	struct nvkm_mmu_pt *pt;
	u64 data = 0;

	if ((pt = pgt->pt[0])) {
		switch (nvkm_memory_target(pt->memory)) {
		case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 0; break;
		case NVKM_MEM_TARGET_HOST: data |= 2ULL << 0;
			data |= BIT_ULL(35); /* VOL */
			break;
		case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 0; break;
		default:
			WARN_ON(1);
			return;
		}
		data |= pt->addr >> 8;
	}

	if ((pt = pgt->pt[1])) {
		switch (nvkm_memory_target(pt->memory)) {
		case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 32; break;
		case NVKM_MEM_TARGET_HOST: data |= 2ULL << 32;
			data |= BIT_ULL(34); /* VOL */
			break;
		case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 32; break;
		default:
			WARN_ON(1);
			return;
		}
		data |= pt->addr << 24;
	}

	nvkm_kmap(pd->memory);
	VMM_WO064(pd, vmm, pdei * 8, data);
	nvkm_done(pd->memory);
}

const struct nvkm_vmm_desc_func
gf100_vmm_pgd = {
	.unmap = gf100_vmm_pgt_unmap,
	.pde = gf100_vmm_pgd_pde,
};
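
/* Page-table layouts, named _<log2(big page)>_<log2(small page)>.
 * Each entry gives the level type, the number of address bits it
 * translates, the size of each PTE/PDE in bytes, the table address
 * alignment, and the functions used to write it.
 */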
static const struct nvkm_vmm_desc
gf100_vmm_desc_17_12[] = {
	{ SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
	{}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_17_17[] = {
	{ LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
	{}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_16_12[] = {
	{ SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
	{}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_16_16[] = {
	{ LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
	{}
};
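
/* Issue a TLB flush for this VMM's page directory.  The semantics of
 * the 0x100c80/0x100cb8/0x100cbc registers aren't fully understood;
 * the waits below are best-effort interpretations.
 */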
void
gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth)
{
	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 type = depth << 24;

	type = 0x00000001; /* PAGE_ALL */
	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
		type |= 0x00000004; /* HUB_ONLY */

	mutex_lock(&subdev->mutex);
	/* Looks like maybe a "free flush slots" counter, the
	 * faster you write to 0x100cbc the more it decreases.
	 */
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
			break;
	);

	nvkm_wr32(device, 0x100cb8, vmm->pd->pt[0]->addr >> 8);
	nvkm_wr32(device, 0x100cbc, 0x80000000 | type);

	/* Wait for flush to be queued? */
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
			break;
	);
	mutex_unlock(&subdev->mutex);
}

void
gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
	gf100_vmm_flush_(vmm, 0);
}
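
/* Validate map arguments and pre-compute the PTE template.  map->type
 * ends up packing: bit 0 valid, bit 1 privileged, bit 2 read-only,
 * bit 32 volatile, bits 33:34 aperture, bits 36:43 kind, and, when
 * compression is enabled, the comptag line from bit 44 up.
 */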
int
gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
		struct nvkm_vmm_map *map)
{
	const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
	const struct nvkm_vmm_page *page = map->page;
	const bool gm20x = page->desc->func->sparse != NULL;
	union {
		struct gf100_vmm_map_vn vn;
		struct gf100_vmm_map_v0 v0;
	} *args = argv;
	struct nvkm_device *device = vmm->mmu->subdev.device;
	struct nvkm_memory *memory = map->memory;
	u8  kind, priv, ro, vol;
	int kindn, aper, ret = -ENOSYS;
	const u8 *kindm;

	map->next = (1 << page->shift) >> 8;
	map->type = map->ctag = 0;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		vol  = !!args->v0.vol;
		ro   = !!args->v0.ro;
		priv = !!args->v0.priv;
		kind =   args->v0.kind;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		vol  = target == NVKM_MEM_TARGET_HOST;
		ro   = 0;
		priv = 0;
		kind = 0x00;
	} else {
		VMM_DEBUG(vmm, "args");
		return ret;
	}

	aper = vmm->func->aper(target);
	if (WARN_ON(aper < 0))
		return aper;

	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
	if (kind >= kindn || kindm[kind] == 0xff) {
		VMM_DEBUG(vmm, "kind %02x", kind);
		return -EINVAL;
	}

	if (kindm[kind] != kind) {
		u32 comp = (page->shift == 16 && !gm20x) ? 16 : 17;
		u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp;
		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
			return -EINVAL;
		}

		ret = nvkm_memory_tags_get(memory, device, tags,
					   nvkm_ltc_tags_clear,
					   &map->tags);
		if (ret) {
			VMM_DEBUG(vmm, "comp %d", ret);
			return ret;
		}

		if (map->tags->mn) {
			u64 tags = map->tags->mn->offset + (map->offset >> 17);
			if (page->shift == 17 || !gm20x) {
				map->type |= tags << 44;
				map->ctag |= 1ULL << 44;
				map->next |= 1ULL << 44;
			} else {
				map->ctag |= tags << 1 | 1;
			}
		} else {
			/* No comptags available; fall back to the
			 * uncompressed kind. */
			kind = kindm[kind];
		}
	}

	map->type |= BIT(0);
	map->type |= (u64)priv << 1;
	map->type |= (u64)  ro << 2;
	map->type |= (u64) vol << 32;
	map->type |= (u64)aper << 33;
	map->type |= (u64)kind << 36;
	return 0;
}

int
gf100_vmm_aper(enum nvkm_memory_target target)
{
	switch (target) {
	case NVKM_MEM_TARGET_VRAM: return 0;
	case NVKM_MEM_TARGET_HOST: return 2;
	case NVKM_MEM_TARGET_NCOH: return 3;
	default:
		return -EINVAL;
	}
}

void
gf100_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	/* Clear the page-directory address and limit written by
	 * gf100_vmm_join_(). */
	nvkm_fo64(inst, 0x0200, 0x00000000, 2);
}
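
/* Attach this VMM to an instance block by writing the page directory
 * address (tagged with its memory target) at +0x0200 and the address
 * limit at +0x0208.
 */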
int
gf100_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
{
	struct nvkm_mmu_pt *pd = vmm->pd->pt[0];

	switch (nvkm_memory_target(pd->memory)) {
	case NVKM_MEM_TARGET_VRAM: base |= 0ULL << 0; break;
	case NVKM_MEM_TARGET_HOST: base |= 2ULL << 0;
		base |= BIT_ULL(2) /* VOL. */;
		break;
	case NVKM_MEM_TARGET_NCOH: base |= 3ULL << 0; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	base |= pd->addr;

	nvkm_kmap(inst);
	nvkm_wo64(inst, 0x0200, base);
	nvkm_wo64(inst, 0x0208, vmm->limit - 1);
	nvkm_done(inst);
	return 0;
}

int
gf100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	return gf100_vmm_join_(vmm, inst, 0);
}

static const struct nvkm_vmm_func
gf100_vmm_17 = {
	.join = gf100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gf100_vmm_valid,
	.flush = gf100_vmm_flush,
	.page = {
		{ 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
		{ 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
		{}
	}
};

static const struct nvkm_vmm_func
gf100_vmm_16 = {
	.join = gf100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gf100_vmm_valid,
	.flush = gf100_vmm_flush,
	.page = {
		{ 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
		{ 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
		{}
	}
};
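
/* Select the page hierarchy from the board's big-page size: fb->page
 * is log2(big page size), i.e. 16 for 64KiB or 17 for 128KiB pages.
 */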
int
gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
	       const struct nvkm_vmm_func *func_17,
	       struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
	       struct lock_class_key *key, const char *name,
	       struct nvkm_vmm **pvmm)
{
	switch (mmu->subdev.device->fb->page) {
	case 16: return nv04_vmm_new_(func_16, mmu, 0, addr, size,
				      argv, argc, key, name, pvmm);
	case 17: return nv04_vmm_new_(func_17, mmu, 0, addr, size,
				      argv, argc, key, name, pvmm);
	default:
		WARN_ON(1);
		return -EINVAL;
	}
}

int
gf100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
	      struct lock_class_key *key, const char *name,
	      struct nvkm_vmm **pvmm)
{
	return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, addr,
			      size, argv, argc, key, name, pvmm);
}