/*	$NetBSD: nouveau_nvkm_subdev_mmu_vmmgm200.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $	*/

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_mmu_vmmgm200.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $");

#include "vmm.h"

#include <nvif/ifb00d.h>
#include <nvif/unpack.h>

static void
gm200_vmm_pgt_sparse(struct nvkm_vmm *vmm,
                     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        /* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
        VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);
}

static const struct nvkm_vmm_desc_func
gm200_vmm_spt = {
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gm200_vmm_pgt_sparse,
        .mem = gf100_vmm_pgt_mem,
        .dma = gf100_vmm_pgt_dma,
#ifndef __NetBSD__
        .sgl = gf100_vmm_pgt_sgl,
#endif
};

static const struct nvkm_vmm_desc_func
gm200_vmm_lpt = {
        .invalid = gk104_vmm_lpt_invalid,
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gm200_vmm_pgt_sparse,
        .mem = gf100_vmm_pgt_mem,
};

static void
gm200_vmm_pgd_sparse(struct nvkm_vmm *vmm,
                     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
        /* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
        VMM_FO064(pt, vmm, pdei * 8, BIT_ULL(35) /* VOL_BIG. */, pdes);
}

static const struct nvkm_vmm_desc_func
gm200_vmm_pgd = {
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gm200_vmm_pgd_sparse,
        .pde = gf100_vmm_pgd_pde,
};
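/*
 * Page-table layout descriptors, leaf level first.  For reference (per
 * struct nvkm_vmm_desc in vmm.h), each entry below reads as { level type,
 * VMA bits covered by the table, bytes per PTE, table alignment, access
 * functions }.  The gm200_vmm_desc_<big>_<page> naming gives the layout's
 * big-page shift and the shift of the page size this particular tree maps,
 * e.g. _17_12 is the 4KiB small-page tree of the 128KiB big-page layout.
 */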
const struct nvkm_vmm_desc
gm200_vmm_desc_17_12[] = {
        { SPT, 15, 8, 0x1000, &gm200_vmm_spt },
        { PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
        {}
};

const struct nvkm_vmm_desc
gm200_vmm_desc_17_17[] = {
        { LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
        { PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
        {}
};

const struct nvkm_vmm_desc
gm200_vmm_desc_16_12[] = {
        { SPT, 14, 8, 0x1000, &gm200_vmm_spt },
        { PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
        {}
};

const struct nvkm_vmm_desc
gm200_vmm_desc_16_16[] = {
        { LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
        { PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
        {}
};

int
gm200_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
{
        if (vmm->func->page[1].shift == 16)
                base |= BIT_ULL(11);
        return gf100_vmm_join_(vmm, inst, base);
}

int
gm200_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
        return gm200_vmm_join_(vmm, inst, 0);
}
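/*
 * In the two nvkm_vmm_func definitions below, each .page[] entry is
 * { page shift, leaf-level descriptor, capability flags }.  Reading the
 * NVKM_VMM_PAGE_* letters per vmm.h: S(parse), V(RAM), H(ost), C(omp),
 * with 'x' marking an unsupported capability.  So the 4KiB small pages
 * (SVHx) can map host memory but not be compressed, while the big pages
 * (SVxC) support compression but can only back VRAM.
 */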
static const struct nvkm_vmm_func
gm200_vmm_17 = {
        .join = gm200_vmm_join,
        .part = gf100_vmm_part,
        .aper = gf100_vmm_aper,
        .valid = gf100_vmm_valid,
        .flush = gf100_vmm_flush,
        .invalidate_pdb = gf100_vmm_invalidate_pdb,
        .page = {
                { 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
                { 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
                { 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SVHx },
                {}
        }
};

static const struct nvkm_vmm_func
gm200_vmm_16 = {
        .join = gm200_vmm_join,
        .part = gf100_vmm_part,
        .aper = gf100_vmm_aper,
        .valid = gf100_vmm_valid,
        .flush = gf100_vmm_flush,
        .invalidate_pdb = gf100_vmm_invalidate_pdb,
        .page = {
                { 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
                { 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
                { 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SVHx },
                {}
        }
};

int
gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
               const struct nvkm_vmm_func *func_17,
               struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
               void *argv, u32 argc, struct lock_class_key *key,
               const char *name, struct nvkm_vmm **pvmm)
{
        const struct nvkm_vmm_func *func;
        union {
                struct gm200_vmm_vn vn;
                struct gm200_vmm_v0 v0;
        } *args = argv;
        int ret = -ENOSYS;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                switch (args->v0.bigpage) {
                case 16: func = func_16; break;
                case 17: func = func_17; break;
                default:
                        return -EINVAL;
                }
        } else
        if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
                func = func_17;
        } else
                return ret;

        return nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
}

int
gm200_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
              void *argv, u32 argc, struct lock_class_key *key,
              const char *name, struct nvkm_vmm **pvmm)
{
        return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
                              size, argv, argc, key, name, pvmm);
}

int
gm200_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
                    void *argv, u32 argc, struct lock_class_key *key,
                    const char *name, struct nvkm_vmm **pvmm)
{
        return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
                              size, argv, argc, key, name, pvmm);
}
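/*
 * Note on the two constructors above: gm200_vmm_new() lets the client
 * choose the big-page shift (16 or 17) through the GM200_VMM_V0 argument
 * it unpacks, whereas gm200_vmm_new_fixed() defers to gf100_vmm_new_(),
 * which selects between the same two layouts from the board's fixed
 * big-page configuration instead of from client arguments.
 */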