/*	$NetBSD: nouveau_nvkm_subdev_fault_gv100.c,v 1.2 2021/12/18 23:45:39 riastradh Exp $	*/

/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_fault_gv100.c,v 1.2 2021/12/18 23:45:39 riastradh Exp $");

#include "priv.h"

#include <core/memory.h>
#include <subdev/mmu.h>
#include <engine/fifo.h>

#include <nvif/class.h>

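/*
 * Layout of one 32-byte entry in the replayable fault buffer, as decoded
 * by gv100_fault_buffer_process() below (offsets in bytes; names follow
 * the nvkm_fault_data fields they populate):
 *
 *	0x00 instlo / 0x04 insthi: instance block address
 *	0x08 addrlo / 0x0c addrhi: faulting GPU virtual address
 *	0x10 timelo / 0x14 timehi: timestamp
 *	0x18 info0: engine id in bits 7:0
 *	0x1c info1: bit 31 valid, bits 28:24 gpc, bit 20 hub,
 *	            bits 19:16 access type, bits 14:8 client,
 *	            bits 4:0 fault reason
 */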
static void
gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	struct nvkm_memory *mem = buffer->mem;
	u32 get = nvkm_rd32(device, buffer->get);
	u32 put = nvkm_rd32(device, buffer->put);
	if (put == get)
		return;

	nvkm_kmap(mem);
	while (get != put) {
		const u32 base = get * buffer->fault->func->buffer.entry_size;
		const u32 instlo = nvkm_ro32(mem, base + 0x00);
		const u32 insthi = nvkm_ro32(mem, base + 0x04);
		const u32 addrlo = nvkm_ro32(mem, base + 0x08);
		const u32 addrhi = nvkm_ro32(mem, base + 0x0c);
		const u32 timelo = nvkm_ro32(mem, base + 0x10);
		const u32 timehi = nvkm_ro32(mem, base + 0x14);
		const u32 info0 = nvkm_ro32(mem, base + 0x18);
		const u32 info1 = nvkm_ro32(mem, base + 0x1c);
		struct nvkm_fault_data info;

		if (++get == buffer->entries)
			get = 0;
		nvkm_wr32(device, buffer->get, get);

		info.addr = ((u64)addrhi << 32) | addrlo;
		info.inst = ((u64)insthi << 32) | instlo;
		info.time = ((u64)timehi << 32) | timelo;
		info.engine = (info0 & 0x000000ff);
		info.valid = (info1 & 0x80000000) >> 31;
		info.gpc = (info1 & 0x1f000000) >> 24;
		info.hub = (info1 & 0x00100000) >> 20;
		info.access = (info1 & 0x000f0000) >> 16;
		info.client = (info1 & 0x00007f00) >> 8;
		info.reason = (info1 & 0x0000001f);

		nvkm_fifo_fault(device->fifo, &info);
	}
	nvkm_done(mem);
}

static void
gv100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
	if (enable)
		nvkm_mask(device, 0x100a2c, intr, intr);
	else
		nvkm_mask(device, 0x100a34, intr, intr);
}

static void
gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x14;
	nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x00000000);
}

static void
gv100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x14;

	nvkm_mask(device, 0x100e34 + foff, 0xc0000000, 0x40000000);
	nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->addr));
	nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->addr));
	nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x80000000);
}

static void
gv100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x14;

	nvkm_mask(device, 0x100e34 + foff, 0x40000000, 0x40000000);

	buffer->entries = nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
	buffer->get = 0x100e2c + foff;
	buffer->put = 0x100e30 + foff;
}

static int
gv100_fault_ntfy_nrpfb(struct nvkm_notify *notify)
{
	struct nvkm_fault *fault = container_of(notify, typeof(*fault), nrpfb);
	gv100_fault_buffer_process(fault->buffer[0]);
	return NVKM_NOTIFY_KEEP;
}

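/*
 * Non-replayable faults are apparently delivered as a single record
 * latched in registers 0x100e4c-0x100e5c rather than through one of the
 * fault buffers; note that info0 here does double duty, carrying the
 * engine id in bits 7:0 alongside bits 31:12 of the instance address.
 * gv100_fault_intr() acknowledges the record by setting bit 31 of
 * 0x100e60 after this decode.
 */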
static void
gv100_fault_intr_fault(struct nvkm_fault *fault)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_data info;
	const u32 addrlo = nvkm_rd32(device, 0x100e4c);
	const u32 addrhi = nvkm_rd32(device, 0x100e50);
	const u32 info0 = nvkm_rd32(device, 0x100e54);
	const u32 insthi = nvkm_rd32(device, 0x100e58);
	const u32 info1 = nvkm_rd32(device, 0x100e5c);

	info.addr = ((u64)addrhi << 32) | addrlo;
	info.inst = ((u64)insthi << 32) | (info0 & 0xfffff000);
	info.time = 0;
	info.engine = (info0 & 0x000000ff);
	info.valid = (info1 & 0x80000000) >> 31;
	info.gpc = (info1 & 0x1f000000) >> 24;
	info.hub = (info1 & 0x00100000) >> 20;
	info.access = (info1 & 0x000f0000) >> 16;
	info.client = (info1 & 0x00007f00) >> 8;
	info.reason = (info1 & 0x0000001f);

	nvkm_fifo_fault(device->fifo, &info);
}

static void
gv100_fault_intr(struct nvkm_fault *fault)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x100a20);

	if (stat & 0x80000000) {
		gv100_fault_intr_fault(fault);
		nvkm_wr32(device, 0x100e60, 0x80000000);
		stat &= ~0x80000000;
	}

	if (stat & 0x20000000) {
		if (fault->buffer[0]) {
			nvkm_event_send(&fault->event, 1, 0, NULL, 0);
			stat &= ~0x20000000;
		}
	}

	if (stat & 0x08000000) {
		if (fault->buffer[1]) {
			nvkm_event_send(&fault->event, 1, 1, NULL, 0);
			stat &= ~0x08000000;
		}
	}

	if (stat) {
		nvkm_debug(subdev, "intr %08x\n", stat);
	}
}

static void
gv100_fault_fini(struct nvkm_fault *fault)
{
	nvkm_notify_put(&fault->nrpfb);
	if (fault->buffer[0])
		fault->func->buffer.fini(fault->buffer[0]);
	nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
}

static void
gv100_fault_init(struct nvkm_fault *fault)
{
	nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
	fault->func->buffer.init(fault->buffer[0]);
	nvkm_notify_get(&fault->nrpfb);
}

int
gv100_fault_oneinit(struct nvkm_fault *fault)
{
	return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
				gv100_fault_ntfy_nrpfb, true, NULL, 0, 0,
				&fault->nrpfb);
}

static const struct nvkm_fault_func
gv100_fault = {
	.oneinit = gv100_fault_oneinit,
	.init = gv100_fault_init,
	.fini = gv100_fault_fini,
	.intr = gv100_fault_intr,
	.buffer.nr = 2,
	.buffer.entry_size = 32,
	.buffer.info = gv100_fault_buffer_info,
	.buffer.pin = gp100_fault_buffer_pin,
	.buffer.init = gv100_fault_buffer_init,
	.buffer.fini = gv100_fault_buffer_fini,
	.buffer.intr = gv100_fault_buffer_intr,
	/*TODO: Figure out how to expose non-replayable fault buffer, which,
	 *      for some reason, is where recoverable CE faults appear...
	 *
	 *      It's a bit tricky, as both NVKM and SVM will need access to
	 *      the non-replayable fault buffer.
	 */
	.user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
};

int
gv100_fault_new(struct nvkm_device *device, int index,
		struct nvkm_fault **pfault)
{
	return nvkm_fault_new_(&gv100_fault, device, index, pfault);
}