      1 /*	$NetBSD: nouveau_nvkm_engine_disp_gv100.c,v 1.2 2021/12/18 23:45:35 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2018 Red Hat Inc.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  */
     24 #include <sys/cdefs.h>
     25 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_disp_gv100.c,v 1.2 2021/12/18 23:45:35 riastradh Exp $");
     26 
     27 #include "nv50.h"
     28 #include "head.h"
     29 #include "ior.h"
     30 #include "channv50.h"
     31 #include "rootnv50.h"
     32 
     33 #include <core/gpuobj.h>
     34 #include <subdev/timer.h>
     35 
     36 int
     37 gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
     38 {
     39 	struct nvkm_device *device = disp->engine.subdev.device;
     40 	*pmask = nvkm_rd32(device, 0x610064);
     41 	return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
     42 }
     43 
void
gv100_disp_super(struct work_struct *work)
{
	/* Supervisor interrupt bottom-half (runs on disp->wq): dispatches
	 * the three supervisor stages (nv50_disp_super_1/2/3 helpers) based
	 * on which request bit was latched in disp->super, then acks.
	 */
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 stat = nvkm_rd32(device, 0x6107a8);
	/* Per-head supervisor status, indexed by head->id.
	 * NOTE(review): assumes head->id < 4 — confirm against head count.
	 */
	u32 mask[4];

	nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super), stat);
	/* Latch every head's supervisor status before acting on any. */
	list_for_each_entry(head, &disp->base.head, head) {
		mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
		HEAD_DBG(head, "%08x", mask[head->id]);
	}

	if (disp->super & 0x00000001) {
		/* Supervisor 1: dump core channel state, then stage 1.0 on
		 * each head that flagged itself (bit 12).
		 */
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super & 0x00000002) {
		/* Supervisor 2: stage 2.0 per head, reroute output paths,
		 * then stages 2.1 (bit 16) and 2.2 (bit 12) per head.
		 */
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		nvkm_outp_route(&disp->base);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00010000))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super & 0x00000004) {
		/* Supervisor 3: final stage 3.0 on each flagged head. */
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	/* Clear each head's status, then ack the supervisor request. */
	list_for_each_entry(head, &disp->base.head, head)
		nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);
	nvkm_wr32(device, 0x6107a8, 0x80000000);
}
    100 
    101 static void
    102 gv100_disp_exception(struct nv50_disp *disp, int chid)
    103 {
    104 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
    105 	struct nvkm_device *device = subdev->device;
    106 	u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
    107 	u32 type = (stat & 0x00007000) >> 12;
    108 	u32 mthd = (stat & 0x00000fff) << 2;
    109 	const struct nvkm_enum *reason =
    110 		nvkm_enum_find(nv50_disp_intr_error_type, type);
    111 
    112 	/*TODO: Suspect 33->41 are for WRBK channel exceptions, but we
    113 	 *      don't support those currently.
    114 	 *
    115 	 *      CORE+WIN CHIDs map directly to the FE_EXCEPT() slots.
    116 	 */
    117 	if (chid <= 32) {
    118 		u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
    119 		u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
    120 		nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
    121 				   "mthd %04x data %08x code %08x\n",
    122 			   chid, stat, type, reason ? reason->name : "",
    123 			   mthd, data, code);
    124 	} else {
    125 		nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
    126 				   "mthd %04x\n",
    127 			   chid, stat, type, reason ? reason->name : "", mthd);
    128 	}
    129 
    130 	if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
    131 		switch (mthd) {
    132 		case 0x0200:
    133 			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
    134 			break;
    135 		default:
    136 			break;
    137 		}
    138 	}
    139 
    140 	nvkm_wr32(device, 0x611020 + (chid * 12), 0x90000000);
    141 }
    142 
static void
gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
{
	/* CTRL_DISP interrupt handler: supervisor requests, error
	 * notification, and channel AWAKEN events.
	 */
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611c30);

	if (stat & 0x00000007) {
		/* SUPERVISOR[1-3]: record which stage fired and defer the
		 * work to the supervisor workqueue; 0x611860 write acks.
		 */
		disp->super = (stat & 0x00000007);
		queue_work(disp->wq, &disp->supervisor);
		nvkm_wr32(device, 0x611860, disp->super);
		stat &= ~0x00000007;
	}

	/*TODO: I would guess this is VBIOS_RELEASE, however, NFI how to
	 *      ACK it, nor does RM appear to bother.
	 */
	if (stat & 0x00000008)
		stat &= ~0x00000008;

	if (stat & 0x00000080) {
		/* ERROR: nvkm_mask with a zero mask reads and writes the
		 * register back unchanged.
		 * NOTE(review): presumably the write-back acts as the ack —
		 * confirm against hardware documentation.
		 */
		u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000);
		nvkm_warn(subdev, "error %08x\n", error);
		stat &= ~0x00000080;
	}

	if (stat & 0x00000100) {
		/* AWAKEN: read and ack both status registers before
		 * dispatching uevents, so no notification is lost.
		 */
		unsigned long wndws = nvkm_rd32(device, 0x611858);
		unsigned long other = nvkm_rd32(device, 0x61185c);
		int wndw;

		nvkm_wr32(device, 0x611858, wndws);
		nvkm_wr32(device, 0x61185c, other);

		/* AWAKEN_OTHER_CORE. */
		if (other & 0x00000001)
			nv50_disp_chan_uevent_send(disp, 0);

		/* AWAKEN_WIN_CH(n). */
		for_each_set_bit(wndw, &wndws, disp->wndw.nr) {
			nv50_disp_chan_uevent_send(disp, 1 + wndw);
		}
	}

	/* Anything left is unexpected; log it (no ack known). */
	if (stat)
		nvkm_warn(subdev, "ctrl %08x\n", stat);
}
    190 
static void
gv100_disp_intr_exc_other(struct nv50_disp *disp)
{
	/* EXC_OTHER interrupt: core channel (bit 0) and cursor channel
	 * (bits 16..) exceptions — see the MSK setup in gv100_disp_init.
	 */
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611854);
	unsigned long mask;
	int head;

	if (stat & 0x00000001) {
		/* Core channel (CHID 0): ack, then decode+report. */
		nvkm_wr32(device, 0x611854, 0x00000001);
		gv100_disp_exception(disp, 0);
		stat &= ~0x00000001;
	}

	if ((mask = (stat & 0x00ff0000) >> 16)) {
		/* Cursor channels (CHID 73+n).
		 * NOTE(review): the iteration bound is disp->wndw.nr even
		 * though these bits appear per-head (init masks them by
		 * disp->head.mask) — mirrors upstream; confirm intended.
		 */
		for_each_set_bit(head, &mask, disp->wndw.nr) {
			nvkm_wr32(device, 0x611854, 0x00010000 << head);
			gv100_disp_exception(disp, 73 + head);
			stat &= ~(0x00010000 << head);
		}
	}

	if (stat) {
		/* Unexpected bits: log and ack them anyway. */
		nvkm_warn(subdev, "exception %08x\n", stat);
		nvkm_wr32(device, 0x611854, stat);
	}
}
    219 
    220 static void
    221 gv100_disp_intr_exc_winim(struct nv50_disp *disp)
    222 {
    223 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
    224 	struct nvkm_device *device = subdev->device;
    225 	unsigned long stat = nvkm_rd32(device, 0x611850);
    226 	int wndw;
    227 
    228 	for_each_set_bit(wndw, &stat, disp->wndw.nr) {
    229 		nvkm_wr32(device, 0x611850, BIT(wndw));
    230 		gv100_disp_exception(disp, 33 + wndw);
    231 		stat &= ~BIT(wndw);
    232 	}
    233 
    234 	if (stat) {
    235 		nvkm_warn(subdev, "wimm %08x\n", (u32)stat);
    236 		nvkm_wr32(device, 0x611850, stat);
    237 	}
    238 }
    239 
    240 static void
    241 gv100_disp_intr_exc_win(struct nv50_disp *disp)
    242 {
    243 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
    244 	struct nvkm_device *device = subdev->device;
    245 	unsigned long stat = nvkm_rd32(device, 0x61184c);
    246 	int wndw;
    247 
    248 	for_each_set_bit(wndw, &stat, disp->wndw.nr) {
    249 		nvkm_wr32(device, 0x61184c, BIT(wndw));
    250 		gv100_disp_exception(disp, 1 + wndw);
    251 		stat &= ~BIT(wndw);
    252 	}
    253 
    254 	if (stat) {
    255 		nvkm_warn(subdev, "wndw %08x\n", (u32)stat);
    256 		nvkm_wr32(device, 0x61184c, stat);
    257 	}
    258 }
    259 
static void
gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
{
	/* HEAD_TIMING(head) interrupt: ack LAST_DATA/LOADV, deliver
	 * vblank events, and warn about anything unexpected.
	 */
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611800 + (head * 0x04));

	/* LAST_DATA, LOADV. */
	if (stat & 0x00000003) {
		nvkm_wr32(device, 0x611800 + (head * 0x04), stat & 0x00000003);
		stat &= ~0x00000003;
	}

	if (stat & 0x00000004) {
		/* VBLANK: forward to the common vblank notification. */
		nvkm_disp_vblank(&disp->base, head);
		nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000004);
		stat &= ~0x00000004;
	}

	if (stat) {
		/* Unexpected bits: log and ack them anyway. */
		nvkm_warn(subdev, "head %08x\n", stat);
		nvkm_wr32(device, 0x611800 + (head * 0x04), stat);
	}
}
    284 
void
gv100_disp_intr(struct nv50_disp *disp)
{
	/* Top-level display interrupt dispatch: fans the summary register
	 * out to head timing, window/WIMM/other exception, and CTRL_DISP
	 * handlers; warns about any unhandled bits.
	 */
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611ec0);
	unsigned long mask;
	int head;

	if ((mask = (stat & 0x000000ff))) {
		/* HEAD_TIMING(n): one summary bit per head, up to 8. */
		for_each_set_bit(head, &mask, 8) {
			gv100_disp_intr_head_timing(disp, head);
			stat &= ~BIT(head);
		}
	}

	if (stat & 0x00000200) {
		/* EXC_WIN. */
		gv100_disp_intr_exc_win(disp);
		stat &= ~0x00000200;
	}

	if (stat & 0x00000400) {
		/* EXC_WINIM. */
		gv100_disp_intr_exc_winim(disp);
		stat &= ~0x00000400;
	}

	if (stat & 0x00000800) {
		/* EXC_OTHER. */
		gv100_disp_intr_exc_other(disp);
		stat &= ~0x00000800;
	}

	if (stat & 0x00001000) {
		/* CTRL_DISP. */
		gv100_disp_intr_ctrl_disp(disp);
		stat &= ~0x00001000;
	}

	if (stat)
		nvkm_warn(subdev, "intr %08x\n", stat);
}
    324 
    325 void
    326 gv100_disp_fini(struct nv50_disp *disp)
    327 {
    328 	struct nvkm_device *device = disp->base.engine.subdev.device;
    329 	nvkm_wr32(device, 0x611db0, 0x00000000);
    330 }
    331 
static int
gv100_disp_init(struct nv50_disp *disp)
{
	/* Bring up the display engine: take ownership from VBIOS/firmware,
	 * mirror capability registers, point hardware at our instance
	 * memory, and configure interrupt masks/enables.
	 *
	 * Returns 0 on success, -EBUSY if the previous owner does not
	 * release the display within 2ms.
	 */
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_head *head;
	int i, j;
	u32 tmp;

	/* Claim ownership of display. */
	if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
		nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
		/* Poll up to 2ms for the busy bit to drop. */
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* Lock pin capabilities. */
	tmp = nvkm_rd32(device, 0x610068);
	nvkm_wr32(device, 0x640008, tmp);

	/* SOR capabilities. */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
		nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
	}

	/* Head capabilities. */
	list_for_each_entry(head, &disp->base.head, head) {
		const int id = head->id;

		/* RG. */
		tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
		nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);

		/* POSTCOMP. */
		for (j = 0; j < 6 * 4; j += 4) {
			tmp = nvkm_rd32(device, 0x616100 + (id * 0x800) + j);
			nvkm_wr32(device, 0x640030 + (id * 0x20) + j, tmp);
		}
	}

	/* Window capabilities. */
	for (i = 0; i < disp->wndw.nr; i++) {
		nvkm_mask(device, 0x640004, 1 << i, 1 << i);
		for (j = 0; j < 6 * 4; j += 4) {
			tmp = nvkm_rd32(device, 0x630050 + (i * 0x800) + j);
			nvkm_wr32(device, 0x6401e4 + (i * 0x20) + j, tmp);
		}
	}

	/* IHUB capabilities. */
	for (i = 0; i < 4; i++) {
		tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
		nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
	}

	nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);

	/* Setup instance memory.
	 * NOTE(review): the default case leaves 'tmp' holding the last
	 * IHUB capability value read above — presumably unreachable for
	 * supported memory targets, but confirm; mirrors upstream code.
	 */
	switch (nvkm_memory_target(disp->inst->memory)) {
	case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
	case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
	case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
	default:
		break;
	}
	nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
	nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);

	/* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
	nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
	nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */

	/* EXC_OTHER: CURSn, CORE. */
	nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
				    0x00000001); /* MSK. */
	nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */

	/* EXC_WINIM. */
	nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
	nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */

	/* EXC_WIN. */
	nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
	nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */

	/* HEAD_TIMING(n): VBLANK. */
	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 4;
		nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
		nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
	}

	/* OR. */
	nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
	nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
	return 0;
}
    433 
/* GV100 implementation hooks plugged into the common nv50 disp code. */
static const struct nv50_disp_func
gv100_disp = {
	.init = gv100_disp_init,
	.fini = gv100_disp_fini,
	.intr = gv100_disp_intr,
	.uevent = &gv100_disp_chan_uevent,
	.super = gv100_disp_super,
	.root = &gv100_disp_root_oclass,
	.wndw = { .cnt = gv100_disp_wndw_cnt },
	.head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
	.sor = { .cnt = gv100_sor_cnt, .new = gv100_sor_new },
	.ramht_size = 0x2000,
};
    447 
/* Constructor: create a GV100 display engine instance via the common
 * nv50 path, wired to the gv100_disp function table above.
 */
int
gv100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
	return nv50_disp_new_(&gv100_disp, device, index, pdisp);
}
    453