/*	$NetBSD: nouveau_nvkm_engine_gr_nv40.c,v 1.3 2021/12/18 23:45:36 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_gr_nv40.c,v 1.3 2021/12/18 23:45:36 riastradh Exp $");

#include "nv40.h"
#include "regs.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

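/*
 * Report the graphics unit configuration.  Register 0x1540 appears to
 * hold the enabled unit mask on NV40-family chips (nv40_gr_init also
 * derives the first active unit from it); the raw value is returned
 * as-is.
 */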
u64
nv40_gr_units(struct nvkm_gr *gr)
{
	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

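/*
 * Bind a graphics object into instance memory: allocate a small
 * (20-byte) gpuobj and seed it with the object class in word 0, with
 * the remaining words cleared.  The bit set at offset 0x08 on
 * big-endian hosts is presumably an endian-swap flag.
 */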
static int
nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		    int align, struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
				  false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
#ifdef __BIG_ENDIAN
		nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
#endif
		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

const struct nvkm_object_func
nv40_gr_object = {
	.bind = nv40_gr_object_bind,
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

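/*
 * Bind a channel's PGRAPH context into instance memory.  The buffer is
 * sized from gr->size (determined when the ctxprog was generated),
 * filled with the default context image by nv40_grctx_fill(), and its
 * own instance address is stored in word 0, presumably so the context
 * program can locate the image later.
 */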
static int
nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		  int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nv40_gr_chan *chan = nv40_gr_chan(object);
	struct nv40_gr *gr = chan->gr;
	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
				  align, true, parent, pgpuobj);
	if (ret == 0) {
		chan->inst = (*pgpuobj)->addr;
		nvkm_kmap(*pgpuobj);
		nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

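/*
 * Idle/unload a channel's PGRAPH context.  If this channel's context is
 * the one currently resident (0x40032c) and we are suspending, request
 * a context save and wait up to two seconds for the context program to
 * finish before clearing the pointer; 0x400330 (apparently the pending
 * context) is cleared the same way.  The whole sequence runs with
 * 0x400720 bit 0 cleared, and the bit is restored at the end.
 */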
static int
nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv40_gr_chan *chan = nv40_gr_chan(object);
	struct nv40_gr *gr = chan->gr;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = 0x01000000 | chan->inst >> 4;
	int ret = 0;

	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);

	if (nvkm_rd32(device, 0x40032c) == inst) {
		if (suspend) {
			nvkm_wr32(device, 0x400720, 0x00000000);
			nvkm_wr32(device, 0x400784, inst);
			nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
			nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
			if (nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
					break;
			) < 0) {
				u32 insn = nvkm_rd32(device, 0x400308);
				nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
				ret = -EBUSY;
			}
		}

		nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
	}

	if (nvkm_rd32(device, 0x400330) == inst)
		nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);

	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
	return ret;
}

static void *
nv40_gr_chan_dtor(struct nvkm_object *object)
{
	struct nv40_gr_chan *chan = nv40_gr_chan(object);
	unsigned long flags;
	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
	list_del(&chan->head);
	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
	return chan;
}

static const struct nvkm_object_func
nv40_gr_chan = {
	.dtor = nv40_gr_chan_dtor,
	.fini = nv40_gr_chan_fini,
	.bind = nv40_gr_chan_bind,
};

int
nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nv40_gr_chan *chan;
	unsigned long flags;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->fifo = fifoch;
	*pobject = &chan->object;

	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
	list_add(&chan->head, &gr->chan);
	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
	return 0;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

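/*
 * Program one tiling region into PGRAPH.  The FIFO is paused and the
 * engine idled before the region's pitch/limit/address (and, where the
 * chipset supports it, Z compression) registers are rewritten; the
 * register layout differs between the NV40/NV41/NV47 variants, hence
 * the chipset switches below.
 */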
static void
nv40_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	unsigned long flags;

	nvkm_fifo_pause(fifo, &flags);
	nv04_gr_idle(&gr->base);

	switch (device->chipset) {
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
		switch (device->chipset) {
		case 0x40:
		case 0x45:
			nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
			nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		case 0x41:
		case 0x42:
		case 0x43:
			nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
			nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		default:
			break;
		}
		break;
	case 0x47:
	case 0x49:
	case 0x4b:
		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
		nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
		nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
		break;
	default:
		WARN_ON(1);
		break;
	}

	nvkm_fifo_start(fifo, &flags);
}

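/*
 * PGRAPH interrupt handler.  The channel owning the active context is
 * looked up by instance address and moved to the front of the tracking
 * list; DMA_VTX_PROTECTION errors get 0x402000 rewritten in place
 * (apparently enough to clear the condition), then all pending status
 * bits are acked and anything that fired is logged with decoded
 * nsource/nstatus bits.
 */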
void
nv40_gr_intr(struct nvkm_gr *base)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nv40_gr_chan *temp, *chan = NULL;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
	u32 show = stat;
	char msg[128], src[128], sta[128];
	unsigned long flags;

	spin_lock_irqsave(&gr->base.engine.lock, flags);
	list_for_each_entry(temp, &gr->chan, head) {
		if (temp->inst >> 4 == inst) {
			chan = temp;
			list_del(&chan->head);
			list_add(&chan->head, &gr->chan);
			break;
		}
	}

	if (stat & NV_PGRAPH_INTR_ERROR) {
		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
			nvkm_mask(device, 0x402000, 0, 0);
		}
	}

	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
				   "nstatus %08x [%s] ch %d [%08x %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   show, msg, nsource, src, nstatus, sta,
			   chan ? chan->fifo->chid : -1, inst << 4,
			   chan ? chan->fifo->object.client->name : "unknown",
			   subc, class, mthd, data);
	}

	spin_unlock_irqrestore(&gr->base.engine.lock, flags);
}

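/*
 * Bring up PGRAPH: generate and upload the context program, clear the
 * current-context pointer, ack/enable interrupts, load the DEBUG and
 * state registers with per-chipset magic values, and mirror what look
 * like the PFB configuration registers (0x100200/0x100204) and the
 * VRAM aperture size (resource 1) into the engine's own registers.
 */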
int
nv40_gr_init(struct nvkm_gr *base)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, i, j;
	u32 vramsz;

	/* generate and upload context program */
	ret = nv40_grctx_init(device, &gr->size);
	if (ret)
		return ret;

	/* No context present currently */
	nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	j = nvkm_rd32(device, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nvkm_wr32(device, 0x405000, i);
	}

	if (device->chipset == 0x40) {
		nvkm_wr32(device, 0x4009b0, 0x83280fff);
		nvkm_wr32(device, 0x4009b4, 0x000000a0);
	} else {
		nvkm_wr32(device, 0x400820, 0x83280eff);
		nvkm_wr32(device, 0x400824, 0x000000a0);
	}

	switch (device->chipset) {
	case 0x40:
	case 0x45:
		nvkm_wr32(device, 0x4009b8, 0x0078e366);
		nvkm_wr32(device, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nvkm_wr32(device, 0x400828, 0x007596ff);
		nvkm_wr32(device, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nvkm_wr32(device, 0x400828, 0x0072cb77);
		nvkm_wr32(device, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nvkm_wr32(device, 0x400860, 0);
		nvkm_wr32(device, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nvkm_wr32(device, 0x400828, 0x07830610);
		nvkm_wr32(device, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nvkm_wr32(device, 0x400b38, 0x2ffff800);
	nvkm_wr32(device, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (device->chipset) {
	case 0x44:
	case 0x4a:
		nvkm_wr32(device, 0x400bc4, 0x1003d888);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nvkm_wr32(device, 0x400bc4, 0x0000e024);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nvkm_wr32(device, 0x400bc4, 0x1003d888);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* begin RAM config */
	vramsz = device->func->resource_size(device, 1) - 1;
	switch (device->chipset) {
	case 0x40:
		nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x400820, 0);
		nvkm_wr32(device, 0x400824, 0);
		nvkm_wr32(device, 0x400864, vramsz);
		nvkm_wr32(device, 0x400868, vramsz);
		break;
	default:
		switch (device->chipset) {
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x45:
		case 0x4e:
		case 0x44:
		case 0x4a:
			nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
			nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
			break;
		default:
			nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
			nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
			break;
		}
		nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x400840, 0);
		nvkm_wr32(device, 0x400844, 0);
		nvkm_wr32(device, 0x4008A0, vramsz);
		nvkm_wr32(device, 0x4008A4, vramsz);
		break;
	}

	return 0;
}

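/*
 * Common constructor for NV40-family PGRAPH implementations: allocate
 * the engine structure, initialise the per-channel context list, and
 * hand off to the generic nvkm_gr constructor.
 */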
int
nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
	     int index, struct nvkm_gr **pgr)
{
	struct nv40_gr *gr;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	*pgr = &gr->base;
	INIT_LIST_HEAD(&gr->chan);

	return nvkm_gr_ctor(func, device, index, true, &gr->base);
}

static const struct nvkm_gr_func
nv40_gr = {
	.init = nv40_gr_init,
	.intr = nv40_gr_intr,
	.tile = nv40_gr_tile,
	.units = nv40_gr_units,
	.chan_new = nv40_gr_chan_new,
	.sclass = {
		{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
		{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
		{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
		{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
		{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
		{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
		{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
		{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
		{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
		{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
		{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
		{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
		{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
		{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
		{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
		{ -1, -1, 0x4097, &nv40_gr_object }, /* curie */
		{}
	}
};

int
nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return nv40_gr_new_(&nv40_gr, device, index, pgr);
}