/*	$NetBSD: nouveau_nvkm_subdev_mc_base.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_mc_base.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $");

#include "priv.h"

#include <core/option.h>
#include <subdev/top.h>

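/*
 * Chipset-specific hook for the not-yet-understood "unk260" register write;
 * a no-op where the chipset provides no such hook.
 */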
void
nvkm_mc_unk260(struct nvkm_device *device, u32 data)
{
	struct nvkm_mc *mc = device->mc;
	if (likely(mc) && mc->func->unk260)
		mc->func->unk260(mc, data);
}

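/*
 * Enable or disable delivery of the interrupt(s) belonging to devidx.
 * The mask is taken from device topology information when available,
 * falling back to the chipset's static interrupt table.
 */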
void
nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
{
	struct nvkm_mc *mc = device->mc;
	const struct nvkm_mc_map *map;
	if (likely(mc) && mc->func->intr_mask) {
		u32 mask = nvkm_top_intr_mask(device, devidx);
		for (map = mc->func->intr; !mask && map->stat; map++) {
			if (map->unit == devidx)
				mask = map->stat;
		}
		mc->func->intr_mask(mc, mask, en ? mask : 0);
	}
}

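/* Disable delivery of all MC interrupts to the host. */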
void
nvkm_mc_intr_unarm(struct nvkm_device *device)
{
	struct nvkm_mc *mc = device->mc;
	if (likely(mc))
		mc->func->intr_unarm(mc);
}

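/* Re-enable delivery of MC interrupts to the host. */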
void
nvkm_mc_intr_rearm(struct nvkm_device *device)
{
	struct nvkm_mc *mc = device->mc;
	if (likely(mc))
		mc->func->intr_rearm(mc);
}

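/*
 * Read the raw MC interrupt status; an all-ones value is treated as the
 * device having dropped off the bus.
 */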
static u32
nvkm_mc_intr_stat(struct nvkm_mc *mc)
{
	u32 intr = mc->func->intr_stat(mc);
	if (WARN_ON_ONCE(intr == 0xffffffff))
		intr = 0; /* likely fallen off the bus */
	return intr;
}

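/*
 * Top-level interrupt dispatch: route each pending interrupt to the subdev
 * that owns it, first via device topology information and then via the
 * chipset's static interrupt table, logging any bits left unclaimed.
 * *handled reports whether anything was pending at all.
 */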
void
nvkm_mc_intr(struct nvkm_device *device, bool *handled)
{
	struct nvkm_mc *mc = device->mc;
	struct nvkm_subdev *subdev;
	const struct nvkm_mc_map *map;
	u32 stat, intr;
	u64 subdevs;

	if (unlikely(!mc))
		return;

	intr = nvkm_mc_intr_stat(mc);
	stat = nvkm_top_intr(device, intr, &subdevs);
	while (subdevs) {
		enum nvkm_devidx subidx = __ffs64(subdevs);
		subdev = nvkm_device_subdev(device, subidx);
		if (subdev)
			nvkm_subdev_intr(subdev);
		subdevs &= ~BIT_ULL(subidx);
	}

	for (map = mc->func->intr; map->stat; map++) {
		if (intr & map->stat) {
			subdev = nvkm_device_subdev(device, map->unit);
			if (subdev)
				nvkm_subdev_intr(subdev);
			stat &= ~map->stat;
		}
	}

	if (stat)
		nvkm_error(&mc->subdev, "intr %08x\n", stat);
	*handled = intr != 0;

	if (mc->func->intr_hack)
		mc->func->intr_hack(mc, handled);
}

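/*
 * Look up the PMC_ENABLE (0x000200) bits controlling devidx, preferring
 * device topology information over the chipset's static reset table.
 */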
static u32
nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
		   enum nvkm_devidx devidx)
{
	struct nvkm_mc *mc = device->mc;
	const struct nvkm_mc_map *map;
	u64 pmc_enable = 0;
	if (likely(mc)) {
		if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
			for (map = mc->func->reset; map && map->stat; map++) {
				if (!isauto || !map->noauto) {
					if (map->unit == devidx) {
						pmc_enable = map->stat;
						break;
					}
				}
			}
		}
	}
	return pmc_enable;
}

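/* Reset a unit by pulsing its PMC_ENABLE bits low and then high again. */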
void
nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
{
	u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
	if (pmc_enable) {
		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
		nvkm_rd32(device, 0x000200);
	}
}

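/* Hold a unit in reset by clearing its PMC_ENABLE bits. */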
void
nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
{
	u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
	if (pmc_enable)
		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
}

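/* Bring a unit out of reset by setting its PMC_ENABLE bits. */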
void
nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
{
	u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
	if (pmc_enable) {
		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
		nvkm_rd32(device, 0x000200);
	}
}

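/* Report whether all of the unit's PMC_ENABLE bits are currently set. */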
bool
nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_devidx devidx)
{
	u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);

	return (pmc_enable != 0) &&
	       ((nvkm_rd32(device, 0x000200) & pmc_enable) == pmc_enable);
}

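/* Subdev fini: quiesce MC interrupt delivery before suspend or unload. */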
static int
nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
{
	nvkm_mc_intr_unarm(subdev->device);
	return 0;
}

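/* Subdev init: run any chipset-specific setup and re-arm interrupt delivery. */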
static int
nvkm_mc_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mc *mc = nvkm_mc(subdev);
	if (mc->func->init)
		mc->func->init(mc);
	nvkm_mc_intr_rearm(subdev->device);
	return 0;
}

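/*
 * Subdev dtor: chipset-specific teardown if provided, otherwise hand back
 * the base object for the core to free.
 */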
static void *
nvkm_mc_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_mc *mc = nvkm_mc(subdev);
	if (mc->func->dtor)
		return mc->func->dtor(mc);
	return nvkm_mc(subdev);
}

static const struct nvkm_subdev_func
nvkm_mc = {
	.dtor = nvkm_mc_dtor,
	.init = nvkm_mc_init,
	.fini = nvkm_mc_fini,
};

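/* Common constructor: set up the subdev base and attach the chipset's method table. */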
void
nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
	     int index, struct nvkm_mc *mc)
{
	nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
	mc->func = func;
}

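/* Allocate and construct a new MC subdev instance. */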
int
nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
	     int index, struct nvkm_mc **pmc)
{
	struct nvkm_mc *mc;
	if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mc_ctor(func, device, index, *pmc);
	return 0;
}