/*	$NetBSD: mtrr_k6.c,v 1.15 2020/01/31 08:21:11 maxv Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * AMD K6 MTRR support.
 */
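
/*
 * A note on the hardware, as assumed by the code below (the symbolic
 * names come from <machine/specialreg.h>): the K6 family packs its two
 * write-combining/uncacheable ranges into the single 64-bit UWCCR MSR
 * (MSR_K6_UWCCR), one range per 32-bit half.  Each half holds a base
 * address field (MTRR_K6_ADDR), a 15-bit mask field (MTRR_K6_MASK) and
 * two enable bits, MTRR_K6_WC (write-combining) and MTRR_K6_UC
 * (uncacheable).  Ranges have 128KB (1 << MTRR_K6_ADDR_SHIFT)
 * granularity, and every low zero bit in the mask doubles the range
 * size: a mask with N low zero bits covers 2^N * 128KB.
 */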

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mtrr_k6.c,v 1.15 2020/01/31 08:21:11 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <machine/specialreg.h>
#include <machine/cpufunc.h>
#include <machine/mtrr.h>

static void	k6_mtrr_init_cpu(struct cpu_info *);
static void	k6_mtrr_reload_cpu(struct cpu_info *);
static void	k6_mtrr_clean(struct proc *);
static int	k6_mtrr_set(struct mtrr *, int *, struct proc *, int);
static int	k6_mtrr_get(struct mtrr *, int *, struct proc *, int);
static void	k6_mtrr_commit(void);
static void	k6_mtrr_dump(const char *);

static int	k6_mtrr_validate(struct mtrr *, struct proc *);
static void	k6_raw2soft(void);
static void	k6_soft2raw(void);

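/*
 * mtrr_var_raw[] mirrors the two 32-bit halves of UWCCR: "msraddr"
 * selects the half (0 = low, 1 = high) and "msrval" caches its raw
 * contents.  mtrr_var[] is the corresponding machine-independent view
 * handed to the MI MTRR code; it is allocated in k6_mtrr_init_first().
 */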
static struct mtrr_state
mtrr_var_raw[] = {
	{ 0, 0 },
	{ 1, 0 },
};

static struct mtrr *mtrr_var;

const struct mtrr_funcs k6_mtrr_funcs = {
	k6_mtrr_init_cpu,
	k6_mtrr_reload_cpu,
	k6_mtrr_clean,
	k6_mtrr_set,
	k6_mtrr_get,
	k6_mtrr_commit,
	k6_mtrr_dump
};

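/*
 * Debugging aid: print the raw contents of both halves of UWCCR,
 * prefixed with the caller-supplied tag.  The mtrr_dump() calls
 * below are compiled out ("#if 0").
 */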
static void
k6_mtrr_dump(const char *tag)
{
	uint64_t uwccr;
	int i;

	uwccr = rdmsr(MSR_K6_UWCCR);

	for (i = 0; i < MTRR_K6_NVAR; i++)
		printf("%s: %x: 0x%08llx\n", tag, mtrr_var_raw[i].msraddr,
		    (uwccr >> (32 * mtrr_var_raw[i].msraddr)) & 0xffffffff);
}

/*
 * There are no multiprocessor K6 systems, so we don't have to deal with
 * any multiprocessor stuff here.
 */
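/*
 * k6_mtrr_reload() programs the hardware from the cached raw values:
 * with interrupts disabled, caching is turned off via CR0_CD, the
 * caches are flushed with wbinvd(), both 32-bit halves are reassembled
 * into one 64-bit value and written to MSR_K6_UWCCR, and the original
 * CR0 is then restored.
 */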
static void
k6_mtrr_reload(void)
{
	uint64_t uwccr;
	uint32_t origcr0, cr0;
	int i;

	x86_disable_intr();

	origcr0 = cr0 = rcr0();
	cr0 |= CR0_CD;
	lcr0(cr0);

	wbinvd();

	for (i = 0, uwccr = 0; i < MTRR_K6_NVAR; i++) {
		uwccr |= mtrr_var_raw[i].msrval <<
		    (32 * mtrr_var_raw[i].msraddr);
	}

	wrmsr(MSR_K6_UWCCR, uwccr);

	lcr0(origcr0);

	x86_enable_intr();
}

static void
k6_mtrr_reload_cpu(struct cpu_info *ci)
{

	k6_mtrr_reload();
}

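/*
 * One-time setup: read the current UWCCR contents into mtrr_var_raw[],
 * allocate the MI mtrr_var[] array, install the K6 function switch,
 * and derive the MI view from the raw values.
 */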
void
k6_mtrr_init_first(void)
{
	uint64_t uwccr;
	int i;

	uwccr = rdmsr(MSR_K6_UWCCR);

	for (i = 0; i < MTRR_K6_NVAR; i++) {
		mtrr_var_raw[i].msrval =
		    (uwccr >> (32 * mtrr_var_raw[i].msraddr)) & 0xffffffff;
	}
#if 0
	mtrr_dump("init mtrr");
#endif

	mtrr_var = (struct mtrr *)
	    malloc(MTRR_K6_NVAR * sizeof(struct mtrr), M_TEMP, M_WAITOK);
	mtrr_funcs = &k6_mtrr_funcs;

	k6_raw2soft();
}

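/*
 * Convert the raw MSR halves into the MI mtrr_var[] entries.  A zero
 * mask means the range is disabled and its entry is left invalid;
 * otherwise the base, length and memory type are extracted and the
 * entry is marked MTRR_VALID.
 */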
static void
k6_raw2soft(void)
{
	struct mtrr *mtrrp;
	uint32_t base, mask;
	int i;

	for (i = 0; i < MTRR_K6_NVAR; i++) {
		mtrrp = &mtrr_var[i];
		memset(mtrrp, 0, sizeof(*mtrrp));
		base = mtrr_var_raw[i].msrval & MTRR_K6_ADDR;
		mask = (mtrr_var_raw[i].msrval & MTRR_K6_MASK) >>
		    MTRR_K6_MASK_SHIFT;
		if (mask == 0)
			continue;
		mtrrp->base = base;
		/*
		 * The region size is 2^N * 128KB, where N is the number
		 * of low zero bits in the mask; this is the inverse of
		 * the mask construction in k6_soft2raw().
		 */
		mtrrp->len = (1U << (ffs(mask) - 1)) << MTRR_K6_ADDR_SHIFT;
		/* XXXJRT can both UC and WC be set? */
		if (mtrr_var_raw[i].msrval & MTRR_K6_UC)
			mtrrp->type = MTRR_TYPE_UC;
		else if (mtrr_var_raw[i].msrval & MTRR_K6_WC)
			mtrrp->type = MTRR_TYPE_WC;
		else	/* XXXJRT Correct default? */
			mtrrp->type = MTRR_TYPE_WT;
		mtrrp->flags |= MTRR_VALID;
	}
}

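/*
 * Convert the MI mtrr_var[] entries back into raw MSR halves.  The
 * mask is built by setting every mask bit from the length's bit
 * position upward, leaving the low "don't care" bits clear.  For
 * example, a 512KB range has len >> MTRR_K6_ADDR_SHIFT == 4, so bits
 * 2 through 14 of the mask are set and the two low bits stay clear,
 * covering 2^2 * 128KB = 512KB.
 */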
static void
k6_soft2raw(void)
{
	struct mtrr *mtrrp;
	uint32_t mask;
	int i, bit;

	for (i = 0; i < MTRR_K6_NVAR; i++) {
		mtrrp = &mtrr_var[i];
		if ((mtrrp->flags & MTRR_VALID) == 0) {
			mtrr_var_raw[i].msrval = 0;
			continue;
		}
		mtrr_var_raw[i].msrval = mtrrp->base;
		for (bit = ffs(mtrrp->len >> MTRR_K6_ADDR_SHIFT) - 1, mask = 0;
		     bit < 15; bit++)
			mask |= 1U << bit;
		mtrr_var_raw[i].msrval |= mask << MTRR_K6_MASK_SHIFT;
		if (mtrrp->type == MTRR_TYPE_UC)
			mtrr_var_raw[i].msrval |= MTRR_K6_UC;
		else if (mtrrp->type == MTRR_TYPE_WC)
			mtrr_var_raw[i].msrval |= MTRR_K6_WC;
	}
}

static void
k6_mtrr_init_cpu(struct cpu_info *ci)
{

	k6_mtrr_reload();
#if 0
	mtrr_dump(device_xname(ci->ci_dev));
#endif
}

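/*
 * Check that a request from MI code can be expressed by the K6:
 * a 128KB-aligned base, a power-of-two length of at least 128KB,
 * and a memory type the hardware understands.
 */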
static int
k6_mtrr_validate(struct mtrr *mtrrp, struct proc *p)
{

	/*
	 * The base must be 128KB aligned.
	 */
	if (mtrrp->base & ~MTRR_K6_ADDR)
		return (EINVAL);

	/*
	 * The length must be at least 128KB and a power of 2.
	 */
	if (mtrrp->len < (128 * 1024) || powerof2(mtrrp->len) == 0)
		return (EINVAL);

	/*
	 * Filter out bad types.
	 */
	switch (mtrrp->type) {
	case MTRR_TYPE_UC:
	case MTRR_TYPE_WC:
	case MTRR_TYPE_WT:
		/* These are fine. */
		break;

	default:
		return (EINVAL);
	}

	return (0);
}

/*
 * Try to find a non-conflicting match on physical MTRRs for the
 * requested range.
 */
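/*
 * An existing register may be reused when it describes exactly the
 * requested range and is either public or owned by the calling
 * process.  An overlap with a range of a different type, or with
 * another process' private range, fails with EBUSY, as does running
 * out of free registers.
 */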
static int
k6_mtrr_setone(struct mtrr *mtrrp, struct proc *p)
{
	struct mtrr *freep;
	uint32_t low, high, curlow, curhigh;
	int i;

	/*
	 * Try one of the variable range registers.
	 * XXX could be more sophisticated here by merging ranges.
	 */
	low = mtrrp->base;
	high = low + mtrrp->len;
	freep = NULL;
	for (i = 0; i < MTRR_K6_NVAR; i++) {
		if ((mtrr_var[i].flags & MTRR_VALID) == 0) {
			freep = &mtrr_var[i];
			continue;
		}
		curlow = mtrr_var[i].base;
		curhigh = curlow + mtrr_var[i].len;
		if (low == curlow && high == curhigh &&
		    (!(mtrr_var[i].flags & MTRR_PRIVATE) ||
		     mtrr_var[i].owner == p->p_pid)) {
			freep = &mtrr_var[i];
			break;
		}
		if (((high >= curlow && high < curhigh) ||
		    (low >= curlow && low < curhigh)) &&
		    ((mtrr_var[i].type != mtrrp->type) ||
		     ((mtrr_var[i].flags & MTRR_PRIVATE) &&
		      mtrr_var[i].owner != p->p_pid))) {
			return (EBUSY);
		}
	}
	if (freep == NULL)
		return (EBUSY);
	mtrrp->flags &= ~MTRR_CANTSET;
	*freep = *mtrrp;
	freep->owner = mtrrp->flags & MTRR_PRIVATE ? p->p_pid : 0;

	return (0);
}

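/*
 * Drop every private range owned by the given process (for example
 * when it exits) and push the resulting state to the hardware.
 */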
static void
k6_mtrr_clean(struct proc *p)
{
	int i;

	for (i = 0; i < MTRR_K6_NVAR; i++) {
		if ((mtrr_var[i].flags & MTRR_PRIVATE) &&
		    (mtrr_var[i].owner == p->p_pid))
			mtrr_var[i].flags &= ~(MTRR_PRIVATE | MTRR_VALID);
	}

	k6_mtrr_commit();
}

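/*
 * Install up to *n ranges into the soft state.  With MTRR_GETSET_USER
 * the array is copied in from userland, otherwise it is used directly.
 * Each entry is validated and matched against the hardware registers;
 * on return *n holds the number of entries actually processed.  The
 * caller is expected to follow up with k6_mtrr_commit() to write the
 * result to the hardware (cf. k6_mtrr_clean() above).
 */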
static int
k6_mtrr_set(struct mtrr *mtrrp, int *n, struct proc *p, int flags)
{
	struct mtrr mtrr;
	int i, error;

	if (*n > MTRR_K6_NVAR) {
		*n = 0;
		return EINVAL;
	}

	error = 0;
	for (i = 0; i < *n; i++) {
		if (flags & MTRR_GETSET_USER) {
			error = copyin(&mtrrp[i], &mtrr, sizeof(mtrr));
			if (error != 0)
				break;
		} else
			mtrr = mtrrp[i];
		error = k6_mtrr_validate(&mtrr, p);
		if (error != 0)
			break;
		error = k6_mtrr_setone(&mtrr, p);
		if (error != 0)
			break;
		if (mtrr.flags & MTRR_PRIVATE)
			p->p_md.md_flags |= MDP_USEDMTRR;
	}
	*n = i;
	return (error);
}

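/*
 * Report the current soft state.  A NULL array pointer is a query for
 * the number of supported ranges; otherwise up to *n entries are
 * copied out (to userland when MTRR_GETSET_USER is set) and *n is
 * updated to the number returned.
 */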
static int
k6_mtrr_get(struct mtrr *mtrrp, int *n, struct proc *p, int flags)
{
	int i, error;

	if (mtrrp == NULL) {
		*n = MTRR_K6_NVAR;
		return (0);
	}

	error = 0;

	for (i = 0; i < MTRR_K6_NVAR && i < *n; i++) {
		if (flags & MTRR_GETSET_USER) {
			error = copyout(&mtrr_var[i], &mtrrp[i],
			    sizeof(*mtrrp));
			if (error != 0)
				break;
		} else
			memcpy(&mtrrp[i], &mtrr_var[i], sizeof(*mtrrp));
	}
	*n = i;
	return (error);
}

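/*
 * Fold the soft state back into the raw MSR images and write them to
 * the hardware.
 */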
static void
k6_mtrr_commit(void)
{

	k6_soft2raw();
	k6_mtrr_reload();
}