/*	$NetBSD: cpufunc.h,v 1.4 2005/01/31 17:32:15 jkunz Exp $	*/

/*	$OpenBSD: cpufunc.h,v 1.17 2000/05/15 17:22:40 mickey Exp $	*/

/*
 * Copyright (c) 1998,2000 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Michael Shalayeff.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 *  (c) Copyright 1988 HEWLETT-PACKARD COMPANY
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *  for any purpose is hereby granted without fee, provided that
 *  the above copyright notice and this notice appears in all
 *  copies, and that the name of Hewlett-Packard Company not be
 *  used in advertising or publicity pertaining to distribution
 *  of the software without specific, written prior permission.
 *  Hewlett-Packard Company makes no representations about the
 *  suitability of this software for any purpose.
 */
/*
 * Copyright (c) 1990,1994 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS"
 * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM ITS USE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 * 	Utah $Hdr: c_support.s 1.8 94/12/14$
 *	Author: Bob Wheeler, University of Utah CSL
 */
#ifndef _HPPA_CPUFUNC_H_
#define _HPPA_CPUFUNC_H_

#include <machine/psl.h>
#include <machine/pte.h>
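
/*
 * tlbbtop/tlbptob convert between a byte address and the page-frame
 * operand the TLB-insert instructions take (the frame number appears
 * to sit 5 bits up in the operand word, hence PGSHIFT - 5); hptbtop
 * derives the hashed page table (HPT) index from a byte address.
 * (Descriptive note, not from the original source.)
 */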
#define tlbbtop(b) ((b) >> (PGSHIFT - 5))
#define tlbptob(p) ((p) << (PGSHIFT - 5))

#define hptbtop(b) ((b) >> 17)

/* Get space register for an address. */
static __inline register_t
ldsid(vaddr_t p)
{
	register_t ret;
	__asm __volatile("ldsid (%1),%0" : "=r" (ret) : "r" (p));
	return ret;
}
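
/*
 * Example (sketch): look up the space id that maps a virtual address,
 * e.g. before pointing %sr1 at that space with mtsp() below.
 *
 *	register_t sid = ldsid(va);
 *	mtsp(sid, 1);
 */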

#define mtctl(v,r) __asm __volatile("mtctl %0,%1":: "r" (v), "i" (r))
#define mfctl(r,v) __asm __volatile("mfctl %1,%0": "=r" (v): "i" (r))
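
/*
 * Example (illustrative): read and re-arm the interval timer (control
 * register 16).  The register number must be a compile-time constant
 * because of the "i" constraint; "next_tick" is a hypothetical cycle
 * count.
 *
 *	u_int itmr;
 *	mfctl(16, itmr);
 *	mtctl(itmr + next_tick, 16);
 */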

#define mfcpu(r,v)	/* XXX no assembler mnemonic; encode the diag by hand */ \
	__asm __volatile("diag  %1\n\t"					\
			 "copy  %%r22, %0"				\
	: "=r" (v) : "i" ((0x1400 | ((r) << 21) | (22))) : "r22")

#define mtsp(v,r) __asm __volatile("mtsp %0,%1":: "r" (v), "i" (r))
#define mfsp(r,v) __asm __volatile("mfsp %1,%0": "=r" (v): "i" (r))

#define ssm(v,r) __asm __volatile("ssm %1,%0": "=r" (r): "i" (v))
#define rsm(v,r) __asm __volatile("rsm %1,%0": "=r" (r): "i" (v))
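
/*
 * Example (sketch): mask interrupts around a short critical section.
 * Both ssm and rsm hand back the previous system mask, so the I-bit
 * can be restored conditionally.
 *
 *	register_t old;
 *	rsm(PSW_I, old);
 *	... critical section ...
 *	if (old & PSW_I)
 *		ssm(PSW_I, old);
 */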

/* Move to system mask.  Old value of system mask is returned. */
static __inline register_t
mtsm(register_t mask)
{
	register_t ret;
	__asm __volatile("ssm 0,%0\n\t"
			 "mtsm %1": "=&r" (ret) : "r" (mask));
	return ret;
}

static __inline register_t
get_psw(void)
{
	register_t ret;
	__asm __volatile("break %1, %2\n\tcopy %%ret0, %0" : "=r" (ret)
		: "i" (HPPA_BREAK_KERNEL), "i" (HPPA_BREAK_GET_PSW)
		: "r28");
	return ret;
}

static __inline register_t
set_psw(register_t psw)
{
	register_t ret;
	__asm __volatile("copy	%0, %%arg0\n\tbreak %1, %2\n\tcopy %%ret0, %0"
		: "=r" (ret)
		: "i" (HPPA_BREAK_KERNEL), "i" (HPPA_BREAK_SET_PSW), "0" (psw)
		: "r26", "r28");
	return ret;
}
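
/*
 * Example (sketch): round-trip the PSW through the kernel break
 * gateway, here setting PSW_R, assuming the usual <machine/psl.h>
 * bit names.
 *
 *	register_t psw = get_psw();
 *	set_psw(psw | PSW_R);
 */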

#define	fdce(sp,off) __asm __volatile("fdce 0(%0,%1)":: "i" (sp), "r" (off))
#define	fice(sp,off) __asm __volatile("fice 0(%0,%1)":: "i" (sp), "r" (off))

/*
 * The nops after the sync keep dependent instructions from issuing
 * before the sync has taken effect; the architecture wants several
 * instructions to elapse after a sync before flushes can be relied
 * upon, and seven is the conservative count used here.
 */
#define sync_caches() \
    __asm __volatile("sync\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop")

/*
 * TLB insert and purge primitives: iitlba/idtlba insert the address
 * portion and iitlbp/idtlbp the protection portion of an entry;
 * pitlb/pdtlb purge a translation, pitlbe/pdtlbe purge a whole entry.
 * Each routine points %sr1 at the target space first.
 */
static __inline void
iitlba(u_int pg, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("iitlba %0,(%%sr1, %1)":: "r" (pg), "r" (va));
}

static __inline void
idtlba(u_int pg, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("idtlba %0,(%%sr1, %1)":: "r" (pg), "r" (va));
}

static __inline void
iitlbp(u_int prot, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("iitlbp %0,(%%sr1, %1)":: "r" (prot), "r" (va));
}

static __inline void
idtlbp(u_int prot, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("idtlbp %0,(%%sr1, %1)":: "r" (prot), "r" (va));
}

static __inline void
pitlb(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pitlb %%r0(%%sr1, %0)":: "r" (va));
}

static __inline void
pdtlb(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pdtlb %%r0(%%sr1, %0)":: "r" (va));
}

static __inline void
pitlbe(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pitlbe %%r0(%%sr1, %0)":: "r" (va));
}

static __inline void
pdtlbe(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pdtlbe %%r0(%%sr1, %0)":: "r" (va));
}
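
/*
 * Example (sketch): install a data translation for one page, roughly
 * as a pmap would; "pa", "space", "va" and the TLB protection word
 * "prot" are assumed to come from the caller.
 *
 *	idtlba(tlbbtop(pa), space, va);
 *	idtlbp(prot, space, va);
 */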

#ifdef _KERNEL
void ficache __P((pa_space_t sp, vaddr_t va, vsize_t size));
void fdcache __P((pa_space_t sp, vaddr_t va, vsize_t size));
void pdcache __P((pa_space_t sp, vaddr_t va, vsize_t size));
void fcacheall __P((void));
void ptlball __P((void));
hppa_hpa_t cpu_gethpa __P((int n));
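
/*
 * Example (sketch): make freshly written code visible to instruction
 * fetch, e.g. after copying a trampoline; HPPA_SID_KERNEL is assumed
 * to be the kernel space id (it is not defined in this file).
 *
 *	fdcache(HPPA_SID_KERNEL, (vaddr_t)dst, len);
 *	ficache(HPPA_SID_KERNEL, (vaddr_t)dst, len);
 *	sync_caches();
 */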

#define PCXL2_ACCEL_IO_START		0xf4000000
#define PCXL2_ACCEL_IO_END		(0xfc000000 - 1)
#define PCXL2_ACCEL_IO_ADDR2MASK(a)	(0x8 >> ((((a) >> 25) - 2) & 3))
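/*
 * PCXL2_ACCEL_IO_ADDR2MASK selects one bit of a 4-bit mask per 32MB
 * window of the accelerated range, highest bit first:
 *	0xf4000000 -> 0x8, 0xf6000000 -> 0x4,
 *	0xf8000000 -> 0x2, 0xfa000000 -> 0x1
 */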
void eaio_l2(int);

/*
 * These flush or purge the data cache for an item whose total size
 * is at most the size of a data cache line; they do not check that
 * this constraint actually holds.
 */
static __inline void
fdcache_small(pa_space_t sp, vaddr_t va, vsize_t size)
{
	__asm volatile(
		"	mtsp	%0,%%sr1		\n"
		"	fdc	%%r0(%%sr1, %1)		\n"
		"	fdc	%2(%%sr1, %1)		\n"
		"	sync				\n"
		"	syncdma				\n"
		:
		: "r" (sp), "r" (va), "r" (size - 1));
}

static __inline void
pdcache_small(pa_space_t sp, vaddr_t va, vsize_t size)
{
	__asm volatile(
		"	mtsp	%0,%%sr1		\n"
		"	pdc	%%r0(%%sr1, %1)		\n"
		"	pdc	%2(%%sr1, %1)		\n"
		"	sync				\n"
		"	syncdma				\n"
		:
		: "r" (sp), "r" (va), "r" (size - 1));
}
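
/*
 * Example (sketch): flush one small object to memory before handing
 * it to a DMA engine.  "desc" is hypothetical, and HPPA_SID_KERNEL
 * is assumed to be the kernel space id (it is not defined here).
 *
 *	fdcache_small(HPPA_SID_KERNEL, (vaddr_t)&desc, sizeof(desc));
 */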

#endif /* _KERNEL */

#endif /* _HPPA_CPUFUNC_H_ */