/*	vmparam.h revision 1.10	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _POWERPC_OEA_VMPARAM_H_
#define _POWERPC_OEA_VMPARAM_H_

#include <sys/queue.h>

/*
 * Most of the definitions in this file can be overridden by a
 * machine-specific vmparam.h if required.  Otherwise a port can just
 * include this file to get the right thing to happen.
 */
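
/*
 * Illustrative sketch (hypothetical port, not part of this header): a
 * machine-specific vmparam.h can pin down its own limits before pulling
 * in these defaults, e.g.
 *
 *	#define	MAXDSIZ		(512*1024*1024)
 *	#define	VM_PHYSSEG_MAX	32
 *	#include <powerpc/oea/vmparam.h>
 *
 * and the #ifndef guards below then only fill in what the port left
 * unspecified.
 */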

/*
 * OEA processors have 4K pages.  Override the PAGE_* definitions
 * to be compile-time constants.
 */
#define	PAGE_SHIFT	12
#define	PAGE_SIZE	(1 << PAGE_SHIFT)
#define	PAGE_MASK	(PAGE_SIZE - 1)

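/*
 * Usage sketch (illustrative only): with the 4K page constants above,
 * "va & ~PAGE_MASK" truncates a virtual address to the start of its
 * page and "(va + PAGE_MASK) & ~PAGE_MASK" rounds it up to the next
 * page boundary.
 */
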
#ifndef	USRSTACK
#define	USRSTACK		VM_MAXUSER_ADDRESS
#endif

#ifndef	USRSTACK32
#define	USRSTACK32		((uint32_t)VM_MAXUSER_ADDRESS)
#endif

#ifndef	MAXTSIZ
#define	MAXTSIZ			(64*1024*1024)		/* maximum text size */
#endif

#ifndef	MAXDSIZ
#define	MAXDSIZ			(1024*1024*1024)	/* maximum data size */
#endif

#ifndef	MAXSSIZ
#define	MAXSSIZ			(32*1024*1024)		/* maximum stack size */
#endif

#ifndef	DFLDSIZ
#define	DFLDSIZ			(128*1024*1024)		/* default data size */
#endif

#ifndef	DFLSSIZ
#define	DFLSSIZ			(2*1024*1024)		/* default stack size */
#endif

/*
 * Default maximum number of shared memory pages.
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS		1024
#endif

/*
 * Default number of pages in the user raw I/O map.
 */
#ifndef USRIOSIZE
#define	USRIOSIZE		1024
#endif

/*
 * The number of seconds for a process to be blocked before being
 * considered very swappable.
 */
#ifndef MAXSLP
#define	MAXSLP			20
#endif

/*
 * Segment handling stuff
 */
#define	SEGMENT_LENGTH	( 0x10000000L)
#define	SEGMENT_MASK	(~0x0fffffffL)

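/*
 * Illustrative sketch (not used by this header): each OEA segment is
 * 256MB, so for a 32-bit virtual address va,
 *
 *	va & SEGMENT_MASK	is the base of va's segment, and
 *	va >> ADDR_SR_SHFT	is its segment register number
 *
 * where ADDR_SR_SHFT comes from the machine-dependent pte/pmap headers.
 */
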
/*
 * Macros to manipulate VSIDs
 */
#if 0
/*
 * Move the SR# to the top bits to make the lower bits entirely random
 * so as to give better PTE distribution.
 */
#define	VSID__KEYSHFT		(SR_VSID_WIDTH - SR_KEY_LEN)
#define	VSID_SR_INCREMENT	(1L << VSID__KEYSHFT)
#define	VSID__HASHMASK		(VSID_SR_INCREMENT - 1)
#define	VSID_MAKE(sr, hash) \
	(( \
	    (((sr) << VSID__KEYSHFT) | ((hash) & VSID__HASHMASK)) \
	    << SR_VSID_SHFT) & SR_VSID)
#define	VSID_TO_SR(vsid) \
	(((vsid) & SR_VSID) >> (SR_VSID_SHFT + VSID__KEYSHFT))
#define	VSID_TO_HASH(vsid) \
	((((vsid) & SR_VSID) >> SR_VSID_SHFT) & VSID__HASHMASK)
#else
#define	VSID__HASHSHFT		(SR_KEY_LEN)
#define	VSID_SR_INCREMENT	(1L << 0)
#define	VSID__KEYMASK		((1L << VSID__HASHSHFT) - 1)
#define	VSID_MAKE(sr, hash) \
	(( \
	    (((hash) << VSID__HASHSHFT) | ((sr) & VSID__KEYMASK)) \
	     << SR_VSID_SHFT) & SR_VSID)
#define	VSID_TO_SR(vsid) \
	(((vsid) >> SR_VSID_SHFT) & VSID__KEYMASK)
#define	VSID_TO_HASH(vsid) \
	(((vsid) & SR_VSID) >> (SR_VSID_SHFT + VSID__HASHSHFT))
#endif
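
/*
 * Illustrative round trip (assumes SR_KEY_LEN, SR_VSID_SHFT and SR_VSID
 * from the machine-dependent segment register definitions): with the
 * active macros above the hash is packed just above the SR key, so for
 *
 *	vsid = VSID_MAKE(sr, hash);
 *
 * VSID_TO_SR(vsid) recovers the low SR_KEY_LEN bits of sr and
 * VSID_TO_HASH(vsid) recovers hash, truncated to the VSID field width.
 */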

/*
 * Fixed segments
 */
#ifndef USER_SR
#define	USER_SR			12
#endif
#ifndef KERNEL_SR
#define	KERNEL_SR		13
#endif
#ifndef KERNEL2_SR
#define	KERNEL2_SR		14
#endif
#define	KERNEL2_SEGMENT		VSID_MAKE(KERNEL2_SR, KERNEL_VSIDBITS)
#define	KERNEL_VSIDBITS		0xfffff
#define	PHYSMAP_VSIDBITS	0xffffe
#define	PHYSMAPN_SEGMENT(s)	VSID_MAKE(s, PHYSMAP_VSIDBITS)
#define	KERNEL_SEGMENT		VSID_MAKE(KERNEL_SR, KERNEL_VSIDBITS)
#define	KERNELN_SEGMENT(s)	VSID_MAKE(s, KERNEL_VSIDBITS)
/* XXXSL: need something here that will never be mapped */
#define	EMPTY_SEGMENT		VSID_MAKE(0, 0xffffe)
#define	USER_ADDR		((void *)(USER_SR << ADDR_SR_SHFT))
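
/*
 * Illustrative sketch (assumes the usual 32-bit ADDR_SR_SHFT of 28 from
 * the machine-dependent pte/pmap headers): with USER_SR = 12,
 *
 *	USER_ADDR == (void *)(12 << 28) == (void *)0xc0000000
 *
 * which is the segment window the kernel uses to reach the current
 * user address space, while KERNEL_SR and KERNEL2_SR occupy the next
 * two segments above it.
 */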

/*
 * Some system constants
 */
#ifndef	NPMAPS
#define	NPMAPS		32768	/* Number of pmaps in system */
#endif

#define	VM_MIN_ADDRESS		((vaddr_t) 0)
#define	VM_MAXUSER_ADDRESS	((vaddr_t) ~0xfffL)
#define	VM_MAX_ADDRESS		VM_MAXUSER_ADDRESS
#define	VM_MIN_KERNEL_ADDRESS	((vaddr_t) (KERNEL_SR << ADDR_SR_SHFT))
#define	VM_MAX_KERNEL_ADDRESS	(VM_MIN_KERNEL_ADDRESS + 2*SEGMENT_LENGTH)

/*
 * The address to which unspecified mapping requests default.
 * Put the stack in its own segment and start mmapping at the
 * top of the next lower segment.
 */
#ifdef _KERNEL_OPT
#include "opt_uvm.h"
#endif
#define	__USE_TOPDOWN_VM
#define	VM_DEFAULT_ADDRESS(da, sz) \
	(((VM_MAXUSER_ADDRESS - MAXSSIZ) & SEGMENT_MASK) - round_page(sz))
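
/*
 * Worked example (assumes a 32-bit vaddr_t, so VM_MAXUSER_ADDRESS is
 * 0xfffff000): with MAXSSIZ of 32MB,
 *
 *	(0xfffff000 - 0x02000000) & SEGMENT_MASK == 0xf0000000
 *
 * so VM_DEFAULT_ADDRESS(da, sz) ignores the da hint and places a
 * mapping of size sz ending at 0xf0000000, the top of the segment just
 * below the stack's segment; top-down VM then works downward from there.
 */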

#ifndef VM_PHYSSEG_MAX
#define	VM_PHYSSEG_MAX		16
#endif
#define	VM_PHYSSEG_STRAT	VM_PSTRAT_BIGFIRST
#define	VM_PHYSSEG_NOADD

#ifndef VM_PHYS_SIZE
#define	VM_PHYS_SIZE		(USRIOSIZE * PAGE_SIZE)
#endif

#ifndef VM_MAX_KERNEL_BUF
#define	VM_MAX_KERNEL_BUF	(SEGMENT_LENGTH * 3 / 4)
#endif

#define	VM_NFREELIST		16	/* 16 distinct memory segments */
#define	VM_FREELIST_DEFAULT	0
#define	VM_FREELIST_FIRST256	1
#define	VM_FREELIST_FIRST16	2
#define	VM_FREELIST_MAX		3

#ifndef _LOCORE

LIST_HEAD(pvo_head, pvo_entry);

#if __NetBSD_Version__ > 105180000
#define	__HAVE_VM_PAGE_MD

struct vm_page_md {
	struct pvo_head mdpg_pvoh;
	unsigned int mdpg_attrs;
};

#define	VM_MDPAGE_INIT(pg) do {			\
	LIST_INIT(&(pg)->mdpage.mdpg_pvoh);	\
	(pg)->mdpage.mdpg_attrs = 0;		\
} while (/*CONSTCOND*/0)
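
/*
 * Usage sketch (illustrative; the machine-independent side lives in
 * uvm): with __HAVE_VM_PAGE_MD each struct vm_page embeds a struct
 * vm_page_md as pg->mdpage, and VM_MDPAGE_INIT is expected to be run
 * once per page when the vm_page array is set up, e.g.
 *
 *	for (i = 0; i < npages; i++)
 *		VM_MDPAGE_INIT(&pgs[i]);
 *
 * so the OEA pmap starts with an empty PVO list and cleared attribute
 * bits for each page.
 */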

#else

#define	__HAVE_PMAP_PHYSSEG

struct pmap_physseg {
	struct pvo_head *pvoh;
	char *attrs;
};

#endif

#endif	/* _LOCORE */

#endif /* _POWERPC_OEA_VMPARAM_H_ */