/*	$NetBSD: pmap_68k.h,v 1.1 2025/11/08 07:30:04 thorpej Exp $	*/

/*-
 * Copyright (c) 2025 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)pmap.h      8.1 (Berkeley) 6/10/93
 */

#ifndef _M68K_PMAP_68K_H_
#define	_M68K_PMAP_68K_H_

#include <sys/rbtree.h>
#include <sys/queue.h>

#include <m68k/mmu_51.h>
#include <m68k/mmu_40.h>

typedef unsigned int	pt_entry_t;

TAILQ_HEAD(pmap_ptpage_list, pmap_ptpage);
LIST_HEAD(pmap_pv_list, pv_entry);

struct pmap {
	struct pmap_table *pm_lev1map;	/* level 1 table */
	paddr_t            pm_lev1pa;	/* PA of level 1 table */
	unsigned int       pm_refcnt;	/* reference count */

	struct pmap_table *pm_pt_cache;	/* most recently used leaf table */

	/* Red-Black tree that contains the active tables. */
	struct rb_tree     pm_tables;	/* lev1map not in here */

	/* Page table pages for segment and leaf tables. */
	struct pmap_ptpage_list pm_ptpages[2];

	struct pmap_pv_list pm_pvlist;	/* all associated P->V entries */

	struct pmap_statistics pm_stats;/* statistics */
};

/*
 * One entry per P->V mapping of a managed page.
 *
 * N.B. We want to keep this structure's size a multiple of 8, and
 * to align these entries to 8 bytes, so that the lower 3 bits of the
 * pv_entry list head can be used for page attributes.
 */
struct pv_entry {
/* 0*/	struct pv_entry     *pv_next;	/* link on page list */
/* 4*/	LIST_ENTRY(pv_entry) pv_pmlist;	/* link on pmap list */
/*12*/	pmap_t               pv_pmap;	/* pmap that contains mapping */
/*16*/	vaddr_t              pv_vf;	/* virtual address + flags */
/*20*/	struct pmap_table   *pv_pt;	/* table that contains the PTE */
/*24*/
};

/* Upper bits of pv_vf contain the virtual address */
#define	PV_VA(pv)	((pv)->pv_vf & ~PAGE_MASK)

/* Lower bits of pv_vf contain flags */
#define	PV_F_CI_VAC	__BIT(0)	/* mapping CI due to VAC alias */
#define	PV_F_CI_USR	__BIT(1)	/* mapping CI due to user request */
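
/*
 * For illustration, assuming a page-aligned va and a hypothetical
 * mapping (not actual pmap code): the flag bits lie entirely below
 * PAGE_MASK, so a single pv_vf value carries both the VA and the
 * flags:
 *
 *	pv->pv_vf = va | PV_F_CI_USR;
 *	...
 *	KASSERT(PV_VA(pv) == va);
 *	KASSERT(pv->pv_vf & PV_F_CI_USR);
 */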

/*
 * This describes an individual table used by the MMU.  Depending on
 * the MMU configuration, there may be more than one table per physical
 * page.
 *
 * For leaf (page) and inner segment tables, pt_st points to the
 * segment table one level up in the tree that maps it, and pt_stidx
 * is the index into that segment table.  pt_st also serves as a
 * proxy for whether or not the table has been inserted into the
 * table lookup tree.  For the level-1 table, pt_st is NULL and
 * that table is not inserted into the lookup tree.
 */
struct pmap_table {
	struct pmap_ptpage *pt_ptpage;
	pt_entry_t         *pt_entries;
	struct pmap_table  *pt_st;
	unsigned short      pt_holdcnt;
	unsigned short      pt_stidx;
	unsigned int        pt_key;
	union {
		LIST_ENTRY(pmap_table) pt_freelist;
		struct rb_node         pt_node;
	};
};

/*
 * This describes a page table page, which contains one or more MMU tables.
 * It is variable-length; the table descriptors are allocated along with it.
 */
struct pmap_ptpage {
	TAILQ_ENTRY(pmap_ptpage)  ptp_list;
	LIST_HEAD(, pmap_table)   ptp_freelist;
	struct vm_page           *ptp_pg;
	unsigned int              ptp_vpagenum : 23,
	                          ptp_freecnt : 8,
	                          ptp_segtab : 1;
	struct pmap_table         ptp_tables[];
};
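
/*
 * Since ptp_tables[] is a flexible array member, the descriptors for
 * the tables packed into one page come from the same allocation as
 * the pmap_ptpage itself.  A sketch of the allocation size, with N
 * standing in for a hypothetical per-page table count:
 *
 *	sizeof(struct pmap_ptpage) + N * sizeof(struct pmap_table)
 */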

/*
 * Abstract definitions for PTE bits / fields.  C code will compile-time-
 * assert the equivalencies that we assume.
 *
 * N.B. assumes exclusive use of short descriptors on 68851.
 */
#define	PTE_VALID	PTE40_RESIDENT	/* == DT51_PAGE */
#define	PTE_WP		PTE40_W		/* == PTE51_WP */
#define	PTE_M		PTE40_M		/* == PTE51_M */
#define	PTE_U		PTE40_U		/* == PTE51_U */
#define	PTE_PVLIST	PTE40_G		/* unused on '51, don't use PFLUSHxN */
#define	PTE_WIRED	PTE40_UR	/* unused on '51 */
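
/*
 * A minimal sketch of the kind of compile-time assertion mentioned
 * above (the real assertions live in the pmap C code; this assumes
 * __CTASSERT() from <sys/cdefs.h>):
 *
 *	__CTASSERT(PTE40_RESIDENT == DT51_PAGE);
 *	__CTASSERT(PTE40_W == PTE51_WP);
 *	__CTASSERT(PTE40_M == PTE51_M);
 *	__CTASSERT(PTE40_U == PTE51_U);
 */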

/*
 * PTE40_CM overlaps with PTE51_CI and PTE51_L (which we don't use).
 */
#define	PTE_CMASK	PTE40_CM

/*
 * Critical bits that, when changed (see pmap_changebit()), require
 * invalidation of the ATC.
 */
#define	PTE_CRIT_BITS	(PTE_WP | PTE_CMASK)

/*
 * Root Pointer attributes for Supervisor and User modes.
 *
 * Supervisor:
 * - No index limit (Lower limit == 0)
 * - Points to Short format descriptor table.
 * - Shared Globally
 *
 * User:
 * - No index limit (Lower limit == 0)
 * - Points to Short format descriptor table.
 */
#define	MMU51_SRP_BITS	(DTE51_LOWER | DTE51_SG | DT51_SHORT)
#define	MMU51_CRP_BITS	(DTE51_LOWER |            DT51_SHORT)

/*
 * Our abstract definition of a "segment" is "that which points to the
 * leaf tables".  On the 2-level configuration, that's the level 1 table,
 * and on the 3-level configuration, that's the level 2 table.
 *
 * This is the logical address layout:
 *
 * 2-level 4KB/page: l1,l2,page    == 10,10,12	(HP MMU compatible)
 * 2-level 8KB/page: l1,l2,page    ==  8,11,13
 * 3-level 4KB/page: l1,l2,l3,page == 7,7,6,12
 * 3-level 8KB/page: l1,l2,l3,page == 7,7,5,13
 *
 * The 2-level l2 size is chosen according to the number of page table
 * entries that fit in a page, so that each segment table entry maps one
 * whole page of PTEs.
 *
 * The 3-level layout is defined by the 68040/68060 hardware, and is not
 * configurable (other than the chosen page size).  If the '851 / '030 is
 * set up to use the 3-level layout, it is specifically configured to be
 * compatible with the 68040.
 */
							/*  8KB /  4KB  */
#define	LA2L_L2_NBITS	(PGSHIFT - 2)			/*   11 /   10  */
#define	LA2L_L2_COUNT	__BIT(LA2L_L2_NBITS)		/* 2048 / 1024  */
#define	LA2L_L2_SHIFT	PGSHIFT				/*   13 /   12  */
#define	LA2L_L1_NBITS	(32 - LA2L_L2_NBITS - PGSHIFT)	/*    8 /   10  */
#define	LA2L_L1_COUNT	__BIT(LA2L_L1_NBITS)		/*  256 / 1024  */
#define	LA2L_L1_SHIFT	(LA2L_L2_NBITS + PGSHIFT)	/*   24 /   22  */

#define	LA2L_L1_MASK	(__BITS(0,(LA2L_L1_NBITS - 1)) << LA2L_L1_SHIFT)
#define	LA2L_L2_MASK	(__BITS(0,(LA2L_L2_NBITS - 1)) << LA2L_L2_SHIFT)

#define	LA2L_RI(va)	__SHIFTOUT((va), LA2L_L1_MASK)	/* root index */
#define	LA2L_PGI(va)	__SHIFTOUT((va), LA2L_L2_MASK)	/* page index */
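
/*
 * Worked example (with a made-up virtual address): with 4KB pages
 * (PGSHIFT == 12), the 2-level split is 10,10,12, so for
 * va == 0x00803042:
 *
 *	LA2L_RI(va)  == 0x002		(va bits 22..31)
 *	LA2L_PGI(va) == 0x003		(va bits 12..21)
 *	va & PAGE_MASK == 0x042		(page offset)
 */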

#define	MMU51_TCR_BITS	(TCR51_E | TCR51_SRE |				\
			 __SHIFTIN(PGSHIFT, TCR51_PS) |			\
			 __SHIFTIN(LA2L_L1_NBITS, TCR51_TIA) |		\
			 __SHIFTIN(LA2L_L2_NBITS, TCR51_TIB))
#define	MMU40_TCR_BITS	(TCR40_E |					\
			 __SHIFTIN(PGSHIFT - 12, TCR40_P))

/* SEG1SHIFT3L is for the "upper" segment on the 3-level configuration */
#define	SEGSHIFT2L	(LA2L_L1_SHIFT)			/*   24 /   22  */
#define	SEGSHIFT3L	(LA40_L2_SHIFT)			/*   18 /   18  */
#define	SEG1SHIFT3L	(LA40_L1_SHIFT)			/*   25 /   25  */

/* NBSEG13L is for the "upper" segment on the 3-level configuration */
#define	NBSEG2L		__BIT(SEGSHIFT2L)
#define	NBSEG3L		__BIT(SEGSHIFT3L)
#define	NBSEG13L	__BIT(SEG1SHIFT3L)

#define	SEGOFSET2L	(NBSEG2L - 1)
#define	SEGOFSET3L	(NBSEG3L - 1)
#define	SEG1OFSET3L	(NBSEG13L - 1)

#define	pmap_trunc_seg_2L(va)	(((vaddr_t)(va)) & ~SEGOFSET2L)
#define	pmap_round_seg_2L(va)	(pmap_trunc_seg_2L((vaddr_t)(va) + SEGOFSET2L))
#define	pmap_seg_offset_2L(va)	(((vaddr_t)(va)) & SEGOFSET2L)

#define	pmap_trunc_seg_3L(va)	(((vaddr_t)(va)) & ~SEGOFSET3L)
#define	pmap_round_seg_3L(va)	(pmap_trunc_seg_3L((vaddr_t)(va) + SEGOFSET3L))
#define	pmap_seg_offset_3L(va)	(((vaddr_t)(va)) & SEGOFSET3L)

#define	pmap_trunc_seg1_3L(va)	(((vaddr_t)(va)) & ~SEG1OFSET3L)
#define	pmap_round_seg1_3L(va)	(pmap_trunc_seg1_3L((vaddr_t)(va)+ SEG1OFSET3L))
#define	pmap_seg1_offset_3L(va)	(((vaddr_t)(va)) & SEG1OFSET3L)
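
/*
 * For illustration (made-up address): in the 3-level layout a segment
 * maps NBSEG3L == 256KB (SEGSHIFT3L == 18), so for va == 0x00045678:
 *
 *	pmap_trunc_seg_3L(va)  == 0x00040000
 *	pmap_round_seg_3L(va)  == 0x00080000
 *	pmap_seg_offset_3L(va) == 0x00005678
 */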

/*
 * pmap-specific data store in the vm_page structure.
 *
 * We keep the U/M attrs in the lower 2 bits of the list head
 * pointer.  This is possible because both the U and M bits are
 * adjacent; we just need to shift them down 3 bit positions.
 *
 * Assumes that PV entries will be 8-byte aligned, but the allocator
 * guarantees this for us.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	uintptr_t pvh_listx;		/* pv_entry list + attrs */
};

#define	PVH_UM_SHIFT	3
#define	PVH_UM_MASK	__BITS(0,1)
#define	PVH_CI		__BIT(2)
#define	PVH_ATTR_MASK	(PVH_UM_MASK | PVH_CI)
#define	PVH_PV_MASK	(~PVH_ATTR_MASK)

#define	VM_MDPAGE_INIT(pg)					\
do {								\
	(pg)->mdpage.pvh_listx = 0;				\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_PVS(pg)					\
	((struct pv_entry *)((pg)->mdpage.pvh_listx & (uintptr_t)PVH_PV_MASK))

#define	VM_MDPAGE_HEAD_PVP(pg)					\
	((struct pv_entry **)&(pg)->mdpage.pvh_listx)

#define	VM_MDPAGE_SETPVP(pvp, pv)				\
do {								\
	/*							\
	 * The page attributes are in the low-order bits of	\
	 * the first PV pointer.  Rather than comparing the	\
	 * address and branching, we just always preserve what	\
	 * might be there (either the attribute bits or zero	\
	 * bits).						\
	 */							\
	*(pvp) = (struct pv_entry *)				\
	    ((uintptr_t)(pv) |					\
	     (((uintptr_t)(*(pvp))) & (uintptr_t)PVH_ATTR_MASK));\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_UM(pg)					\
	(((pg)->mdpage.pvh_listx & PVH_UM_MASK) << PVH_UM_SHIFT)

#define	VM_MDPAGE_ADD_UM(pg, a)					\
do {								\
	(pg)->mdpage.pvh_listx |=				\
	    ((a) >> PVH_UM_SHIFT) & PVH_UM_MASK;		\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_SET_UM(pg, v)					\
do {								\
	(pg)->mdpage.pvh_listx =				\
	    ((pg)->mdpage.pvh_listx & ~PVH_UM_MASK) |		\
	    (((v) >> PVH_UM_SHIFT) & PVH_UM_MASK);		\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_SET_CI(pg)					\
do {								\
	(pg)->mdpage.pvh_listx |= PVH_CI;			\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_CLR_CI(pg)					\
do {								\
	(pg)->mdpage.pvh_listx &= ~PVH_CI;			\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_CI_P(pg)					\
	((pg)->mdpage.pvh_listx & PVH_CI)
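
/*
 * For illustration (a hypothetical 8-byte-aligned pv_entry at
 * 0x00abc120, whose mapping has been both referenced and modified,
 * with CI not set):
 *
 *	pg->mdpage.pvh_listx == 0x00abc123
 *	VM_MDPAGE_PVS(pg)    == (struct pv_entry *)0x00abc120
 *	VM_MDPAGE_UM(pg)     == (PTE_U | PTE_M)
 *	VM_MDPAGE_CI_P(pg)   == 0
 */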

bool	pmap_testbit(struct vm_page *, pt_entry_t);
#define	pmap_is_referenced(pg)					\
	((VM_MDPAGE_UM(pg) & PTE_U) || pmap_testbit((pg), PTE_U))
#define	pmap_is_modified(pg)					\
	((VM_MDPAGE_UM(pg) & PTE_M) || pmap_testbit((pg), PTE_M))

bool	pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t);
#define	pmap_clear_reference(pg)				\
	pmap_changebit((pg), 0, (pt_entry_t)~PTE_U)
#define	pmap_clear_modify(pg)					\
	pmap_changebit((pg), 0, (pt_entry_t)~PTE_M)

#define	pmap_update(pmap)		__nothing
#define	pmap_copy(dp, sp, da, l, sa)	__nothing

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	PMAP_GROWKERNEL			/* enable pmap_growkernel() */

/*
 * pmap_bootstrap1() is called before the MMU is turned on.
 * pmap_bootstrap2() is called after.
 */
paddr_t	pmap_bootstrap1(paddr_t/*nextpa*/, paddr_t/*firstpa*/);
void *	pmap_bootstrap2(void);
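
/*
 * A minimal sketch of the intended calling order, in hypothetical
 * machine-dependent early-startup code (variable names illustrative
 * only):
 *
 *	nextpa = pmap_bootstrap1(nextpa, firstpa);
 *	... load the root pointer(s) and TCR, enable the MMU ...
 *	(void) pmap_bootstrap2();
 */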

bool	pmap_extract_info(pmap_t, vaddr_t, paddr_t *, int *);

/*
 * Functions exported for compatibility with the Hibler pmap, where
 * these are needed by other shared m68k code.
 *
 * XXX Clean this up eventually.
 */
pt_entry_t *	pmap_kernel_pte(vaddr_t);
#define	kvtopte(va)	pmap_kernel_pte(va)

paddr_t		vtophys(vaddr_t);

extern char *	vmmap;
extern void *	msgbufaddr;

/* Support functions for HP MMU. */
void	pmap_init_vac(size_t);
void	pmap_prefer(vaddr_t, vaddr_t *, int);
/* PMAP_PREFER() defined in <machine/pmap.h> */

#endif /* _M68K_PMAP_68K_H_ */