/*	$NetBSD: booke_pmap.c,v 1.1.2.1 2011/01/07 01:26:19 matt Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.1.2.1 2011/01/07 01:26:19 matt Exp $");

#include <sys/param.h>
#include <sys/kcore.h>
#include <sys/buf.h>

#include <uvm/uvm_extern.h>

#include <machine/pmap.h>

/*
 * Initialize the kernel pmap.
 */
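/*
 * Note: under MULTIPROCESSOR, struct pmap ends in a variable-length
 * pm_pai[] array of per-CPU state, so the pool item size below must be
 * computed with offsetof() to cover MAXCPUS entries rather than with
 * sizeof(struct pmap).
 */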
#ifdef MULTIPROCESSOR
#define	PMAP_SIZE	offsetof(struct pmap, pm_pai[MAXCPUS])
#else
#define	PMAP_SIZE	sizeof(struct pmap)
#endif

CTASSERT(sizeof(struct pmap_segtab) == NBPG);

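/*
 * Synchronize the instruction cache with code recently written into
 * another process's address space (e.g. ptrace(2) inserting a
 * breakpoint): walk the range page by page, and for each resident
 * executable page write the data cache back to memory and invalidate
 * the instruction cache so instruction fetches see the new contents.
 */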
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap * const pmap = p->p_vmspace->vm_map.pmap;
	vsize_t off = va & PAGE_MASK;	/* offset of va within its page */

	kpreempt_disable();
	for (const vaddr_t eva = va + len; va < eva; off = 0) {
		/* Stop at the end of the range or of the current page. */
		const vaddr_t segeva = min(eva, va - off + PAGE_SIZE);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL) {
			va = segeva;
			continue;
		}
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry) || !pte_exec_p(pt_entry)) {
			va = segeva;
			continue;
		}
		kpreempt_enable();
		dcache_wb(pte_to_paddr(pt_entry) + off, segeva - va);
		icache_inv(pte_to_paddr(pt_entry) + off, segeva - va);
		kpreempt_disable();
		va = segeva;
	}
	kpreempt_enable();
}

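/*
 * Make a page's contents visible to instruction fetch: write the data
 * cache back to memory, then invalidate any stale instruction cache
 * lines covering the page.
 */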
void
pmap_md_page_syncicache(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	dcache_wb_page(pa);
	icache_inv_page(pa);
}

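/*
 * On BookE the kernel runs direct-mapped (VA == PA), so the direct-map
 * translation in both directions is the identity; any VA outside the
 * managed [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS) window is
 * treated as direct-mapped.
 */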
vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{
	return (vaddr_t) pa;
}

bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{
	return va < VM_MIN_KERNEL_ADDRESS || VM_MAX_KERNEL_ADDRESS <= va;
}

paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{
	return (paddr_t) va;
}

/*
 * Bootstrap the system enough to run with virtual memory.
 * startkernel/endkernel bound the kernel image; avail[] describes the
 * usable physical memory segments.
 */
void
pmap_bootstrap(vaddr_t startkernel, vaddr_t endkernel,
    const phys_ram_seg_t *avail, size_t cnt)
{
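	/*
	 * Hand every physical memory segment to UVM.  The start/end and
	 * avail_start/avail_end arguments are identical because nothing
	 * has been carved out of these segments yet; uvm_pageboot_alloc()
	 * steals its pages from them later in this function.
	 */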
	for (size_t i = 0; i < cnt; i++) {
		printf(" uvm_page_physload(%#lx,%#lx,%#lx,%#lx,%d)",
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
		uvm_page_physload(
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
	}

	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */

	/*
	 * Compute the number of pages kmem_map will have.
	 */
	kmeminit_nkmempages();

	/*
	 * Figure out how many PTEs are necessary to map the kernel.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */

	/* Get size of buffer cache and set an upper limit */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	vsize_t bufsz = buf_memcalc();
	buf_setvalimit(bufsz);

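	/*
	 * Estimate the kernel's KVA demand.  One page of PTEs maps NBSEG
	 * bytes (one segment) of KVA, so round the total up to a segment
	 * boundary and convert it to a count of PTE pages.
	 */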
	vsize_t nsegtabs = pmap_round_seg(VM_PHYS_SIZE
	    + (ubc_nwins << ubc_winshift)
	    + bufsz
	    + 16 * NCARGS
	    + pager_map_size
	    + maxproc * USPACE
#ifdef SYSVSHM
	    + NBPG * shminfo.shmall
#endif
	    + NBPG * nkmempages) >> SEGSHIFT;

	/*
	 * Initialize `FYI' variables.  Note we're relying on
	 * the fact that BSEARCH sorts the vm_physmem[] array
	 * for us.  Must do this before uvm_pageboot_alloc()
	 * can be called.
	 */
	pmap_limits.avail_start = vm_physmem[0].start << PGSHIFT;
	pmap_limits.avail_end = vm_physmem[vm_nphysseg - 1].end << PGSHIFT;
	const vsize_t max_nsegtabs =
	    (pmap_round_seg(VM_MAX_KERNEL_ADDRESS)
	    - pmap_trunc_seg(VM_MIN_KERNEL_ADDRESS)) / NBSEG;
	if (nsegtabs >= max_nsegtabs) {
		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
		nsegtabs = max_nsegtabs;
	} else {
		pmap_limits.virtual_end = VM_MIN_KERNEL_ADDRESS
		    + nsegtabs * NBSEG;
	}

	pmap_pvlist_lock_init(curcpu()->ci_ci.dcache_line_size);

	/*
	 * Now actually allocate the kernel PTE array (must be done
	 * after virtual_end is initialized).
	 */
	vaddr_t segtabs =
	    uvm_pageboot_alloc(NBPG * nsegtabs + sizeof(struct pmap_segtab));

	/*
	 * Initialize the kernel's two-level page table.  This only wastes
	 * an extra page for the segment table and allows the user/kernel
	 * access to be common.
	 */
	struct pmap_segtab * const stp = (void *)segtabs;
	segtabs += round_page(sizeof(struct pmap_segtab));
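	/*
	 * Point ptp at the segment-table slot for the start of KVA and
	 * hand each of the following nsegtabs pages to one segment as
	 * its PTE page.  (The CTASSERT above guarantees the root segment
	 * table itself occupies exactly one page.)
	 */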
	pt_entry_t **ptp = &stp->seg_tab[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
	for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG) {
		*ptp++ = (void *)segtabs;
	}
	pmap_kernel()->pm_segtab = stp;
	curcpu()->ci_pmap_kern_segtab = stp;
	printf(" kern_segtab=%p", stp);

#if 0
	nsegtabs = (physmem + NPTEPG - 1) / NPTEPG;
	segtabs = uvm_pageboot_alloc(NBPG * nsegtabs);
	ptp = stp->seg_tab;
	pt_entry_t pt_entry = PTE_M|PTE_xX|PTE_xR;
	pt_entry_t *ptep = (void *)segtabs;
	printf("%s: allocated %lu page table pages for mapping %u pages\n",
	    __func__, nsegtabs, physmem);
	for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG, ptp++) {
		*ptp = ptep;
		for (u_int j = 0; j < NPTEPG; j++, ptep++) {
			*ptep = pt_entry;
			pt_entry += NBPG;
		}
		printf(" [%u]=%p (%#x)", i, *ptp, **ptp);
		pt_entry |= PTE_xW;
		pt_entry &= ~PTE_xX;
	}

	/*
	 * Now make everything before the kernel inaccessible.
	 */
	for (vaddr_t va = 0; va < startkernel; va += NBPG) {
		stp->seg_tab[va >> SEGSHIFT][(va & SEGOFSET) >> PAGE_SHIFT] = 0;
	}
#endif

	/*
	 * Initialize the pools.
	 */
	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pmap_pv_page_allocator, IPL_NONE);

	tlb_set_asid(0);
}

struct vm_page *
pmap_md_alloc_poolpage(int flags)
{
	/*
	 * Any managed page works for us.
	 */
	return uvm_pagealloc(NULL, 0, NULL, flags);
}

void
pmap_zero_page(paddr_t pa)
{
//	printf("%s(%#lx): calling dcache_zero_page(%#lx)\n", __func__, pa, pa);
	dcache_zero_page(pa);
}

void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	const paddr_t end = src + PAGE_SIZE;

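	/*
	 * Copy one data cache line per outer iteration: "dcbt" prefetches
	 * the next source line and "dcba" establishes the destination
	 * line without fetching its old contents from memory, then
	 * lmw/stmw move the line through r24-r31, 32 bytes at a time.
	 * This assumes the dcache line size is a multiple of 32 bytes.
	 */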
	while (src < end) {
		__asm(
			"dcbt	%2,%0"	"\n\t"	/* touch next src cacheline */
			"dcba	0,%1"	"\n\t"	/* don't fetch dst cacheline */
		    :: "b"(src), "b"(dst), "b"(line_size));
		for (u_int i = 0;
		     i < line_size;
		     src += 32, dst += 32, i += 32) {
			__asm(
				"lmw	24,0(%0)" "\n\t"
				"stmw	24,0(%1)"
			    :: "b"(src), "b"(dst)
			    : "r24", "r25", "r26", "r27",
			      "r28", "r29", "r30", "r31");
		}
	}
}

void
pmap_md_init(void)
{

	/* nothing for now */
}

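/*
 * A VA is device (I/O) space if it lies at or above the top of physical
 * memory and outside the managed kernel VA window, i.e. it falls in the
 * direct-mapped region but is not backed by RAM.
 */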
bool
pmap_md_io_vaddr_p(vaddr_t va)
{
	return va >= pmap_limits.avail_end
	    && !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
}