/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define __PMAP_PRIVATE

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.1.4.1 2011/06/06 09:06:25 jruoho Exp $");

#include <sys/param.h>
#include <sys/kcore.h>
#include <sys/buf.h>

#include <uvm/uvm_extern.h>

#include <machine/pmap.h>

/*
 * Initialize the kernel pmap.
 */
#ifdef MULTIPROCESSOR
#define	PMAP_SIZE	offsetof(struct pmap, pm_pai[MAXCPUS])
#else
#define	PMAP_SIZE	sizeof(struct pmap)
#endif
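
/*
 * PMAP_SIZE: on MULTIPROCESSOR kernels the pmap ends with the
 * variable-length pm_pai[] array of per-CPU ASID state, so the pool
 * item size runs up to pm_pai[MAXCPUS] rather than using sizeof.
 */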

/* Each pmap segment table must occupy exactly one full page. */
CTASSERT(sizeof(struct pmap_segtab) == NBPG);

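/*
 * Synchronize the instruction cache after the kernel has written into
 * a process's text (e.g. when a debugger plants a breakpoint): write the
 * modified lines back from the d-cache and invalidate the corresponding
 * i-cache lines, page by page.
 */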
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap * const pmap = p->p_vmspace->vm_map.pmap;
	vsize_t off = va & PAGE_MASK;

	kpreempt_disable();
	for (const vaddr_t eva = va + len; va < eva; off = 0) {
		/* Stop at the end of the range or of the current page. */
		const vaddr_t segeva = min(eva, va - off + PAGE_SIZE);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL) {
			va = segeva;
			continue;
		}
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry) || !pte_exec_p(pt_entry)) {
			va = segeva;
			continue;
		}
		kpreempt_enable();
		dcache_wb(pte_to_paddr(pt_entry), segeva - va);
		icache_inv(pte_to_paddr(pt_entry), segeva - va);
		kpreempt_disable();
		va = segeva;
	}
	kpreempt_enable();
}

void
pmap_md_page_syncicache(struct vm_page *pg, __cpuset_t onproc)
{
	/*
	 * If onproc is empty, we could do a
	 * pmap_page_protect(pg, VM_PROT_NONE) to remove all
	 * mappings of the page and clear its execness.  Then
	 * the next time the page is faulted, it would get its
	 * icache synched.  But this is easier. :)
	 */
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	dcache_wb_page(pa);
	icache_inv_page(pa);
}

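/*
 * Outside the managed range [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS)
 * the kernel sees physical memory directly mapped (VA == PA), which is
 * what the three helpers below express.
 */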
vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{
	return (vaddr_t)pa;
}

bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{
	return va < VM_MIN_KERNEL_ADDRESS || VM_MAX_KERNEL_ADDRESS <= va;
}

paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{
	return (paddr_t)va;
}

/*
 * Bootstrap the system enough to run with virtual memory.
 * avail holds the physical memory segments available for use;
 * startkernel and endkernel bound the kernel image.
 */
void
pmap_bootstrap(vaddr_t startkernel, vaddr_t endkernel,
	const phys_ram_seg_t *avail, size_t cnt)
{
	for (size_t i = 0; i < cnt; i++) {
		printf(" uvm_page_physload(%#lx,%#lx,%#lx,%#lx,%d)",
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
		uvm_page_physload(
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
	}

	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */

	/*
	 * Compute the number of pages kmem_map will have.
	 */
	kmeminit_nkmempages();

	/*
	 * Figure out how many PTEs are needed to map the kernel.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */

	/* Get size of buffer cache and set an upper limit */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	vsize_t bufsz = buf_memcalc();
	buf_setvalimit(bufsz);
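
	/*
	 * The sum below estimates total kernel virtual-address demand:
	 * UBC windows, the buffer cache, exec argument space, the pager
	 * map, a U-area per process, SysV shared memory (if configured),
	 * and kmem_map.  pmap_round_seg() rounds it up to whole segments
	 * of NBSEG bytes; each segment needs one page of PTEs.
	 */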
	vsize_t nsegtabs = pmap_round_seg(VM_PHYS_SIZE
	    + (ubc_nwins << ubc_winshift)
	    + bufsz
	    + 16 * NCARGS
	    + pager_map_size
	    + maxproc * USPACE
#ifdef SYSVSHM
	    + NBPG * shminfo.shmall
#endif
	    + NBPG * nkmempages) / NBSEG;

	/*
	 * Initialize `FYI' variables.  Note we're relying on
	 * the fact that BSEARCH sorts the vm_physmem[] array
	 * for us.  Must do this before uvm_pageboot_alloc()
	 * can be called.
	 */
	pmap_limits.avail_start = vm_physmem[0].start << PGSHIFT;
	pmap_limits.avail_end = vm_physmem[vm_nphysseg - 1].end << PGSHIFT;
	const vsize_t max_nsegtabs =
	    (pmap_round_seg(VM_MAX_KERNEL_ADDRESS)
	    - pmap_trunc_seg(VM_MIN_KERNEL_ADDRESS)) / NBSEG;
	if (nsegtabs >= max_nsegtabs) {
		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
		nsegtabs = max_nsegtabs;
	} else {
		pmap_limits.virtual_end = VM_MIN_KERNEL_ADDRESS
		    + nsegtabs * NBSEG;
	}
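
	/*
	 * For example, assuming 4 KiB pages with 1024 PTEs per page
	 * (so NBSEG is 4 MiB), a 256 MiB kernel VA window allows at
	 * most 64 segment tables (hypothetical numbers; the real
	 * values depend on the port's NBSEG and kernel VA range).
	 */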

	pmap_pvlist_lock_init(curcpu()->ci_ci.dcache_line_size);

	/*
	 * Now actually allocate the kernel PTE array (must be done
	 * after virtual_end is initialized).
	 */
	vaddr_t segtabs =
	    uvm_pageboot_alloc(NBPG * nsegtabs + sizeof(struct pmap_segtab));

	/*
	 * Initialize the kernel's two-level page table.  This only wastes
	 * an extra page for the segment table and allows the user/kernel
	 * access to be common.
	 */
	struct pmap_segtab * const stp = (void *)segtabs;
	segtabs += round_page(sizeof(struct pmap_segtab));
	pt_entry_t **ptp = &stp->seg_tab[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
	for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG) {
		*ptp++ = (void *)segtabs;
	}
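
	/*
	 * A kernel VA now resolves in two steps; a sketch of the
	 * indexing (mirroring the disabled code below):
	 *
	 *	pt_entry_t *ptep = stp->seg_tab[va >> SEGSHIFT];
	 *	pt_entry_t pte = ptep[(va & SEGOFSET) >> PAGE_SHIFT];
	 */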
	pmap_kernel()->pm_segtab = stp;
	curcpu()->ci_pmap_kern_segtab = stp;
	printf(" kern_segtab=%p", stp);

#if 0
	nsegtabs = (physmem + NPTEPG - 1) / NPTEPG;
	segtabs = uvm_pageboot_alloc(NBPG * nsegtabs);
	ptp = stp->seg_tab;
	pt_entry_t pt_entry = PTE_M|PTE_xX|PTE_xR;
	pt_entry_t *ptep = (void *)segtabs;
	printf("%s: allocated %lu page table pages for mapping %u pages\n",
	    __func__, nsegtabs, physmem);
	for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG, ptp++) {
		*ptp = ptep;
		for (u_int j = 0; j < NPTEPG; j++, ptep++) {
			*ptep = pt_entry;
			pt_entry += NBPG;
		}
		printf(" [%u]=%p (%#x)", i, *ptp, **ptp);
		pt_entry |= PTE_xW;
		pt_entry &= ~PTE_xX;
	}

	/*
	 * Now make everything before the kernel inaccessible.
	 */
	for (vaddr_t va = 0; va < startkernel; va += NBPG) {
		stp->seg_tab[va >> SEGSHIFT][(va & SEGOFSET) >> PAGE_SHIFT] = 0;
	}
#endif

	/*
	 * Initialize the pools.
	 */
	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pmap_pv_page_allocator, IPL_NONE);

	tlb_set_asid(0);		/* the kernel runs with ASID 0 */
}

struct vm_page *
pmap_md_alloc_poolpage(int flags)
{
	/*
	 * Any managed page works for us.
	 */
	return uvm_pagealloc(NULL, 0, NULL, flags);
}

void
pmap_zero_page(paddr_t pa)
{
//	printf("%s(%#lx): calling dcache_zero_page(%#lx)\n", __func__, pa, pa);
	dcache_zero_page(pa);
}

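/*
 * Copy a page through the direct map, one cache line at a time:
 * dcbt prefetches the next source line, dcba establishes the
 * destination line without fetching it from memory, and each
 * lmw/stmw pair moves 32 bytes through r24-r31 (this assumes the
 * d-cache line size is a multiple of 32 bytes).
 */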
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	const paddr_t end = src + PAGE_SIZE;

	while (src < end) {
		__asm(
			"dcbt	%2,%0"	"\n\t"	/* touch next src cacheline */
			"dcba	0,%1"	"\n\t"	/* don't fetch dst cacheline */
		    :: "b"(src), "b"(dst), "b"(line_size));
		for (u_int i = 0;
		     i < line_size;
		     src += 32, dst += 32, i += 32) {
			__asm(
				"lmw	24,0(%0)"	"\n\t"
				"stmw	24,0(%1)"
			    :: "b"(src), "b"(dst)
			    : "r24", "r25", "r26", "r27",
			      "r28", "r29", "r30", "r31");
		}
	}
}

void
pmap_md_init(void)
{

	/* nothing for now */
}

bool
pmap_md_io_vaddr_p(vaddr_t va)
{
	return va >= pmap_limits.avail_end
	    && !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
}