pmap_pvt.c revision 1.12 1 1.12 rin /* $NetBSD: pmap_pvt.c,v 1.12 2022/05/07 06:53:16 rin Exp $ */
2 1.1 skrll
3 1.1 skrll /*-
4 1.9 ad * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
5 1.1 skrll * All rights reserved.
6 1.1 skrll *
7 1.1 skrll * This code is derived from software contributed to The NetBSD Foundation
8 1.1 skrll * by Taylor R. Campbell.
9 1.1 skrll *
10 1.1 skrll * Redistribution and use in source and binary forms, with or without
11 1.1 skrll * modification, are permitted provided that the following conditions
12 1.1 skrll * are met:
13 1.1 skrll * 1. Redistributions of source code must retain the above copyright
14 1.1 skrll * notice, this list of conditions and the following disclaimer.
15 1.1 skrll * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 skrll * notice, this list of conditions and the following disclaimer in the
17 1.1 skrll * documentation and/or other materials provided with the distribution.
18 1.1 skrll *
19 1.1 skrll * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 skrll * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 skrll * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 skrll * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 skrll * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 skrll * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 skrll * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 skrll * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 skrll * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 skrll * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 skrll * POSSIBILITY OF SUCH DAMAGE.
30 1.1 skrll */
31 1.1 skrll
32 1.1 skrll #include <sys/cdefs.h>
33 1.12 rin __RCSID("$NetBSD: pmap_pvt.c,v 1.12 2022/05/07 06:53:16 rin Exp $");
34 1.1 skrll
35 1.11 skrll #include <sys/param.h>
36 1.6 skrll #include <sys/atomic.h>
37 1.1 skrll #include <sys/kmem.h>
38 1.1 skrll #include <sys/pserialize.h>
39 1.1 skrll
40 1.1 skrll #include <uvm/uvm.h>
41 1.1 skrll #include <uvm/pmap/pmap_pvt.h>
42 1.1 skrll
43 1.12 rin #if !defined(PMAP_PV_TRACK_ONLY_STUBS)
44 1.1 skrll /*
45 1.1 skrll * unmanaged pv-tracked ranges
46 1.1 skrll *
47 1.1 skrll * This is a linear list for now because the only user are the DRM
48 1.1 skrll * graphics drivers, with a single tracked range per device, for the
49 1.1 skrll * graphics aperture, so there are expected to be few of them.
50 1.1 skrll *
51 1.1 skrll * This is used only after the VM system is initialized well enough
52 1.1 skrll * that we can use kmem_alloc.
53 1.1 skrll */
54 1.1 skrll
/*
 * One unmanaged pv-tracked physical range.  Allocated as a single
 * kmem chunk: header plus one struct pmap_page per page in the range
 * (flexible array member).
 */
struct pv_track {
	paddr_t			pvt_start;	/* first physical address (page-aligned) */
	psize_t			pvt_size;	/* size in bytes (page-aligned) */
	struct pv_track		*pvt_next;	/* next range on pv_unmanaged.list */
	struct pmap_page	pvt_pages[];	/* one entry per page in the range */
};
61 1.1 skrll
/*
 * Global list of unmanaged pv-tracked ranges.  Writers serialize on
 * `lock'; lockless readers are protected by the pserialize instance
 * `psz' (see pmap_pv_tracked).
 */
static struct {
	kmutex_t lock;		/* serializes track/untrack (writers) */
	pserialize_t psz;	/* defers frees past concurrent readers */
	struct pv_track *list;	/* singly-linked list of tracked ranges */
} pv_unmanaged __cacheline_aligned;
67 1.1 skrll
68 1.1 skrll void
69 1.1 skrll pmap_pv_init(void)
70 1.1 skrll {
71 1.1 skrll
72 1.3 riastrad mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_NONE);
73 1.1 skrll pv_unmanaged.psz = pserialize_create();
74 1.1 skrll pv_unmanaged.list = NULL;
75 1.1 skrll }
76 1.1 skrll
/*
 * pmap_pv_track(start, size)
 *
 *	Begin pv-tracking for the unmanaged physical range
 *	[start, start + size).  Both start and size must be
 *	page-aligned.  May sleep for allocation.
 */
void
pmap_pv_track(paddr_t start, psize_t size)
{
	struct pv_track *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for allocation. */
	ASSERT_SLEEPABLE();

	npages = size >> PAGE_SHIFT;
	/* Single allocation: header plus flexible pvt_pages[npages] array. */
	pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]),
	    KM_SLEEP);
	pvt->pvt_start = start;
	pvt->pvt_size = size;

#ifdef PMAP_PAGE_INIT
	/* Give the MD pmap a chance to initialize each pmap_page. */
	for (size_t i = 0; i < npages; i++)
		PMAP_PAGE_INIT(&pvt->pvt_pages[i]);
#endif

	mutex_enter(&pv_unmanaged.lock);
	pvt->pvt_next = pv_unmanaged.list;
	/*
	 * Publish with release ordering so lockless readers that see
	 * the new list head also see the fully initialized pv_track.
	 */
	atomic_store_release(&pv_unmanaged.list, pvt);
	mutex_exit(&pv_unmanaged.lock);
}
105 1.1 skrll
/*
 * pmap_pv_untrack(start, size)
 *
 *	Stop pv-tracking the range [start, start + size), which must
 *	previously have been registered with pmap_pv_track with the
 *	same start and size (panics otherwise).  May sleep to wait
 *	for concurrent readers to drain.
 */
void
pmap_pv_untrack(paddr_t start, psize_t size)
{
	struct pv_track **pvtp, *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for pserialize_perform. */
	ASSERT_SLEEPABLE();

	mutex_enter(&pv_unmanaged.lock);
	/* Walk via the link pointers so we can unlink in place. */
	for (pvtp = &pv_unmanaged.list;
	     (pvt = *pvtp) != NULL;
	     pvtp = &pvt->pvt_next) {
		if (pvt->pvt_start != start)
			continue;
		if (pvt->pvt_size != size)
			panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR
			    ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes",
			    pvt->pvt_start, pvt->pvt_size, size);

		/*
		 * Remove from list.  Readers can safely see the old
		 * and new states of the list.
		 */
		atomic_store_relaxed(pvtp, pvt->pvt_next);

		/* Wait for readers who can see the old state to finish. */
		pserialize_perform(pv_unmanaged.psz);

		/*
		 * We now have exclusive access to pvt and can destroy
		 * it.  Poison it to catch bugs.
		 */
		explicit_memset(&pvt->pvt_next, 0x1a, sizeof pvt->pvt_next);
		goto out;
	}
	panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR
	    " (0x%"PRIxPSIZE" bytes)",
	    start, size);
out:	mutex_exit(&pv_unmanaged.lock);

	/* Free size must match the kmem_zalloc in pmap_pv_track. */
	npages = size >> PAGE_SHIFT;
	kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages]));
}
153 1.1 skrll
/*
 * pmap_pv_tracked(pa)
 *
 *	Return the pmap_page for the pv-tracked page at physical
 *	address pa (page-aligned), or NULL if pa lies in no tracked
 *	range.  Lockless: runs inside a pserialize read section so a
 *	concurrent untrack cannot free a range out from under the
 *	list walk.
 *
 *	NOTE(review): pvt is dereferenced (and its pmap_page
 *	returned) after pserialize_read_exit; presumably callers
 *	guarantee the range stays tracked while they use the result
 *	-- confirm against the DRM callers.
 */
struct pmap_page *
pmap_pv_tracked(paddr_t pa)
{
	struct pv_track *pvt;
	size_t pgno;
	int s;

	KASSERT(pa == trunc_page(pa));

	s = pserialize_read_enter();
	/*
	 * Consume ordering on the head load pairs with the release
	 * store in pmap_pv_track, so each pv_track we visit is
	 * fully initialized.
	 */
	for (pvt = atomic_load_consume(&pv_unmanaged.list);
	     pvt != NULL;
	     pvt = pvt->pvt_next) {
		if ((pvt->pvt_start <= pa) &&
		    ((pa - pvt->pvt_start) < pvt->pvt_size))
			break;
	}
	pserialize_read_exit(s);

	if (pvt == NULL)
		return NULL;
	KASSERT(pvt->pvt_start <= pa);
	KASSERT((pa - pvt->pvt_start) < pvt->pvt_size);
	pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT;
	return &pvt->pvt_pages[pgno];
}
180 1.1 skrll
181 1.12 rin #else /* PMAP_PV_TRACK_ONLY_STUBS */
182 1.12 rin /*
183 1.12 rin * Provide empty stubs just for MODULAR kernels.
184 1.12 rin */
185 1.12 rin
/* Stub: no pv-tracking state to initialize. */
void
pmap_pv_init(void)
{

}
191 1.12 rin
/* Stub: pv-tracking disabled; registration is a no-op. */
void
pmap_pv_track(paddr_t start, psize_t size)
{

}
197 1.12 rin
/* Stub: pv-tracking disabled; unregistration is a no-op. */
void
pmap_pv_untrack(paddr_t start, psize_t size)
{

}
203 1.12 rin
/* Stub: pv-tracking disabled; no page is ever pv-tracked. */
struct pmap_page *
pmap_pv_tracked(paddr_t pa)
{

	return NULL;
}
210 1.12 rin #endif
211