/*	$NetBSD: pmap_pvt.c,v 1.10 2020/03/16 20:07:44 ad Exp $	*/

/*-
 * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pmap_pvt.c,v 1.10 2020/03/16 20:07:44 ad Exp $");

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/pserialize.h>

#include <uvm/uvm.h>
#include <uvm/pmap/pmap_pvt.h>

/*
 * unmanaged pv-tracked ranges
 *
 * This is a linear list for now because the only users are the DRM
 * graphics drivers, with a single tracked range per device, for the
 * graphics aperture, so there are expected to be few of them.
 *
 * This is used only after the VM system is initialized well enough
 * that we can use kmem_alloc.
 */

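/*
 * Example (illustrative only): a driver would typically register the
 * physical range it cares about once at attach time and unregister it
 * at detach.  The softc member names below are hypothetical, not part
 * of this API:
 *
 *	pmap_pv_track(sc->sc_aperture_base, sc->sc_aperture_size);
 *	...
 *	pmap_pv_untrack(sc->sc_aperture_base, sc->sc_aperture_size);
 *
 * Both calls may sleep and so must not be made from interrupt context.
 */
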
struct pv_track {
	paddr_t			pvt_start;
	psize_t			pvt_size;
	struct pv_track		*pvt_next;
	struct pmap_page	pvt_pages[];
};

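/*
 * Writers (pmap_pv_track, pmap_pv_untrack) serialize on
 * pv_unmanaged.lock; readers (pmap_pv_tracked) run lockless under
 * pserialize.
 */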
static struct {
	kmutex_t	lock;
	pserialize_t	psz;
	struct pv_track	*list;
} pv_unmanaged __cacheline_aligned;

void
pmap_pv_init(void)
{

	mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_NONE);
	pv_unmanaged.psz = pserialize_create();
	pv_unmanaged.list = NULL;
}

void
pmap_pv_track(paddr_t start, psize_t size)
{
	struct pv_track *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for allocation.  */
	ASSERT_SLEEPABLE();

	npages = size >> PAGE_SHIFT;
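	/*
	 * Allocate the list node and one pmap_page per tracked page in
	 * a single allocation; offsetof over the flexible array member
	 * pvt_pages yields the total size.  pmap_pv_untrack frees it
	 * with the same computation.
	 */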
	pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]),
	    KM_SLEEP);
	pvt->pvt_start = start;
	pvt->pvt_size = size;

#ifdef PMAP_PAGE_INIT
	for (size_t i = 0; i < npages; i++)
		PMAP_PAGE_INIT(&pvt->pvt_pages[i]);
#endif

	mutex_enter(&pv_unmanaged.lock);
	pvt->pvt_next = pv_unmanaged.list;
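	/*
	 * Publish with release ordering so that a reader who observes
	 * the new list head also observes its initialized contents.
	 */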
	atomic_store_release(&pv_unmanaged.list, pvt);
	mutex_exit(&pv_unmanaged.lock);
}

void
pmap_pv_untrack(paddr_t start, psize_t size)
{
	struct pv_track **pvtp, *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for pserialize_perform.  */
	ASSERT_SLEEPABLE();

	mutex_enter(&pv_unmanaged.lock);
	for (pvtp = &pv_unmanaged.list;
	     (pvt = *pvtp) != NULL;
	     pvtp = &pvt->pvt_next) {
		if (pvt->pvt_start != start)
			continue;
		if (pvt->pvt_size != size)
			panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR
			    ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes",
			    pvt->pvt_start, pvt->pvt_size, size);

		/*
		 * Remove from list.  Readers can safely see the old
		 * and new states of the list.
		 */
		atomic_store_relaxed(pvtp, pvt->pvt_next);

		/* Wait for readers who can see the old state to finish.  */
		pserialize_perform(pv_unmanaged.psz);

		/*
		 * We now have exclusive access to pvt and can destroy
		 * it.  Poison it to catch bugs.
		 */
		explicit_memset(&pvt->pvt_next, 0x1a, sizeof pvt->pvt_next);
		goto out;
	}
	panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR
	    " (0x%"PRIxPSIZE" bytes)",
	    start, size);
out:	mutex_exit(&pv_unmanaged.lock);

	npages = size >> PAGE_SHIFT;
	kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages]));
}

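/*
 * Example (illustrative only): MD pmap code can consult the lookup
 * below to decide whether a physical page needs pv-tracking even
 * though it is not managed by UVM, e.g. somewhere in a pmap_enter()
 * implementation:
 *
 *	struct pmap_page *pp;
 *
 *	pp = pmap_pv_tracked(trunc_page(pa));
 *	if (pp != NULL)
 *		... record the new mapping in pp's pv list ...
 *
 * The address passed in must be page-aligned.
 */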
struct pmap_page *
pmap_pv_tracked(paddr_t pa)
{
	struct pv_track *pvt;
	size_t pgno;
	int s;

	KASSERT(pa == trunc_page(pa));

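	/*
	 * Lockless read section, paired with pserialize_perform() in
	 * pmap_pv_untrack(); the consume load below pairs with the
	 * release store in pmap_pv_track().
	 */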
	s = pserialize_read_enter();
	for (pvt = atomic_load_consume(&pv_unmanaged.list);
	     pvt != NULL;
	     pvt = pvt->pvt_next) {
		if ((pvt->pvt_start <= pa) &&
		    ((pa - pvt->pvt_start) < pvt->pvt_size))
			break;
	}
	pserialize_read_exit(s);

	if (pvt == NULL)
		return NULL;
	KASSERT(pvt->pvt_start <= pa);
	KASSERT((pa - pvt->pvt_start) < pvt->pvt_size);
	pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT;
	return &pvt->pvt_pages[pgno];
}