/*	$NetBSD: pmap_pvt.c,v 1.6 2019/12/18 11:27:56 skrll Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pmap_pvt.c,v 1.6 2019/12/18 11:27:56 skrll Exp $");

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/pserialize.h>

#include <uvm/uvm.h>
#include <uvm/pmap/pmap_pvt.h>
/*
 * unmanaged pv-tracked ranges
 *
 * This is a linear list for now because the only users are the DRM
 * graphics drivers, with a single tracked range per device, for the
 * graphics aperture, so there are expected to be few of them.
 *
 * This is used only after the VM system is initialized well enough
 * that we can use kmem_alloc.
 */

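/*
 * Example (hypothetical, for illustration only -- aperture_base and
 * aperture_size are not names from this file): a DRM driver might
 * bracket the lifetime of its graphics aperture like so:
 *
 *	pmap_pv_track(aperture_base, aperture_size);
 *	...
 *	struct pmap_page *pp = pmap_pv_tracked(trunc_page(pa));
 *	if (pp != NULL)
 *		... pa lies within the tracked aperture ...
 *	...
 *	pmap_pv_untrack(aperture_base, aperture_size);
 */
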
struct pv_track {
	paddr_t			pvt_start;	/* start of tracked range */
	psize_t			pvt_size;	/* size of range, in bytes */
	struct pv_track		*pvt_next;	/* next range in the list */
	struct pmap_page	pvt_pages[];	/* one pmap_page per page */
};

static struct {
	kmutex_t	lock;	/* serializes writers (track/untrack) */
	pserialize_t	psz;	/* lets readers run without the lock */
	struct pv_track	*list;	/* singly-linked list of tracked ranges */
} pv_unmanaged __cacheline_aligned;

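/*
 * pmap_pv_init()
 *
 *	Initialize the unmanaged pv-tracking state: the lock that
 *	serializes writers, the pserialize instance that protects
 *	readers, and the (initially empty) list of tracked ranges.
 *	Expected to be called once, early in pmap setup.
 */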
void
pmap_pv_init(void)
{

	mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_NONE);
	pv_unmanaged.psz = pserialize_create();
	pv_unmanaged.list = NULL;
}

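/*
 * pmap_pv_track(start, size)
 *
 *	Begin pv-tracking the unmanaged pages [start, start + size);
 *	both start and size must be page-aligned.  Allocates one
 *	pmap_page per page and publishes the range so that
 *	pmap_pv_tracked can find it.  May sleep.
 */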
void
pmap_pv_track(paddr_t start, psize_t size)
{
	struct pv_track *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for allocation. */
	ASSERT_SLEEPABLE();

	npages = size >> PAGE_SHIFT;
	pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]),
	    KM_SLEEP);
	pvt->pvt_start = start;
	pvt->pvt_size = size;

	mutex_enter(&pv_unmanaged.lock);
	pvt->pvt_next = pv_unmanaged.list;

	/*
	 * Publish the new entry.  The release ordering pairs with the
	 * atomic_load_consume in pmap_pv_tracked, so readers see the
	 * entry fully initialized.
	 */
	atomic_store_release(&pv_unmanaged.list, pvt);
	mutex_exit(&pv_unmanaged.lock);
}

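/*
 * pmap_pv_untrack(start, size)
 *
 *	Stop pv-tracking the pages [start, start + size).  The range
 *	must previously have been passed to pmap_pv_track with exactly
 *	the same start and size; otherwise this panics.  May sleep,
 *	waiting for readers of the old list to drain.
 */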
void
pmap_pv_untrack(paddr_t start, psize_t size)
{
	struct pv_track **pvtp, *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for pserialize_perform. */
	ASSERT_SLEEPABLE();

	mutex_enter(&pv_unmanaged.lock);
	for (pvtp = &pv_unmanaged.list;
	     (pvt = *pvtp) != NULL;
	     pvtp = &pvt->pvt_next) {
		if (pvt->pvt_start != start)
			continue;
		if (pvt->pvt_size != size)
			panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR
			    ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes",
			    pvt->pvt_start, pvt->pvt_size, size);

		/*
		 * Remove from list.  Readers can safely see the old
		 * and new states of the list.
		 */
		atomic_store_relaxed(pvtp, pvt->pvt_next);

		/* Wait for readers who can see the old state to finish. */
		pserialize_perform(pv_unmanaged.psz);

		/*
		 * We now have exclusive access to pvt and can destroy
		 * it.  Poison it to catch bugs.
		 */
		explicit_memset(&pvt->pvt_next, 0x1a, sizeof pvt->pvt_next);
		goto out;
	}
	panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR
	    " (0x%"PRIxPSIZE" bytes)",
	    start, size);
out:	mutex_exit(&pv_unmanaged.lock);

	npages = size >> PAGE_SHIFT;
	kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages]));
}

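/*
 * pmap_pv_tracked(pa)
 *
 *	If the page-aligned physical address pa lies in a pv-tracked
 *	range, return its pmap_page; otherwise return NULL.  Safe to
 *	call without any lock; the result remains valid as long as the
 *	range stays tracked.
 */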
struct pmap_page *
pmap_pv_tracked(paddr_t pa)
{
	struct pv_track *pvt;
	size_t pgno;
	int s;

	KASSERT(pa == trunc_page(pa));

	/*
	 * Linear scan is fine here: the list is expected to hold only
	 * a few entries, one per graphics device.
	 */
	s = pserialize_read_enter();
	for (pvt = atomic_load_consume(&pv_unmanaged.list);
	     pvt != NULL;
	     pvt = pvt->pvt_next) {
		if ((pvt->pvt_start <= pa) &&
		    ((pa - pvt->pvt_start) < pvt->pvt_size))
			break;
	}
	pserialize_read_exit(s);

	if (pvt == NULL)
		return NULL;
	KASSERT(pvt->pvt_start <= pa);
	KASSERT((pa - pvt->pvt_start) < pvt->pvt_size);
	pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT;
	return &pvt->pvt_pages[pgno];
}