/*	$NetBSD: subr_kcov.c,v 1.3 2019/02/23 12:07:40 kamil Exp $	*/

/*
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Siddharth Muralee.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/module.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>
#include <sys/kcov.h>

#define KCOV_BUF_MAX_ENTRIES	(256 << 10)

/*
 * The KCOV descriptors are allocated during open(), and are associated with
 * the calling proc. They are freed lazily, once their refcount drops to
 * zero, which happens only when the process exits; this guarantees that
 * kd->buf is not mmapped in a currently running LWP. A KCOV descriptor is
 * active on at most one LWP at a time within the proc.
 *
 * In the refcount, one ref is for the proc, and one ref is for the LWP where
 * the descriptor is active. In each case, the descriptor is pointed to in
 * the proc's and LWP's specificdata.
 */
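
/*
 * Rough userland usage sketch (informational comment only, not part of the
 * kernel build).  The ioctl names and the buffer layout come from this file
 * and <sys/kcov.h>; the "/dev/kcov" path, the exact ioctl argument
 * conventions and the mmap length are assumptions to be checked against
 * kcov(4):
 *
 *	uint64_t nent = 1 << 16;
 *	int fd = open("/dev/kcov", O_RDWR);
 *	ioctl(fd, KCOV_IOC_SETBUFSIZE, &nent);
 *	kcov_int_t *buf = mmap(NULL, nent * KCOV_ENTRY_SIZE,
 *	    PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_IOC_ENABLE, NULL);	// trace the calling LWP
 *	buf[0] = 0;				// reset the entry counter
 *	... issue the syscalls to trace ...
 *	// buf[0] now holds the number of recorded PCs; the PCs themselves
 *	// are in buf[1] .. buf[buf[0]].
 *	ioctl(fd, KCOV_IOC_DISABLE, NULL);
 */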

typedef struct kcov_desc {
	kmutex_t lock;		/* serializes access to the descriptor */
	int refcnt;		/* one ref for the proc, one for the active LWP */
	kcov_int_t *buf;	/* coverage buffer; buf[0] is the entry counter */
	size_t bufnent;		/* number of PC slots in buf (excludes buf[0]) */
	size_t bufsize;		/* page-rounded allocation size of buf */
	TAILQ_ENTRY(kcov_desc) entry;
} kcov_t;

static specificdata_key_t kcov_proc_key;
static specificdata_key_t kcov_lwp_key;

static void
kcov_lock(kcov_t *kd)
{

	mutex_enter(&kd->lock);
	KASSERT(kd->refcnt > 0);
}

static void
kcov_unlock(kcov_t *kd)
{

	mutex_exit(&kd->lock);
}

static void
kcov_lwp_take(kcov_t *kd)
{

	kd->refcnt++;
	KASSERT(kd->refcnt == 2);
	lwp_setspecific(kcov_lwp_key, kd);
}

static void
kcov_lwp_release(kcov_t *kd)
{

	KASSERT(kd->refcnt == 2);
	kd->refcnt--;
	lwp_setspecific(kcov_lwp_key, NULL);
}

static inline bool
kcov_is_owned(kcov_t *kd)
{

	return (kd->refcnt > 1);
}

static void
kcov_free(void *arg)
{
	kcov_t *kd = (kcov_t *)arg;
	bool dofree;

	if (kd == NULL) {
		return;
	}

	/* Snapshot the refcount while still holding the lock. */
	kcov_lock(kd);
	kd->refcnt--;
	dofree = (kd->refcnt == 0);
	kcov_unlock(kd);

	if (!dofree) {
		return;
	}
	if (kd->buf != NULL) {
		uvm_km_free(kernel_map, (vaddr_t)kd->buf, kd->bufsize,
		    UVM_KMF_WIRED);
	}
	mutex_destroy(&kd->lock);
	kmem_free(kd, sizeof(*kd));
}

static int
kcov_allocbuf(kcov_t *kd, uint64_t nent)
{
	size_t size;

	if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
		return EINVAL;
	if (kd->buf != NULL)
		return EEXIST;

	size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
	kd->buf = (kcov_int_t *)uvm_km_alloc(kernel_map, size, 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (kd->buf == NULL)
		return ENOMEM;

	/* buf[0] is reserved for the entry counter, hence nent - 1 PC slots. */
	kd->bufnent = nent - 1;
	kd->bufsize = size;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int
kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct proc *p = l->l_proc;
	kcov_t *kd;

	kd = proc_getspecific(p, kcov_proc_key);
	if (kd != NULL)
		return EBUSY;

	kd = kmem_zalloc(sizeof(*kd), KM_SLEEP);
	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);
	kd->refcnt = 1;
	proc_setspecific(p, kcov_proc_key, kd);

	return 0;
}

static int
kcov_close(dev_t dev, int flag, int mode, struct lwp *l)
{

	return 0;
}

static int
kcov_ioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct proc *p = l->l_proc;
	int error = 0;
	kcov_t *kd;

	kd = proc_getspecific(p, kcov_proc_key);
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	switch (cmd) {
	case KCOV_IOC_SETBUFSIZE:
		if (kcov_is_owned(kd)) {
			error = EBUSY;
			break;
		}
		error = kcov_allocbuf(kd, *((uint64_t *)addr));
		break;
	case KCOV_IOC_ENABLE:
		if (kcov_is_owned(kd)) {
			error = EBUSY;
			break;
		}
		if (kd->buf == NULL) {
			error = ENOBUFS;
			break;
		}
		KASSERT(l == curlwp);
		kcov_lwp_take(kd);
		break;
	case KCOV_IOC_DISABLE:
		if (lwp_getspecific(kcov_lwp_key) == NULL) {
			error = ENOENT;
			break;
		}
		KASSERT(l == curlwp);
		kcov_lwp_release(kd);
		break;
	default:
		error = EINVAL;
	}

	kcov_unlock(kd);
	return error;
}

static paddr_t
kcov_mmap(dev_t dev, off_t offset, int prot)
{
	kcov_t *kd;
	paddr_t pa;
	vaddr_t va;

	kd = proc_getspecific(curproc, kcov_proc_key);
	KASSERT(kd != NULL);

	if ((offset < 0) || (offset >= kd->bufnent * KCOV_ENTRY_SIZE)) {
		return (paddr_t)-1;
	}
	if (offset & PAGE_MASK) {
		return (paddr_t)-1;
	}
	va = (vaddr_t)kd->buf + offset;
	if (!pmap_extract(pmap_kernel(), va, &pa)) {
		return (paddr_t)-1;
	}

	return atop(pa);
}

static inline bool
in_interrupt(void)
{
	/* ci_idepth is negative at base level, so >= 0 means interrupt context. */
	return curcpu()->ci_idepth >= 0;
}

void __sanitizer_cov_trace_pc(void);

/*
 * Coverage hook emitted by the compiler (-fsanitize-coverage=trace-pc):
 * record the caller's return address in the buffer of the traced LWP, if any.
 */
void
__sanitizer_cov_trace_pc(void)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = lwp_getspecific(kcov_lwp_key);
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	/* buf[0] is the number of PCs recorded so far; PCs start at buf[1]. */
	idx = kd->buf[0];
	if (idx < kd->bufnent) {
		kd->buf[idx+1] = (intptr_t)__builtin_return_address(0);
		kd->buf[0]++;
	}
}

/* -------------------------------------------------------------------------- */

const struct cdevsw kcov_cdevsw = {
	.d_open = kcov_open,
	.d_close = kcov_close,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = kcov_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = kcov_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

MODULE(MODULE_CLASS_ANY, kcov, NULL);

static void
kcov_init(void)
{

	proc_specific_key_create(&kcov_proc_key, kcov_free);
	lwp_specific_key_create(&kcov_lwp_key, kcov_free);
}

static int
kcov_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		kcov_init();
		return 0;
	case MODULE_CMD_FINI:
		return EINVAL;
	default:
		return ENOTTY;
	}
}