/*	$NetBSD: tprof.c,v 1.14 2018/07/13 07:56:29 maxv Exp $	*/

/*-
 * Copyright (c)2008,2009,2010 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tprof.c,v 1.14 2018/07/13 07:56:29 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/cpu.h>
#include <sys/conf.h>
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/workqueue.h>
#include <sys/queue.h>

#include <dev/tprof/tprof.h>
#include <dev/tprof/tprof_ioctl.h>

#include "ioconf.h"

/*
 * locking order:
 *	tprof_reader_lock -> tprof_lock
 *	tprof_startstop_lock -> tprof_lock
 */
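
/*
 * Illustrative sketch only (not an additional locking rule): a path that
 * needs both tprof_reader_lock and tprof_lock, as tprof_read() and
 * tprof_clear() below do, acquires them in this order:
 *
 *	mutex_enter(&tprof_reader_lock);
 *	mutex_enter(&tprof_lock);
 *	...
 *	mutex_exit(&tprof_lock);
 *	mutex_exit(&tprof_reader_lock);
 */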

/*
 * protected by:
 *	L: tprof_lock
 *	R: tprof_reader_lock
 *	S: tprof_startstop_lock
 *	s: writer should hold tprof_startstop_lock and tprof_lock
 *	   reader should hold tprof_startstop_lock or tprof_lock
 */

typedef struct tprof_buf {
	u_int b_used;
	u_int b_size;
	u_int b_overflow;
	u_int b_unused;
	STAILQ_ENTRY(tprof_buf) b_list;
	tprof_sample_t b_data[];
} tprof_buf_t;
#define	TPROF_BUF_BYTESIZE(sz) \
	(sizeof(tprof_buf_t) + (sz) * sizeof(tprof_sample_t))
#define	TPROF_MAX_SAMPLES_PER_BUF	10000

#define	TPROF_MAX_BUF			100
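
/*
 * Buffer sizing, for illustration (assuming tbo_estimate_freq() reports the
 * expected number of samples per second): tprof_start() below sets
 * tprof_samples_per_buf = MIN(freq * 2, TPROF_MAX_SAMPLES_PER_BUF), and
 * tprof_worker() swaps buffers roughly once per second (callout of hz ticks),
 * so each per-CPU buffer has room for about two seconds' worth of samples
 * before b_overflow starts counting dropped samples.
 */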

typedef struct {
	tprof_buf_t *c_buf;
	uint32_t c_cpuid;
	struct work c_work;
	callout_t c_callout;
} __aligned(CACHE_LINE_SIZE) tprof_cpu_t;

typedef struct tprof_backend {
	const char *tb_name;
	const tprof_backend_ops_t *tb_ops;
	LIST_ENTRY(tprof_backend) tb_list;
	int tb_usecount;	/* S: */
} tprof_backend_t;

static kmutex_t tprof_lock;
static bool tprof_running;		/* s: */
static u_int tprof_nworker;		/* L: # of running worker LWPs */
static lwp_t *tprof_owner;
static STAILQ_HEAD(, tprof_buf) tprof_list;	/* L: global buffer list */
static u_int tprof_nbuf_on_list;	/* L: # of buffers on tprof_list */
static struct workqueue *tprof_wq;
static tprof_cpu_t tprof_cpus[MAXCPUS] __aligned(CACHE_LINE_SIZE);
static u_int tprof_samples_per_buf;

static tprof_backend_t *tprof_backend;	/* S: */
static LIST_HEAD(, tprof_backend) tprof_backends =
    LIST_HEAD_INITIALIZER(tprof_backends);	/* S: */

static kmutex_t tprof_reader_lock;
static kcondvar_t tprof_reader_cv;	/* L: */
static off_t tprof_reader_offset;	/* R: */

static kmutex_t tprof_startstop_lock;
static kcondvar_t tprof_cv;		/* L: */

static struct tprof_stat tprof_stat;	/* L: */

static tprof_cpu_t *
tprof_cpu(struct cpu_info *ci)
{

	return &tprof_cpus[cpu_index(ci)];
}

static tprof_cpu_t *
tprof_curcpu(void)
{

	return tprof_cpu(curcpu());
}

static tprof_buf_t *
tprof_buf_alloc(void)
{
	tprof_buf_t *new;
	u_int size = tprof_samples_per_buf;

	new = kmem_alloc(TPROF_BUF_BYTESIZE(size), KM_SLEEP);
	new->b_used = 0;
	new->b_size = size;
	new->b_overflow = 0;
	return new;
}

static void
tprof_buf_free(tprof_buf_t *buf)
{

	kmem_free(buf, TPROF_BUF_BYTESIZE(buf->b_size));
}

static tprof_buf_t *
tprof_buf_switch(tprof_cpu_t *c, tprof_buf_t *new)
{
	tprof_buf_t *old;

	old = c->c_buf;
	c->c_buf = new;
	return old;
}

static tprof_buf_t *
tprof_buf_refresh(void)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t *new;

	new = tprof_buf_alloc();
	return tprof_buf_switch(c, new);
}

static void
tprof_worker(struct work *wk, void *dummy)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t *buf;
	bool shouldstop;

	KASSERT(wk == &c->c_work);
	KASSERT(dummy == NULL);

	/*
	 * get a per cpu buffer.
	 */
	buf = tprof_buf_refresh();

	/*
	 * and put it on the global list for read(2).
	 */
	mutex_enter(&tprof_lock);
	shouldstop = !tprof_running;
	if (shouldstop) {
		KASSERT(tprof_nworker > 0);
		tprof_nworker--;
		cv_broadcast(&tprof_cv);
		cv_broadcast(&tprof_reader_cv);
	}
	if (buf->b_used == 0) {
		tprof_stat.ts_emptybuf++;
	} else if (tprof_nbuf_on_list < TPROF_MAX_BUF) {
		tprof_stat.ts_sample += buf->b_used;
		tprof_stat.ts_overflow += buf->b_overflow;
		tprof_stat.ts_buf++;
		STAILQ_INSERT_TAIL(&tprof_list, buf, b_list);
		tprof_nbuf_on_list++;
		buf = NULL;
		cv_broadcast(&tprof_reader_cv);
	} else {
		tprof_stat.ts_dropbuf_sample += buf->b_used;
		tprof_stat.ts_dropbuf++;
	}
	mutex_exit(&tprof_lock);
	if (buf) {
		tprof_buf_free(buf);
	}
	if (!shouldstop) {
		callout_schedule(&c->c_callout, hz);
	}
}

static void
tprof_kick(void *vp)
{
	struct cpu_info * const ci = vp;
	tprof_cpu_t * const c = tprof_cpu(ci);

	workqueue_enqueue(tprof_wq, &c->c_work, ci);
}

static void
tprof_stop1(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	KASSERT(tprof_nworker == 0);

	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);
		tprof_buf_t *old;

		old = tprof_buf_switch(c, NULL);
		if (old != NULL) {
			tprof_buf_free(old);
		}
		callout_destroy(&c->c_callout);
	}
	workqueue_destroy(tprof_wq);
}

static void
tprof_getinfo(struct tprof_info *info)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));

	memset(info, 0, sizeof(*info));
	info->ti_version = TPROF_VERSION;
	if ((tb = tprof_backend) != NULL) {
		info->ti_ident = tb->tb_ops->tbo_ident();
	}
}

static int
tprof_start(const tprof_param_t *param)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int error;
	uint64_t freq;
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	if (tprof_running) {
		error = EBUSY;
		goto done;
	}

	tb = tprof_backend;
	if (tb == NULL) {
		error = ENOENT;
		goto done;
	}
	if (tb->tb_usecount > 0) {
		error = EBUSY;
		goto done;
	}

	tb->tb_usecount++;
	freq = tb->tb_ops->tbo_estimate_freq();
	tprof_samples_per_buf = MIN(freq * 2, TPROF_MAX_SAMPLES_PER_BUF);

	error = workqueue_create(&tprof_wq, "tprofmv", tprof_worker, NULL,
	    PRI_NONE, IPL_SOFTCLOCK, WQ_MPSAFE | WQ_PERCPU);
	if (error != 0) {
		goto done;
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);
		tprof_buf_t *new;
		tprof_buf_t *old;

		new = tprof_buf_alloc();
		old = tprof_buf_switch(c, new);
		if (old != NULL) {
			tprof_buf_free(old);
		}
		callout_init(&c->c_callout, CALLOUT_MPSAFE);
		callout_setfunc(&c->c_callout, tprof_kick, ci);
	}

	error = tb->tb_ops->tbo_start(param);
	if (error != 0) {
		KASSERT(tb->tb_usecount > 0);
		tb->tb_usecount--;
		tprof_stop1();
		goto done;
	}

	mutex_enter(&tprof_lock);
	tprof_running = true;
	mutex_exit(&tprof_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);

		mutex_enter(&tprof_lock);
		tprof_nworker++;
		mutex_exit(&tprof_lock);
		workqueue_enqueue(tprof_wq, &c->c_work, ci);
	}
done:
	return error;
}

static void
tprof_stop(void)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	if (!tprof_running) {
		goto done;
	}

	tb = tprof_backend;
	KASSERT(tb->tb_usecount > 0);
	tb->tb_ops->tbo_stop(NULL);
	tb->tb_usecount--;

	mutex_enter(&tprof_lock);
	tprof_running = false;
	cv_broadcast(&tprof_reader_cv);
	while (tprof_nworker > 0) {
		cv_wait(&tprof_cv, &tprof_lock);
	}
	mutex_exit(&tprof_lock);

	tprof_stop1();
done:
	;
}

/*
 * tprof_clear: drain unread samples.
 */

static void
tprof_clear(void)
{
	tprof_buf_t *buf;

	mutex_enter(&tprof_reader_lock);
	mutex_enter(&tprof_lock);
	while ((buf = STAILQ_FIRST(&tprof_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&tprof_list, b_list);
		KASSERT(tprof_nbuf_on_list > 0);
		tprof_nbuf_on_list--;
		mutex_exit(&tprof_lock);
		tprof_buf_free(buf);
		mutex_enter(&tprof_lock);
	}
	KASSERT(tprof_nbuf_on_list == 0);
	mutex_exit(&tprof_lock);
	tprof_reader_offset = 0;
	mutex_exit(&tprof_reader_lock);

	memset(&tprof_stat, 0, sizeof(tprof_stat));
}

static tprof_backend_t *
tprof_backend_lookup(const char *name)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));

	LIST_FOREACH(tb, &tprof_backends, tb_list) {
		if (!strcmp(tb->tb_name, name)) {
			return tb;
		}
	}
	return NULL;
}

/* -------------------- backend interfaces */

/*
 * tprof_sample: record a sample on the per-cpu buffer.
 *
 * be careful; can be called in NMI context.
 * we are bluntly assuming the following are safe:
 *	curcpu()
 *	curlwp->l_lid
 *	curlwp->l_proc->p_pid
 */

void
tprof_sample(void *unused, const tprof_frame_info_t *tfi)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t * const buf = c->c_buf;
	tprof_sample_t *sp;
	const uintptr_t pc = tfi->tfi_pc;
	const lwp_t * const l = curlwp;
	u_int idx;

	idx = buf->b_used;
	if (__predict_false(idx >= buf->b_size)) {
		buf->b_overflow++;
		return;
	}
	sp = &buf->b_data[idx];
	sp->s_pid = l->l_proc->p_pid;
	sp->s_lwpid = l->l_lid;
	sp->s_cpuid = c->c_cpuid;
	sp->s_flags = (tfi->tfi_inkernel) ? TPROF_SAMPLE_INKERNEL : 0;
	sp->s_pc = pc;
	buf->b_used = idx + 1;
}

/*
 * tprof_backend_register: register a sampling backend with tprof.
 */

int
tprof_backend_register(const char *name, const tprof_backend_ops_t *ops,
    int vers)
{
	tprof_backend_t *tb;

	if (vers != TPROF_BACKEND_VERSION) {
		return EINVAL;
	}

	mutex_enter(&tprof_startstop_lock);
	tb = tprof_backend_lookup(name);
	if (tb != NULL) {
		mutex_exit(&tprof_startstop_lock);
		return EEXIST;
	}
#if 1 /* XXX for now */
	if (!LIST_EMPTY(&tprof_backends)) {
		mutex_exit(&tprof_startstop_lock);
		return ENOTSUP;
	}
#endif
	tb = kmem_alloc(sizeof(*tb), KM_SLEEP);
	tb->tb_name = name;
	tb->tb_ops = ops;
	tb->tb_usecount = 0;
	LIST_INSERT_HEAD(&tprof_backends, tb, tb_list);
#if 1 /* XXX for now */
	if (tprof_backend == NULL) {
		tprof_backend = tb;
	}
#endif
	mutex_exit(&tprof_startstop_lock);

	return 0;
}
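
/*
 * Illustrative sketch of a backend registering itself.  The backend name
 * and function names below are hypothetical; only the ops member names and
 * TPROF_BACKEND_VERSION come from this interface:
 *
 *	static const tprof_backend_ops_t tprof_foo_ops = {
 *		.tbo_estimate_freq	= tprof_foo_estimate_freq,
 *		.tbo_ident		= tprof_foo_ident,
 *		.tbo_start		= tprof_foo_start,
 *		.tbo_stop		= tprof_foo_stop,
 *	};
 *
 *	error = tprof_backend_register("tprof_foo", &tprof_foo_ops,
 *	    TPROF_BACKEND_VERSION);
 *
 * The matching tprof_backend_unregister("tprof_foo") call below fails with
 * EBUSY while profiling is running (tb_usecount > 0).
 */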

/*
 * tprof_backend_unregister: remove a previously registered backend.
 */

int
tprof_backend_unregister(const char *name)
{
	tprof_backend_t *tb;

	mutex_enter(&tprof_startstop_lock);
	tb = tprof_backend_lookup(name);
#if defined(DIAGNOSTIC)
	if (tb == NULL) {
		mutex_exit(&tprof_startstop_lock);
		panic("%s: not found '%s'", __func__, name);
	}
#endif /* defined(DIAGNOSTIC) */
	if (tb->tb_usecount > 0) {
		mutex_exit(&tprof_startstop_lock);
		return EBUSY;
	}
#if 1 /* XXX for now */
	if (tprof_backend == tb) {
		tprof_backend = NULL;
	}
#endif
	LIST_REMOVE(tb, tb_list);
	mutex_exit(&tprof_startstop_lock);

	kmem_free(tb, sizeof(*tb));

	return 0;
}

/* -------------------- cdevsw interfaces */

static int
tprof_open(dev_t dev, int flags, int type, struct lwp *l)
{

	if (minor(dev) != 0) {
		return EXDEV;
	}
	mutex_enter(&tprof_lock);
	if (tprof_owner != NULL) {
		mutex_exit(&tprof_lock);
		return EBUSY;
	}
	tprof_owner = curlwp;
	mutex_exit(&tprof_lock);

	return 0;
}

static int
tprof_close(dev_t dev, int flags, int type, struct lwp *l)
{

	KASSERT(minor(dev) == 0);

	mutex_enter(&tprof_startstop_lock);
	mutex_enter(&tprof_lock);
	tprof_owner = NULL;
	mutex_exit(&tprof_lock);
	tprof_stop();
	tprof_clear();
	mutex_exit(&tprof_startstop_lock);

	return 0;
}

static int
tprof_read(dev_t dev, struct uio *uio, int flags)
{
	tprof_buf_t *buf;
	size_t bytes;
	size_t resid;
	size_t done;
	int error = 0;

	KASSERT(minor(dev) == 0);
	mutex_enter(&tprof_reader_lock);
	while (uio->uio_resid > 0 && error == 0) {
		/*
		 * take the first buffer from the list.
		 */
		mutex_enter(&tprof_lock);
		buf = STAILQ_FIRST(&tprof_list);
		if (buf == NULL) {
			if (tprof_nworker == 0) {
				mutex_exit(&tprof_lock);
				error = 0;
				break;
			}
			mutex_exit(&tprof_reader_lock);
			error = cv_wait_sig(&tprof_reader_cv, &tprof_lock);
			mutex_exit(&tprof_lock);
			mutex_enter(&tprof_reader_lock);
			continue;
		}
		STAILQ_REMOVE_HEAD(&tprof_list, b_list);
		KASSERT(tprof_nbuf_on_list > 0);
		tprof_nbuf_on_list--;
		mutex_exit(&tprof_lock);

		/*
		 * copy it out.
		 */
		bytes = MIN(buf->b_used * sizeof(tprof_sample_t) -
		    tprof_reader_offset, uio->uio_resid);
		resid = uio->uio_resid;
		error = uiomove((char *)buf->b_data + tprof_reader_offset,
		    bytes, uio);
		done = resid - uio->uio_resid;
		tprof_reader_offset += done;

		/*
		 * if we didn't consume the whole buffer,
		 * put it back to the list.
		 */
		if (tprof_reader_offset <
		    buf->b_used * sizeof(tprof_sample_t)) {
			mutex_enter(&tprof_lock);
			STAILQ_INSERT_HEAD(&tprof_list, buf, b_list);
			tprof_nbuf_on_list++;
			cv_broadcast(&tprof_reader_cv);
			mutex_exit(&tprof_lock);
		} else {
			tprof_buf_free(buf);
			tprof_reader_offset = 0;
		}
	}
	mutex_exit(&tprof_reader_lock);

	return error;
}

static int
tprof_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
{
	const tprof_param_t *param;
	int error = 0;

	KASSERT(minor(dev) == 0);

	switch (cmd) {
	case TPROF_IOC_GETINFO:
		mutex_enter(&tprof_startstop_lock);
		tprof_getinfo(data);
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_START:
		param = data;
		mutex_enter(&tprof_startstop_lock);
		error = tprof_start(param);
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_STOP:
		mutex_enter(&tprof_startstop_lock);
		tprof_stop();
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_GETSTAT:
		mutex_enter(&tprof_lock);
		memcpy(data, &tprof_stat, sizeof(tprof_stat));
		mutex_exit(&tprof_lock);
		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}

const struct cdevsw tprof_cdevsw = {
	.d_open = tprof_open,
	.d_close = tprof_close,
	.d_read = tprof_read,
	.d_write = nowrite,
	.d_ioctl = tprof_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};
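
/*
 * Userland usage sketch (illustrative only; the device node name and the
 * tprof_param_t contents are assumptions, and error handling is omitted).
 * This is the sequence the cdevsw entry points above serve:
 *
 *	int fd = open("/dev/tprof", O_RDONLY);
 *	struct tprof_info info;
 *	ioctl(fd, TPROF_IOC_GETINFO, &info);		-> tprof_getinfo()
 *	tprof_param_t param = { ... };
 *	ioctl(fd, TPROF_IOC_START, &param);		-> tprof_start()
 *	while (read(fd, samples, sizeof(samples)) > 0)	-> tprof_read()
 *		... parse tprof_sample_t records ...
 *	ioctl(fd, TPROF_IOC_STOP, NULL);		-> tprof_stop()
 *	close(fd);					-> tprof_close()
 */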

void
tprofattach(int nunits)
{

	/* nothing */
}

MODULE(MODULE_CLASS_DRIVER, tprof, NULL);

static void
tprof_driver_init(void)
{
	unsigned int i;

	mutex_init(&tprof_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&tprof_reader_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&tprof_startstop_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&tprof_cv, "tprof");
	cv_init(&tprof_reader_cv, "tprof_rd");
	STAILQ_INIT(&tprof_list);
	for (i = 0; i < __arraycount(tprof_cpus); i++) {
		tprof_cpu_t * const c = &tprof_cpus[i];

		c->c_buf = NULL;
		c->c_cpuid = i;
	}
}

static void
tprof_driver_fini(void)
{

	mutex_destroy(&tprof_lock);
	mutex_destroy(&tprof_reader_lock);
	mutex_destroy(&tprof_startstop_lock);
	cv_destroy(&tprof_cv);
	cv_destroy(&tprof_reader_cv);
}

static int
tprof_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		tprof_driver_init();
#if defined(_MODULE)
		{
			devmajor_t bmajor = NODEVMAJOR;
			devmajor_t cmajor = NODEVMAJOR;
			int error;

			error = devsw_attach("tprof", NULL, &bmajor,
			    &tprof_cdevsw, &cmajor);
			if (error) {
				tprof_driver_fini();
				return error;
			}
		}
#endif /* defined(_MODULE) */
		return 0;

	case MODULE_CMD_FINI:
#if defined(_MODULE)
		{
			int error;
			error = devsw_detach(NULL, &tprof_cdevsw);
			if (error) {
				return error;
			}
		}
#endif /* defined(_MODULE) */
		tprof_driver_fini();
		return 0;

	default:
		return ENOTTY;
	}
}