/* $NetBSD: acpi_cpu.c,v 1.29 2011/02/25 19:55:06 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen (at) iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.29 2011/02/25 19:55:06 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/sysctl.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT		ACPI_BUS_COMPONENT
ACPI_MODULE_NAME		("acpi_cpu")

static int		acpicpu_match(device_t, cfdata_t, void *);
static void		acpicpu_attach(device_t, device_t, void *);
static int		acpicpu_detach(device_t, int);
static int		acpicpu_once_attach(void);
static int		acpicpu_once_detach(void);
static void		acpicpu_prestart(device_t);
static void		acpicpu_start(device_t);
static void		acpicpu_debug_print(struct acpicpu_softc *);
static const char	*acpicpu_debug_print_dep(uint32_t);
static void		acpicpu_sysctl(device_t);

static int		acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static struct cpu_info	*acpicpu_ci(uint32_t);
static uint32_t		acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cap_pdc(struct acpicpu_softc *, uint32_t);
static ACPI_STATUS	acpicpu_cap_osc(struct acpicpu_softc *,
					uint32_t, uint32_t *);
static void		acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		acpicpu_resume(device_t, const pmf_qual_t *);

extern uint32_t		acpi_cpus;
struct acpicpu_softc	**acpicpu_sc = NULL;
static struct sysctllog	*acpicpu_log = NULL;
static bool		acpicpu_dynamic = true;
static bool		acpicpu_passive = true;

static const char * const acpicpu_hid[] = {
	"ACPI0007",
	NULL
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

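/*
 * Match an ACPI Processor object: accept it immediately if it carries
 * the "ACPI0007" HID, otherwise verify that the declared processor ID
 * maps to an existing CPU.
 */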
static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	struct acpi_attach_args *aa = aux;
	struct acpicpu_object ao;
	int rv;

	if (aa->aa_node->ad_type != ACPI_TYPE_PROCESSOR)
		return 0;

	if (acpi_match_hid(aa->aa_node->ad_devinfo, acpicpu_hid) != 0)
		return 1;

	rv = acpicpu_object(aa->aa_node->ad_handle, &ao);

	if (rv != 0 || acpicpu_ci(ao.ao_procid) == NULL)
		return 0;

	return 1;
}

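/*
 * Attach an ACPI CPU: resolve the Processor object, record the softc
 * in the global per-CPU array, query capabilities, and attach the
 * C-, P-, and T-state sub-drivers.
 */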
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpi_attach_args *aa = aux;
	static ONCE_DECL(once_attach);
	cpuid_t id;
	int rv;

	rv = acpicpu_object(aa->aa_node->ad_handle, &sc->sc_object);

	if (rv != 0)
		return;

	rv = RUN_ONCE(&once_attach, acpicpu_once_attach);

	if (rv != 0)
		return;

	sc->sc_dev = self;
	sc->sc_cold = true;
	sc->sc_node = aa->aa_node;

	sc->sc_ci = acpicpu_ci(sc->sc_object.ao_procid);

	if (sc->sc_ci == NULL) {
		aprint_error(": invalid CPU\n");
		return;
	}

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_error(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	acpi_cpus++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags |= acpicpu_md_flags();

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	(void)config_defer(self, acpicpu_prestart);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);

	acpicpu_debug_print(sc);
}

static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	int rv = 0;

	sc->sc_cold = true;
	acpi_deregister_notify(sc->sc_node);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		rv = acpicpu_cstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		rv = acpicpu_pstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		rv = acpicpu_tstate_detach(self);

	if (rv != 0)
		return rv;

	rv = RUN_ONCE(&once_detach, acpicpu_once_detach);

	if (rv != 0)
		return rv;

	mutex_destroy(&sc->sc_mtx);
	acpi_cpus--;

	return 0;
}

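/*
 * Allocate the global acpicpu_sc array, indexed by the ACPI CPU ID.
 * Executed only once via RUN_ONCE(9).
 */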
static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	if (acpicpu_sc == NULL)
		return ENOMEM;

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	if (acpicpu_log != NULL)
		sysctl_teardown(&acpicpu_log);

	return 0;
}

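/*
 * Deferred with config_defer(9) from acpicpu_attach(). The first
 * invocation schedules acpicpu_start() via config_interrupts(9);
 * subsequent invocations only clear the cold flag of their device.
 */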
static void
acpicpu_prestart(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static bool once = false;

	if (once != false) {
		sc->sc_cold = false;
		return;
	}

	once = true;

	(void)config_interrupts(self, acpicpu_start);
}

static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	/*
	 * Run the state-specific initialization
	 * routines. These should be called only
	 * once, after interrupts are enabled and
	 * all ACPI CPUs have attached.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);

	aprint_debug_dev(sc->sc_dev, "ACPI CPUs started (cap "
	    "0x%02x, flags 0x%06x)\n", sc->sc_cap, sc->sc_flags);

	sc->sc_cold = false;
}

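/*
 * Create the sysctl nodes hw.acpi.cpu.dynamic and hw.acpi.cpu.passive,
 * which toggle dynamic state changes and passive cooling, respectively.
 */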
static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}

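/*
 * Evaluate the ACPI Processor object behind the given handle and
 * extract the processor ID and the P_BLK address and length.
 */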
static int
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		return 1;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return ACPI_FAILURE(rv) ? 1 : 0;
}

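/*
 * Return the cpu_info whose ACPI ID matches the given processor ID,
 * or NULL if none is found.
 */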
static struct cpu_info *
acpicpu_ci(uint32_t id)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {

		if (id == ci->ci_acpiid)
			return ci;
	}

	return NULL;
}

static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t flags, cap = 0;
	const char *str;
	ACPI_STATUS rv;

	/*
	 * Query and set machine-dependent capabilities.
	 * Note that the Intel-specific _PDC method was
	 * deprecated in ACPI 3.0 in favor of _OSC.
	 */
	flags = acpicpu_md_cap();
	rv = acpicpu_cap_osc(sc, flags, &cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
		str = "_OSC";
		goto fail;
	}

	rv = acpicpu_cap_pdc(sc, flags);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
		str = "_PDC";
		goto fail;
	}

	if (cap == 0)
		cap = flags;

	return cap;

fail:
	aprint_error_dev(sc->sc_dev, "failed to evaluate "
	    "%s: %s\n", str, AcpiFormatException(rv));

	return 0;
}

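/*
 * Report the operating system's capabilities with the legacy _PDC
 * method. The argument is a single buffer of three 32-bit integers:
 * the revision, the count, and the capability bits themselves.
 */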
static ACPI_STATUS
acpicpu_cap_pdc(struct acpicpu_softc *sc, uint32_t flags)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj;
	uint32_t cap[3];

	arg.Count = 1;
	arg.Pointer = &obj;

	cap[0] = ACPICPU_PDC_REVID;
	cap[1] = 1;
	cap[2] = flags;

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = sizeof(cap);
	obj.Buffer.Pointer = (void *)cap;

	return AcpiEvaluateObject(sc->sc_node->ad_handle, "_PDC", &arg, NULL);
}

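/*
 * Negotiate capabilities with the _OSC method, using the Intel
 * processor vendor UUID. The method is evaluated first with the
 * query flag set, retrying while the firmware reports masked
 * capabilities, and then once more without the flag to commit
 * the negotiated set.
 */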
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;

	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 *  _OSC with the Query Support Flag set until _OSC
	 *  returns the Capabilities Masked bit clear, to
	 *  negotiate the set of features to be granted to
	 *  the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on a per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

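/*
 * ACPI notify handler. Dispatch C-, P-, and T-state change
 * notifications to the matching callback, which is executed
 * asynchronously via AcpiOsExecute().
 */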
static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	if (acpicpu_dynamic != true)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_resume(self);

	return true;
}

static void
acpicpu_debug_print(struct acpicpu_softc *sc)
{
	struct acpicpu_dep *dep;

	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {

		dep = &sc->sc_cstate_dep;

		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {

		dep = &sc->sc_pstate_dep;

		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {

		dep = &sc->sc_tstate_dep;

		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}
}

static const char *
acpicpu_debug_print_dep(uint32_t val)
{

	switch (val) {

	case ACPICPU_DEP_SW_ALL:
		return "SW_ALL";

	case ACPICPU_DEP_SW_ANY:
		return "SW_ANY";

	case ACPICPU_DEP_HW_ALL:
		return "HW_ALL";

	default:
		return "unknown";
	}
}

MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
acpicpu_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {

	case MODULE_CMD_INIT:

#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	case MODULE_CMD_FINI:

#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	default:
		rv = ENOTTY;
	}

	return rv;
}