/* $NetBSD: apm.c,v 1.29 2013/11/08 02:47:41 christos Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apm.c,v 1.29 2013/11/08 02:47:41 christos Exp $");

#include "opt_apm.h"

#if defined(DEBUG) && !defined(APMDEBUG)
#define APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/apm/apmvar.h>

#ifdef APMDEBUG
#define DPRINTF(f, x)  do { if (apmdebug & (f)) printf x; } while (0)

#ifdef APMDEBUG_VALUE
int apmdebug = APMDEBUG_VALUE;
#else
int apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define DPRINTF(f, x)  /**/
#endif /* APMDEBUG */

#define SCFLAG_OREAD   0x0000001
#define SCFLAG_OWRITE  0x0000002
#define SCFLAG_OPEN    (SCFLAG_OREAD|SCFLAG_OWRITE)

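/*
 * The character device minor number encodes both the unit and the
 * flavour of the device: the low nibble selects APM_NORMAL (the
 * read-only event device) or APM_CTL (the read/write control device);
 * see apmopen() below for how the two flavours are opened.
 */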
#define APMUNIT(dev)   (minor(dev)&0xf0)
#define APM(dev)       (minor(dev)&0x0f)
#define APM_NORMAL     0
#define APM_CTL        8

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module, both from the APM thread itself and from user
 * context.
 */
#define APM_LOCK(apmsc) \
    (void) mutex_enter(&(apmsc)->sc_lock)
#define APM_UNLOCK(apmsc) \
    (void) mutex_exit(&(apmsc)->sc_lock)

static void apm_event_handle(struct apm_softc *, u_int, u_int);
static void apm_periodic_check(struct apm_softc *);
static void apm_thread(void *);
static void apm_perror(const char *, int, ...)
    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int apm_record_event(struct apm_softc *, u_int);
static void apm_set_ver(struct apm_softc *);
static void apm_standby(struct apm_softc *);
static void apm_suspend(struct apm_softc *);
static void apm_resume(struct apm_softc *, u_int, u_int);

extern struct cfdriver apm_cd;

dev_type_open(apmopen);
dev_type_close(apmclose);
dev_type_ioctl(apmioctl);
dev_type_poll(apmpoll);
dev_type_kqfilter(apmkqfilter);

const struct cdevsw apm_cdevsw = {
    apmopen, apmclose, noread, nowrite, apmioctl,
    nostop, notty, apmpoll, nommap, apmkqfilter, D_OTHER,
};

/* configurable variables */
#ifdef APM_NO_STANDBY
int apm_do_standby = 0;
#else
int apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int apm_v11_enabled = 0;
#else
int apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int apm_v12_enabled = 0;
#else
int apm_v12_enabled = 1;
#endif

/* variables used during operation (XXX cgd) */
u_char apm_majver, apm_minver;
int apm_inited;
int apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int apm_damn_fool_bios, apm_op_inprog;
int apm_evindex;

static int apm_spl;    /* saved spl while suspended */

const char *
apm_strerror(int code)
{
    switch (code) {
    case APM_ERR_PM_DISABLED:
        return ("power management disabled");
    case APM_ERR_REALALREADY:
        return ("real mode interface already connected");
    case APM_ERR_NOTCONN:
        return ("interface not connected");
    case APM_ERR_16ALREADY:
        return ("16-bit interface already connected");
    case APM_ERR_16NOTSUPP:
        return ("16-bit interface not supported");
    case APM_ERR_32ALREADY:
        return ("32-bit interface already connected");
    case APM_ERR_32NOTSUPP:
        return ("32-bit interface not supported");
    case APM_ERR_UNRECOG_DEV:
        return ("unrecognized device ID");
    case APM_ERR_ERANGE:
        return ("parameter out of range");
    case APM_ERR_NOTENGAGED:
        return ("interface not engaged");
    case APM_ERR_UNABLE:
        return ("unable to enter requested state");
    case APM_ERR_NOEVENTS:
        return ("no pending events");
    case APM_ERR_NOT_PRESENT:
        return ("no APM present");
    default:
        return ("unknown error code");
    }
}

static void
apm_perror(const char *str, int errinfo, ...)    /* XXX cgd */
{
    va_list ap;

    printf("APM ");

    va_start(ap, errinfo);
    vprintf(str, ap);    /* XXX cgd */
    va_end(ap);

    printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

    if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
        aprint_normal_dev(sc->sc_dev,
            "battery life expectancy: %d%%\n",
            pi->battery_life);
    }
    aprint_normal_dev(sc->sc_dev, "A/C state: ");
    switch (pi->ac_state) {
    case APM_AC_OFF:
        printf("off\n");
        break;
    case APM_AC_ON:
        printf("on\n");
        break;
    case APM_AC_BACKUP:
        printf("backup power\n");
        break;
    default:
    case APM_AC_UNKNOWN:
        printf("unknown\n");
        break;
    }
    aprint_normal_dev(sc->sc_dev, "battery charge state:");
    if (apm_minver == 0)
        switch (pi->battery_state) {
        case APM_BATT_HIGH:
            printf("high\n");
            break;
        case APM_BATT_LOW:
            printf("low\n");
            break;
        case APM_BATT_CRITICAL:
            printf("critical\n");
            break;
        case APM_BATT_CHARGING:
            printf("charging\n");
            break;
        case APM_BATT_UNKNOWN:
            printf("unknown\n");
            break;
        default:
            printf("undecoded state %x\n", pi->battery_state);
            break;
        }
    else if (apm_minver >= 1) {
        if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
            printf(" no battery");
        else {
            if (pi->battery_flags & APM_BATT_FLAG_HIGH)
                printf(" high");
            if (pi->battery_flags & APM_BATT_FLAG_LOW)
                printf(" low");
            if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
                printf(" critical");
            if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
                printf(" charging");
        }
        printf("\n");
        if (pi->minutes_valid) {
            aprint_normal_dev(sc->sc_dev, "estimated ");
            if (pi->minutes_left / 60)
                printf("%dh ", pi->minutes_left / 60);
            printf("%dm\n", pi->minutes_left % 60);
        }
    }
    return;
}
#endif

static void
apm_suspend(struct apm_softc *sc)
{
    int error;

    if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
        aprint_debug_dev(sc->sc_dev,
            "apm_suspend: already suspended?\n");
#endif
        return;
    }
    sc->sc_power_state = PWR_SUSPEND;

    if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
        pmf_system_suspend(PMF_Q_NONE);
        apm_spl = splhigh();
    }

    error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
        APM_SYS_SUSPEND);

    if (error)
        apm_resume(sc, 0, 0);
    else
        apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

static void
apm_standby(struct apm_softc *sc)
{
    int error;

    if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
        aprint_debug_dev(sc->sc_dev,
            "apm_standby: already standing by?\n");
#endif
        return;
    }
    sc->sc_power_state = PWR_STANDBY;

    if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
        pmf_system_suspend(PMF_Q_NONE);
        apm_spl = splhigh();
    }
    error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
        APM_SYS_STANDBY);
    if (error)
        apm_resume(sc, 0, 0);
    else
        apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{
    if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
        aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
        return;
    }
    sc->sc_power_state = PWR_RESUME;

#ifdef TIMER_FREQ
    /*
     * Some systems require the clock to be reinitialized after
     * hibernation.
     */
    initrtclock(TIMER_FREQ);
#endif

    inittodr(time_second);
    if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
        splx(apm_spl);
        pmf_system_resume(PMF_Q_NONE);
    }

    apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
    struct apm_event_info *evp;

    if ((sc->sc_flags & SCFLAG_OPEN) == 0)
        return 1;        /* no user waiting */
    if (sc->sc_event_count == APM_NEVENTS)
        return 1;        /* overflow */
    evp = &sc->sc_event_list[sc->sc_event_ptr];
    sc->sc_event_count++;
    sc->sc_event_ptr++;
    sc->sc_event_ptr %= APM_NEVENTS;
    evp->type = event_type;
    evp->index = ++apm_evindex;
    selnotify(&sc->sc_rsel, 0, 0);
    return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1;    /* user may handle */
}

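/*
 * Process one event code handed back by the BIOS: queue it for any
 * reader of the apm device via apm_record_event() and note pending
 * suspend/standby work, which apm_periodic_check() carries out once
 * the event queue has been drained.
 */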
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
    int error;
    const char *code;
    struct apm_power_info pi;

    switch (event_code) {
    case APM_USER_STANDBY_REQ:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
        if (apm_do_standby) {
            if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
                apm_userstandbys++;
            apm_op_inprog++;
            (void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
                APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
        } else {
            (void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
                APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
            /* in case BIOS hates being spurned */
            (*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
        }
        break;

    case APM_STANDBY_REQ:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
        if (apm_op_inprog) {
            DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
                ("damn fool BIOS did not wait for answer\n"));
            /* just give up the fight */
            apm_damn_fool_bios = 1;
        }
        if (apm_do_standby) {
            if (apm_op_inprog == 0 &&
                apm_record_event(sc, event_code))
                apm_standbys++;
            apm_op_inprog++;
            (void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
                APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
        } else {
            (void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
                APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
            /* in case BIOS hates being spurned */
            (*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
        }
        break;

    case APM_USER_SUSPEND_REQ:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
        if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
            apm_suspends++;
        apm_op_inprog++;
        (void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
            APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
        break;

    case APM_SUSPEND_REQ:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
        if (apm_op_inprog) {
            DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
                ("damn fool BIOS did not wait for answer\n"));
            /* just give up the fight */
            apm_damn_fool_bios = 1;
        }
        if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
            apm_suspends++;
        apm_op_inprog++;
        (void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
            APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
        break;

    case APM_POWER_CHANGE:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
        error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
        /* only print if nobody is catching events. */
        if (error == 0 &&
            (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
            apm_power_print(sc, &pi);
#else
        __USE(error);
#endif
        apm_record_event(sc, event_code);
        break;

    case APM_NORMAL_RESUME:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
        apm_resume(sc, event_code, event_info);
        break;

    case APM_CRIT_RESUME:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
        apm_resume(sc, event_code, event_info);
        break;

    case APM_SYS_STANDBY_RESUME:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
        apm_resume(sc, event_code, event_info);
        break;

    case APM_UPDATE_TIME:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
        apm_resume(sc, event_code, event_info);
        break;

    case APM_CRIT_SUSPEND_REQ:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
        apm_record_event(sc, event_code);
        apm_suspend(sc);
        break;

    case APM_BATTERY_LOW:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
        apm_battlow++;
        apm_record_event(sc, event_code);
        break;

    case APM_CAP_CHANGE:
        DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
        if (apm_minver < 2) {
            DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
        } else {
            u_int numbatts, capflags;
            (*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
                &numbatts, &capflags);
            (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
        }
        break;

    default:
        switch (event_code >> 8) {
        case 0:
            code = "reserved system";
            break;
        case 1:
            code = "reserved device";
            break;
        case 2:
            code = "OEM defined";
            break;
        default:
            code = "reserved";
            break;
        }
        printf("APM: %s event code %x\n", code, event_code);
    }
}

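/*
 * Drain all pending BIOS events, then carry out at most one of the
 * suspend/standby requests accumulated above.  Called from the APM
 * kernel thread and once directly from apm_attach().
 */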
static void
apm_periodic_check(struct apm_softc *sc)
{
    int error;
    u_int event_code, event_info;

    /*
     * tell the BIOS we're working on it, if asked to do a
     * suspend/standby
     */
    if (apm_op_inprog)
        (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
            APM_LASTREQ_INPROG);

    while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
        &event_info)) == 0 && !apm_damn_fool_bios)
        apm_event_handle(sc, event_code, event_info);

    if (error != APM_ERR_NOEVENTS)
        apm_perror("get event", error);
    if (apm_suspends) {
        apm_op_inprog = 0;
        apm_suspend(sc);
    } else if (apm_standbys || apm_userstandbys) {
        apm_op_inprog = 0;
        apm_standby(sc);
    }
    apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
    apm_damn_fool_bios = 0;
}

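/*
 * Negotiate the APM protocol level: prefer 1.2, then 1.1, falling
 * back to 1.0, subject to the apm_v12_enabled/apm_v11_enabled knobs
 * above.
 */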
static void
apm_set_ver(struct apm_softc *sc)
{

    if (apm_v12_enabled &&
        APM_MAJOR_VERS(sc->sc_vers) == 1 &&
        APM_MINOR_VERS(sc->sc_vers) == 2) {
        apm_majver = 1;
        apm_minver = 2;
        goto ok;
    }

    if (apm_v11_enabled &&
        APM_MAJOR_VERS(sc->sc_vers) == 1 &&
        APM_MINOR_VERS(sc->sc_vers) == 1) {
        apm_majver = 1;
        apm_minver = 1;
    } else {
        apm_majver = 1;
        apm_minver = 0;
    }
ok:
    aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
    apm_inited = 1;
}

int
apm_match(void)
{
    static int got;
    return !got++;
}

void
apm_attach(struct apm_softc *sc)
{
    u_int numbatts, capflags;

    aprint_normal(": ");

    switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
    case 0x0100:
        apm_v11_enabled = 0;
        apm_v12_enabled = 0;
        break;
    case 0x0101:
        apm_v12_enabled = 0;
        /* fall through */
    case 0x0102:
    default:
        break;
    }

    apm_set_ver(sc);        /* prints version info */
    aprint_normal("\n");
    if (apm_minver >= 2)
        (*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
            &capflags);

    /*
     * enable power management
     */
    (*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

    if (sc->sc_ops->aa_cpu_busy)
        (*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

    mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

    /* Initial state is `resumed'. */
    sc->sc_power_state = PWR_RESUME;
    selinit(&sc->sc_rsel);
    selinit(&sc->sc_xsel);

    /* Do an initial check. */
    apm_periodic_check(sc);

    /*
     * Create a kernel thread to periodically check for APM events,
     * and notify other subsystems when they occur.
     */
    if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
        &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
        /*
         * We were unable to create the APM thread; bail out.
         */
        if (sc->sc_ops->aa_disconnect)
            (*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
        aprint_error_dev(sc->sc_dev, "unable to create thread, "
            "kernel APM support disabled\n");
    }

    if (!pmf_device_register(sc->sc_dev, NULL, NULL))
        aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n");
}

void
apm_thread(void *arg)
{
    struct apm_softc *apmsc = arg;

    /*
     * Loop forever, doing a periodic check for APM events.
     */
    for (;;) {
        APM_LOCK(apmsc);
        apm_periodic_check(apmsc);
        APM_UNLOCK(apmsc);
        (void) tsleep(apmsc, PWAIT, "apmev", (8 * hz) / 7);
    }
}

int
apmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
    int ctl = APM(dev);
    int error = 0;
    struct apm_softc *sc;

    sc = device_lookup_private(&apm_cd, APMUNIT(dev));
    if (!sc)
        return ENXIO;

    if (!apm_inited)
        return ENXIO;

    DPRINTF(APMDEBUG_DEVICE,
        ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

    APM_LOCK(sc);
    switch (ctl) {
    case APM_CTL:
        if (!(flag & FWRITE)) {
            error = EINVAL;
            break;
        }
        if (sc->sc_flags & SCFLAG_OWRITE) {
            error = EBUSY;
            break;
        }
        sc->sc_flags |= SCFLAG_OWRITE;
        break;
    case APM_NORMAL:
        if (!(flag & FREAD) || (flag & FWRITE)) {
            error = EINVAL;
            break;
        }
        sc->sc_flags |= SCFLAG_OREAD;
        break;
    default:
        error = ENXIO;
        break;
    }
    APM_UNLOCK(sc);

    return (error);
}

int
apmclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
    struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
    int ctl = APM(dev);

    DPRINTF(APMDEBUG_DEVICE,
        ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

    APM_LOCK(sc);
    switch (ctl) {
    case APM_CTL:
        sc->sc_flags &= ~SCFLAG_OWRITE;
        break;
    case APM_NORMAL:
        sc->sc_flags &= ~SCFLAG_OREAD;
        break;
    }
    if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
        sc->sc_event_count = 0;
        sc->sc_event_ptr = 0;
    }
    APM_UNLOCK(sc);
    return 0;
}

int
apmioctl(dev_t dev, u_long cmd, void *data, int flag,
    struct lwp *l)
{
    struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
    struct apm_power_info *powerp;
    struct apm_event_info *evp;
#if 0
    struct apm_ctl *actl;
#endif
    int i, error = 0;
    int batt_flags;
    struct apm_ctl *actl;

    APM_LOCK(sc);
    switch (cmd) {
    case APM_IOC_STANDBY:
        if (!apm_do_standby) {
            error = EOPNOTSUPP;
            break;
        }

        if ((flag & FWRITE) == 0) {
            error = EBADF;
            break;
        }
        apm_userstandbys++;
        break;

    case APM_IOC_DEV_CTL:
        actl = (struct apm_ctl *)data;
        if ((flag & FWRITE) == 0) {
            error = EBADF;
            break;
        }
#if 0
        apm_get_powstate(actl->dev); /* XXX */
#endif
        error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, actl->dev,
            actl->mode);
        apm_suspends++;
        break;

    case APM_IOC_SUSPEND:
        if ((flag & FWRITE) == 0) {
            error = EBADF;
            break;
        }
        apm_suspends++;
        break;

    case APM_IOC_NEXTEVENT:
        if (!sc->sc_event_count)
            error = EAGAIN;
        else {
            evp = (struct apm_event_info *)data;
            i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
            i %= APM_NEVENTS;
            *evp = sc->sc_event_list[i];
            sc->sc_event_count--;
        }
        break;

    case OAPM_IOC_GETPOWER:
    case APM_IOC_GETPOWER:
        powerp = (struct apm_power_info *)data;
        if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
            powerp)) != 0) {
            apm_perror("ioctl get power status", error);
            error = EIO;
            break;
        }
        switch (apm_minver) {
        case 0:
            break;
        case 1:
        default:
            batt_flags = powerp->battery_flags;
            powerp->battery_state = APM_BATT_UNKNOWN;
            if (batt_flags & APM_BATT_FLAG_HIGH)
                powerp->battery_state = APM_BATT_HIGH;
            else if (batt_flags & APM_BATT_FLAG_LOW)
                powerp->battery_state = APM_BATT_LOW;
            else if (batt_flags & APM_BATT_FLAG_CRITICAL)
                powerp->battery_state = APM_BATT_CRITICAL;
            else if (batt_flags & APM_BATT_FLAG_CHARGING)
                powerp->battery_state = APM_BATT_CHARGING;
            else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
                powerp->battery_state = APM_BATT_ABSENT;
            break;
        }
        break;

    default:
        error = ENOTTY;
    }
    APM_UNLOCK(sc);

    return (error);
}

int
apmpoll(dev_t dev, int events, struct lwp *l)
{
    struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
    int revents = 0;

    APM_LOCK(sc);
    if (events & (POLLIN | POLLRDNORM)) {
        if (sc->sc_event_count)
            revents |= events & (POLLIN | POLLRDNORM);
        else
            selrecord(l, &sc->sc_rsel);
    }
    APM_UNLOCK(sc);

    return (revents);
}

static void
filt_apmrdetach(struct knote *kn)
{
    struct apm_softc *sc = kn->kn_hook;

    APM_LOCK(sc);
    SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
    APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
    struct apm_softc *sc = kn->kn_hook;

    kn->kn_data = sc->sc_event_count;
    return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
    { 1, NULL, filt_apmrdetach, filt_apmread };

int
apmkqfilter(dev_t dev, struct knote *kn)
{
    struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
    struct klist *klist;

    switch (kn->kn_filter) {
    case EVFILT_READ:
        klist = &sc->sc_rsel.sel_klist;
        kn->kn_fop = &apmread_filtops;
        break;

    default:
        return (EINVAL);
    }

    kn->kn_hook = sc;

    APM_LOCK(sc);
    SLIST_INSERT_HEAD(klist, kn, kn_selnext);
    APM_UNLOCK(sc);

    return (0);
}