/*	$NetBSD: apm.c,v 1.20.4.4 2010/08/11 22:53:18 yamt Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apm.c,v 1.20.4.4 2010/08/11 22:53:18 yamt Exp $");

#include "opt_apm.h"

#ifdef APM_NOIDLE
#error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/apm/apmvar.h>

#include <machine/stdarg.h>

#ifdef APMDEBUG
#define	DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)


#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)		/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

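/*
 * The device minor number encodes both the unit and the flavour of the
 * node: APMUNIT() extracts the unit, and the low nibble selects either
 * APM_NORMAL (the read-only status/event interface) or APM_CTL (the
 * exclusive read/write control interface).
 */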
#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define	APM_NORMAL	0
#define	APM_CTL		8

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This covers both the APM thread itself and
 * user context.
 */
#define	APM_LOCK(apmsc)						\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)					\
	(void) mutex_exit(&(apmsc)->sc_lock)
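/*
 * The lock is taken by the event thread around apm_periodic_check()
 * and by each of the character device entry points below.
 */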

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

extern struct cfdriver apm_cd;

dev_type_open(apmopen);
dev_type_close(apmclose);
dev_type_ioctl(apmioctl);
dev_type_poll(apmpoll);
dev_type_kqfilter(apmkqfilter);

const struct cdevsw apm_cdevsw = {
	apmopen, apmclose, noread, nowrite, apmioctl,
	nostop, notty, apmpoll, nommap, apmkqfilter, D_OTHER,
};

/* configurable variables */
int	apm_bogus_bios = 0;
#ifdef APM_DISABLE
int	apm_enabled = 0;
#else
int	apm_enabled = 1;
#endif
#ifdef APM_NO_IDLE
int	apm_do_idle = 0;
#else
int	apm_do_idle = 1;
#endif
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif
#ifdef APM_FORCE_64K_SEGMENTS
int	apm_force_64k_segments = 1;
#else
int	apm_force_64k_segments = 0;
#endif
#ifdef APM_ALLOW_BOGUS_SEGMENTS
int	apm_allow_bogus_segments = 1;
#else
int	apm_allow_bogus_segments = 0;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

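/*
 * apm_perror: printf-like diagnostic helper that formats the message
 * and appends the apm_strerror() text for the given APM error code.
 */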
static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);			/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

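/*
 * apm_suspend: enter the APM "suspend" state.  Unless the back-end set
 * APM_F_DONT_RUN_HOOKS, run the PMF suspend hooks and raise the SPL,
 * then ask the back-end to suspend all devices.  Control returns here
 * once the back-end returns (normally after the BIOS has resumed us,
 * or immediately on error), at which point apm_resume() undoes the
 * above.
 */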
static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_Q_NONE);
		apm_spl = splhigh();
	}

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
	else
		apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

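/*
 * apm_standby: like apm_suspend(), but enter the lighter-weight APM
 * "standby" state instead.
 */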
static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_Q_NONE);
		apm_spl = splhigh();
	}
	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
	else
		apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

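/*
 * apm_resume: return to the running state.  Resynchronize the clock,
 * restore the saved SPL and run the PMF resume hooks (unless the
 * back-end set APM_F_DONT_RUN_HOOKS), then record the resume event
 * for userland.
 */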
static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{
	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#ifdef TIMER_FREQ
	/*
	 * Some systems require their clock to be reinitialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		splx(apm_spl);
		pmf_system_resume(PMF_Q_NONE);
	}

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;		/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

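/*
 * apm_event_handle: process a single event reported by the BIOS.
 * Ordinary suspend/standby requests are not acted upon here; they only
 * bump the pending counters that apm_periodic_check() acts on once the
 * event queue has been drained.  A critical suspend is performed
 * immediately.  Most events are also recorded for any userland
 * listener via apm_record_event().
 */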
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_op_inprog) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_op_inprog) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
		case 0:
			code = "reserved system";
			break;
		case 1:
			code = "reserved device";
			break;
		case 2:
			code = "OEM defined";
			break;
		default:
			code = "reserved";
			break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

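/*
 * apm_periodic_check: drain pending BIOS events and then perform any
 * suspend or standby that was requested while doing so.  Called from
 * the kernel thread (with the softc lock held) and once directly at
 * attach time.
 */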
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;


	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

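/*
 * apm_set_ver: decide which APM specification version to advertise,
 * based on what the BIOS reports and on the apm_v11_enabled /
 * apm_v12_enabled knobs, and print the corresponding attach banner.
 */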
static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
	if (sc->sc_detail & APM_IDLE_SLOWS) {
#ifdef DIAGNOSTIC
		/* not relevant often */
		aprint_normal(" (slowidle)");
#endif
		/* leave apm_do_idle at its user-configured setting */
	} else
		apm_do_idle = 0;
#ifdef DIAGNOSTIC
	if (sc->sc_detail & APM_BIOS_PM_DISABLED)
		aprint_normal(" (BIOS mgmt disabled)");
	if (sc->sc_detail & APM_BIOS_PM_DISENGAGED)
		aprint_normal(" (BIOS managing devices)");
#endif
}

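/*
 * apm_match: succeeds only on the first call; at most one APM
 * interface may attach.
 */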
int
apm_match(void)
{
	static int got;
	return !got++;
}

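/*
 * apm_attach: machine-independent attachment, called by the back-end
 * once its BIOS interface is connected.  Negotiates the spec version,
 * enables power management, performs an initial event check and starts
 * the event-polling kernel thread.
 */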
void
apm_attach(struct apm_softc *sc)
{
	u_int numbatts, capflags;

	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}

	if (!pmf_device_register(sc->sc_dev, NULL, NULL))
		aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n");
}

void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev", (8 * hz) / 7);
	}
}

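/*
 * Character device entry points.  APM_NORMAL minors must be opened
 * read-only and may be shared; the APM_CTL minor requires write access
 * and is exclusive, so only one process at a time may drive
 * power-management policy.
 */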
int
apmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

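/*
 * apmioctl: user control requests.  The suspend and standby ioctls
 * only mark the request pending; the kernel thread notices the bumped
 * counters on its next pass and performs the actual state change.
 */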
int
apmioctl(dev_t dev, u_long cmd, void *data, int flag,
    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;
	struct apm_ctl *actl;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_DEV_CTL:
		actl = (struct apm_ctl *)data;
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
#if 0
		apm_get_powstate(actl->dev); /* XXX */
#endif
		error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, actl->dev,
		    actl->mode);
		apm_suspends++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}
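
/*
 * Example (illustrative only, not part of the driver): a userland
 * monitor such as apmd(8) would typically open the read-only minor
 * and query the power status roughly as follows.  The device path,
 * headers and error handling are assumptions for the sketch, not
 * something this file defines.
 *
 *	int fd = open("/dev/apm", O_RDONLY);
 *	struct apm_power_info pi;
 *
 *	if (fd != -1 && ioctl(fd, APM_IOC_GETPOWER, &pi) == 0)
 *		printf("battery %d%%, %d minutes left\n",
 *		    pi.battery_life, pi.minutes_left);
 */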

int
apmpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

int
apmkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}