/* $NetBSD: apm.c,v 1.22.14.1 2010/04/21 00:27:34 matt Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apm.c,v 1.22.14.1 2010/04/21 00:27:34 matt Exp $");

#include "opt_apm.h"

#ifdef APM_NOIDLE
#error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/apm/apmvar.h>

#include <machine/stdarg.h>

#ifdef APMDEBUG
#define DPRINTF(f, x)	do { if (apmdebug & (f)) printf x; } while (0)

#ifdef APMDEBUG_VALUE
int apmdebug = APMDEBUG_VALUE;
#else
int apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define DPRINTF(f, x)	/**/
#endif /* APMDEBUG */

#define SCFLAG_OREAD	0x0000001
#define SCFLAG_OWRITE	0x0000002
#define SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

#define APMUNIT(dev)	(minor(dev)&0xf0)
#define APM(dev)	(minor(dev)&0x0f)
#define APM_NORMAL	0
#define APM_CTL		8

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This is both the APM thread itself, as well as
 * user context.
 */
#define APM_LOCK(apmsc)		\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define APM_UNLOCK(apmsc)	\
	(void) mutex_exit(&(apmsc)->sc_lock)

static void apm_event_handle(struct apm_softc *, u_int, u_int);
static void apm_periodic_check(struct apm_softc *);
static void apm_thread(void *);
static void apm_perror(const char *, int, ...)
	__attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int apm_record_event(struct apm_softc *, u_int);
static void apm_set_ver(struct apm_softc *);
static void apm_standby(struct apm_softc *);
static void apm_suspend(struct apm_softc *);
static void apm_resume(struct apm_softc *, u_int, u_int);

extern struct cfdriver apm_cd;

dev_type_open(apmopen);
dev_type_close(apmclose);
dev_type_ioctl(apmioctl);
dev_type_poll(apmpoll);
dev_type_kqfilter(apmkqfilter);

const struct cdevsw apm_cdevsw = {
	apmopen, apmclose, noread, nowrite, apmioctl,
	nostop, notty, apmpoll, nommap, apmkqfilter, D_OTHER,
};

/* configurable variables */
int apm_bogus_bios = 0;
#ifdef APM_DISABLE
int apm_enabled = 0;
#else
int apm_enabled = 1;
#endif
#ifdef APM_NO_IDLE
int apm_do_idle = 0;
#else
int apm_do_idle = 1;
#endif
#ifdef APM_NO_STANDBY
int apm_do_standby = 0;
#else
int apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int apm_v11_enabled = 0;
#else
int apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int apm_v12_enabled = 0;
#else
int apm_v12_enabled = 1;
#endif
#ifdef APM_FORCE_64K_SEGMENTS
int apm_force_64k_segments = 1;
#else
int apm_force_64k_segments = 0;
#endif
#ifdef APM_ALLOW_BOGUS_SEGMENTS
int apm_allow_bogus_segments = 1;
#else
int apm_allow_bogus_segments = 0;
#endif

/* variables used during operation (XXX cgd) */
u_char apm_majver, apm_minver;
int apm_inited;
int apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int apm_damn_fool_bios, apm_op_inprog;
int apm_evindex;

static int apm_spl;		/* saved spl while suspended */

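/*
 * Translate an APM error code into a human-readable string.
 */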
const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

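/*
 * Print a formatted warning followed by the decoded APM error code.
 */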
static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);		/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
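/*
 * Report the current A/C and battery status on the console.
 */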
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

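/*
 * Put the whole system into suspend: run the PMF suspend hooks (unless
 * the back-end asked us not to), raise the spl, and request
 * APM_SYS_SUSPEND from the BIOS.  Control returns here on wakeup, at
 * which point apm_resume() undoes the above.
 */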
static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_F_NONE);
		apm_spl = splhigh();
	}

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
	else
		apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

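/*
 * Like apm_suspend(), but request the lighter-weight APM_SYS_STANDBY
 * state from the BIOS.
 */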
static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_F_NONE);
		apm_spl = splhigh();
	}
	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
	else
		apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

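/*
 * Return from a standby or suspend: reset the clock if necessary,
 * restore the saved spl, run the PMF resume hooks and record the event
 * for any listening user process.
 */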
static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{
	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#ifdef TIMER_FREQ
	/*
	 * Some systems require their clock to be reinitialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		splx(apm_spl);
		pmf_system_resume(PMF_F_NONE);
	}

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;		/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

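/*
 * Dispatch one event reported by the BIOS.  Standby and suspend
 * requests are normally just flagged here and acted upon later by
 * apm_periodic_check(); a critical suspend request is honoured
 * immediately.
 */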
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_op_inprog) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_op_inprog) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
		case 0:
			code = "reserved system";
			break;
		case 1:
			code = "reserved device";
			break;
		case 2:
			code = "OEM defined";
			break;
		default:
			code = "reserved";
			break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

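/*
 * Poll the BIOS for pending events, hand them to apm_event_handle()
 * and then carry out any standby or suspend that was requested.
 */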
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;

	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

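/*
 * Settle on the APM specification version to run, honouring the
 * apm_v11_enabled/apm_v12_enabled knobs, and print what was found.
 */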
static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
	if (sc->sc_detail & APM_IDLE_SLOWS) {
#ifdef DIAGNOSTIC
		/* not relevant often */
		aprint_normal(" (slowidle)");
#endif
		/* leave apm_do_idle at its user-configured setting */
	} else
		apm_do_idle = 0;
#ifdef DIAGNOSTIC
	if (sc->sc_detail & APM_BIOS_PM_DISABLED)
		aprint_normal(" (BIOS mgmt disabled)");
	if (sc->sc_detail & APM_BIOS_PM_DISENGAGED)
		aprint_normal(" (BIOS managing devices)");
#endif
}

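/*
 * Succeed only on the first probe so that a single apm device attaches.
 */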
int
apm_match(void)
{
	static int got;
	return !got++;
}

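/*
 * Attach the APM pseudo-device: pick the spec version, enable power
 * management in the BIOS and start the event-polling kernel thread.
 */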
void
apm_attach(struct apm_softc *sc)
{
	u_int numbatts, capflags;

	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) +
	    APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}

	if (!pmf_device_register(sc->sc_dev, NULL, NULL))
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
}

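/*
 * Kernel thread body: poll for APM events about every 8/7 seconds.
 */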
void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev", (8 * hz) / 7);
	}
}

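/*
 * Open the normal (read-only status) or control (write, exclusive)
 * minor device.
 */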
int
apmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

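/*
 * Close a minor device; once nothing is open, drop any queued events.
 */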
int
apmclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

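/*
 * ioctl interface: request standby or suspend, set a device power
 * state, dequeue events and query power status.
 */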
int
apmioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;
	struct apm_ctl *actl;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_DEV_CTL:
		actl = (struct apm_ctl *)data;
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
#if 0
		apm_get_powstate(actl->dev); /* XXX */
#endif
		error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    actl->dev, actl->mode);
		apm_suspends++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

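/*
 * poll(2) support: readable whenever events are queued.
 */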
int
apmpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

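/*
 * kqueue support: EVFILT_READ fires while events are queued.
 */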
static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

int
apmkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}