/*	$NetBSD: apmdev.c,v 1.7 2006/10/09 10:33:42 peter Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.7 2006/10/09 10:33:42 peter Exp $");

#ifdef _KERNEL_OPT
#include "opt_apmdev.h"
#endif

#ifdef APM_NOIDLE
#error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/hpc/apm/apmvar.h>

#include <machine/stdarg.h>

#if defined(APMDEBUG)
#define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)

#define APMDEBUG_INFO		0x01
#define APMDEBUG_APMCALLS	0x02
#define APMDEBUG_EVENTS		0x04
#define APMDEBUG_PROBE		0x10
#define APMDEBUG_ATTACH		0x40
#define APMDEBUG_DEVICE		0x20
#define APMDEBUG_ANOM		0x40

#ifdef APMDEBUG_VALUE
int apmdebug = APMDEBUG_VALUE;
#else
int apmdebug = 0;
#endif
#else
#define DPRINTF(f, x)		/**/
#endif

#define APM_NEVENTS	16

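/*
 * Per-device software state: open flags, the ring buffer of queued
 * events handed to userland, and the machine-dependent accessops and
 * cookie supplied by the attachment code.
 */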
struct apm_softc {
        struct device sc_dev;
        struct selinfo sc_rsel;
        struct selinfo sc_xsel;
        int sc_flags;
        int event_count;
        int event_ptr;
        int sc_power_state;
        struct proc *sc_thread;
        struct lock sc_lock;
        struct apm_event_info event_list[APM_NEVENTS];
        struct apm_accessops *ops;
        void *cookie;
};
#define SCFLAG_OREAD	0x0000001
#define SCFLAG_OWRITE	0x0000002
#define SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

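/*
 * The minor number encodes both the unit (high nibble) and the device
 * flavor (low nibble): APMDEV_NORMAL is the read-only event device and
 * APMDEV_CTL is the read/write control device (conventionally /dev/apm
 * and /dev/apmctl, though the node names are up to MAKEDEV).
 */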
#define APMUNIT(dev)	(minor(dev)&0xf0)
#define APMDEV(dev)	(minor(dev)&0x0f)
#define APMDEV_NORMAL	0
#define APMDEV_CTL	8

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This is both the APM thread itself, as well as
 * user context.
 */
#define APM_LOCK(apmsc) \
        (void) lockmgr(&(apmsc)->sc_lock, LK_EXCLUSIVE, NULL)
#define APM_UNLOCK(apmsc) \
        (void) lockmgr(&(apmsc)->sc_lock, LK_RELEASE, NULL)

static void apmattach(struct device *, struct device *, void *);
static int apmmatch(struct device *, struct cfdata *, void *);

static void apm_event_handle(struct apm_softc *, u_int, u_int);
static void apm_periodic_check(struct apm_softc *);
static void apm_create_thread(void *);
static void apm_thread(void *);
static void apm_perror(const char *, int, ...)
            __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int apm_record_event(struct apm_softc *, u_int);
static void apm_set_ver(struct apm_softc *, u_long);
static void apm_standby(struct apm_softc *);
static const char *apm_strerror(int);
static void apm_suspend(struct apm_softc *);
static void apm_resume(struct apm_softc *, u_int, u_int);

CFATTACH_DECL(apmdev, sizeof(struct apm_softc),
    apmmatch, apmattach, NULL, NULL);

extern struct cfdriver apmdev_cd;

dev_type_open(apmdevopen);
dev_type_close(apmdevclose);
dev_type_ioctl(apmdevioctl);
dev_type_poll(apmdevpoll);
dev_type_kqfilter(apmdevkqfilter);

const struct cdevsw apmdev_cdevsw = {
        apmdevopen, apmdevclose, noread, nowrite, apmdevioctl,
        nostop, notty, apmdevpoll, nommap, apmdevkqfilter,
};

/* configurable variables */
int apm_bogus_bios = 0;
#ifdef APM_DISABLE
int apm_enabled = 0;
#else
int apm_enabled = 1;
#endif
#ifdef APM_NO_IDLE
int apm_do_idle = 0;
#else
int apm_do_idle = 1;
#endif
#ifdef APM_NO_STANDBY
int apm_do_standby = 0;
#else
int apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int apm_v11_enabled = 0;
#else
int apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int apm_v12_enabled = 0;
#else
int apm_v12_enabled = 1;
#endif

/* variables used during operation (XXX cgd) */
u_char apm_majver, apm_minver;
int apm_inited;
int apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int apm_damn_fool_bios, apm_op_inprog;
int apm_evindex;

static int apm_spl;		/* saved spl while suspended */

static const char *
apm_strerror(int code)
{
        switch (code) {
        case APM_ERR_PM_DISABLED:
                return ("power management disabled");
        case APM_ERR_REALALREADY:
                return ("real mode interface already connected");
        case APM_ERR_NOTCONN:
                return ("interface not connected");
        case APM_ERR_16ALREADY:
                return ("16-bit interface already connected");
        case APM_ERR_16NOTSUPP:
                return ("16-bit interface not supported");
        case APM_ERR_32ALREADY:
                return ("32-bit interface already connected");
        case APM_ERR_32NOTSUPP:
                return ("32-bit interface not supported");
        case APM_ERR_UNRECOG_DEV:
                return ("unrecognized device ID");
        case APM_ERR_ERANGE:
                return ("parameter out of range");
        case APM_ERR_NOTENGAGED:
                return ("interface not engaged");
        case APM_ERR_UNABLE:
                return ("unable to enter requested state");
        case APM_ERR_NOEVENTS:
                return ("no pending events");
        case APM_ERR_NOT_PRESENT:
                return ("no APM present");
        default:
                return ("unknown error code");
        }
}

static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
        va_list ap;

        printf("APM ");

        va_start(ap, errinfo);
        vprintf(str, ap);		/* XXX cgd */
        va_end(ap);

        printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

        if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
                printf("%s: battery life expectancy: %d%%\n",
                    sc->sc_dev.dv_xname, pi->battery_life);
        }
        printf("%s: A/C state: ", sc->sc_dev.dv_xname);
        switch (pi->ac_state) {
        case APM_AC_OFF:
                printf("off\n");
                break;
        case APM_AC_ON:
                printf("on\n");
                break;
        case APM_AC_BACKUP:
                printf("backup power\n");
                break;
        default:
        case APM_AC_UNKNOWN:
                printf("unknown\n");
                break;
        }
        if (apm_majver == 1 && apm_minver == 0) {
                printf("%s: battery charge state: ", sc->sc_dev.dv_xname);
                switch (pi->battery_state) {
                case APM_BATT_HIGH:
                        printf("high\n");
                        break;
                case APM_BATT_LOW:
                        printf("low\n");
                        break;
                case APM_BATT_CRITICAL:
                        printf("critical\n");
                        break;
                case APM_BATT_CHARGING:
                        printf("charging\n");
                        break;
                case APM_BATT_UNKNOWN:
                        printf("unknown\n");
                        break;
                default:
                        printf("undecoded state %x\n", pi->battery_state);
                        break;
                }
        } else {
                if (pi->battery_state & APM_BATT_FLAG_CHARGING)
                        printf("charging ");
                if (pi->battery_state & APM_BATT_FLAG_UNKNOWN)
                        printf("unknown\n");
                else if (pi->battery_state & APM_BATT_FLAG_CRITICAL)
                        printf("critical\n");
                else if (pi->battery_state & APM_BATT_FLAG_LOW)
                        printf("low\n");
                else if (pi->battery_state & APM_BATT_FLAG_HIGH)
                        printf("high\n");
        }
        if (pi->minutes_left != 0) {
                printf("%s: estimated ", sc->sc_dev.dv_xname);
                printf("%dh %dm\n", pi->minutes_left / 60,
                    pi->minutes_left % 60);
        }
        return;
}
#endif

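/*
 * Enter the APM suspend state: run the soft suspend power hooks, give
 * drivers a moment to settle, raise spl, run the hard suspend hooks and
 * finally ask the BIOS (via the accessops) to suspend all devices.
 */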
static void
apm_suspend(struct apm_softc *sc)
{

        if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
                printf("%s: apm_suspend: already suspended?\n",
                    sc->sc_dev.dv_xname);
#endif
                return;
        }
        sc->sc_power_state = PWR_SUSPEND;

        dopowerhooks(PWR_SOFTSUSPEND);
        (void) tsleep(sc, PWAIT, "apmsuspend", hz/2);

        apm_spl = splhigh();

        dopowerhooks(PWR_SUSPEND);

        /* XXX cgd */
        (void)sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS,
            APM_SYS_SUSPEND);
}

static void
apm_standby(struct apm_softc *sc)
{

        if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
                printf("%s: apm_standby: already standing by?\n",
                    sc->sc_dev.dv_xname);
#endif
                return;
        }
        sc->sc_power_state = PWR_STANDBY;

        dopowerhooks(PWR_SOFTSTANDBY);
        (void) tsleep(sc, PWAIT, "apmstandby", hz/2);

        apm_spl = splhigh();

        dopowerhooks(PWR_STANDBY);
        /* XXX cgd */
        (void)sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS,
            APM_SYS_STANDBY);
}

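/*
 * Undo apm_suspend()/apm_standby(): resynchronize the clock, run the
 * resume power hooks, restore the saved spl and queue a resume event
 * for userland.
 */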
static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{

        if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
                printf("%s: apm_resume: already running?\n",
                    sc->sc_dev.dv_xname);
#endif
                return;
        }
        sc->sc_power_state = PWR_RESUME;

        /*
         * Some systems require their real-time clock to be reinitialized
         * after resuming from hibernation.
         */
        /* XXX
        initrtclock();
        */

        inittodr(time_second);
        dopowerhooks(PWR_RESUME);

        splx(apm_spl);

        dopowerhooks(PWR_SOFTRESUME);

        apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
        struct apm_event_info *evp;

        if ((sc->sc_flags & SCFLAG_OPEN) == 0)
                return 1;		/* no user waiting */
        if (sc->event_count == APM_NEVENTS)
                return 1;		/* overflow */
        evp = &sc->event_list[sc->event_ptr];
        sc->event_count++;
        sc->event_ptr++;
        sc->event_ptr %= APM_NEVENTS;
        evp->type = event_type;
        evp->index = ++apm_evindex;
        selnotify(&sc->sc_rsel, 0);
        return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

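/*
 * Decode one BIOS event.  Suspend/standby requests are queued for a
 * userland daemon (e.g. apmd) when one is listening, otherwise the
 * kernel acts on them itself; the BIOS is kept informed of the
 * outstanding request via APM_LASTREQ_INPROG/APM_LASTREQ_REJECTED.
 */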
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
        int error;
        const char *code;
        struct apm_power_info pi;

        switch (event_code) {
        case APM_USER_STANDBY_REQ:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
                if (apm_do_standby) {
                        if (apm_record_event(sc, event_code))
                                apm_userstandbys++;
                        apm_op_inprog++;
                        (void)sc->ops->set_powstate(sc->cookie,
                            APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
                } else {
                        (void)sc->ops->set_powstate(sc->cookie,
                            APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
                        /* in case BIOS hates being spurned */
                        sc->ops->enable(sc->cookie, 1);
                }
                break;

        case APM_STANDBY_REQ:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
                if (apm_standbys || apm_suspends) {
                        DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
                            ("damn fool BIOS did not wait for answer\n"));
                        /* just give up the fight */
                        apm_damn_fool_bios = 1;
                }
                if (apm_do_standby) {
                        if (apm_record_event(sc, event_code))
                                apm_standbys++;
                        apm_op_inprog++;
                        (void)sc->ops->set_powstate(sc->cookie,
                            APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
                } else {
                        (void)sc->ops->set_powstate(sc->cookie,
                            APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
                        /* in case BIOS hates being spurned */
                        sc->ops->enable(sc->cookie, 1);
                }
                break;

        case APM_USER_SUSPEND_REQ:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
                if (apm_record_event(sc, event_code))
                        apm_suspends++;
                apm_op_inprog++;
                (void)sc->ops->set_powstate(sc->cookie,
                    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
                break;

        case APM_SUSPEND_REQ:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
                if (apm_standbys || apm_suspends) {
                        DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
                            ("damn fool BIOS did not wait for answer\n"));
                        /* just give up the fight */
                        apm_damn_fool_bios = 1;
                }
                if (apm_record_event(sc, event_code))
                        apm_suspends++;
                apm_op_inprog++;
                (void)sc->ops->set_powstate(sc->cookie,
                    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
                break;

        case APM_POWER_CHANGE:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
                error = sc->ops->get_powstat(sc->cookie, &pi);
#ifdef APM_POWER_PRINT
                /* only print if nobody is catching events. */
                if (error == 0 &&
                    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
                        apm_power_print(sc, &pi);
#endif
                apm_record_event(sc, event_code);
                break;

        case APM_NORMAL_RESUME:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
                apm_resume(sc, event_code, event_info);
                break;

        case APM_CRIT_RESUME:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
                apm_resume(sc, event_code, event_info);
                break;

        case APM_SYS_STANDBY_RESUME:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
                apm_resume(sc, event_code, event_info);
                break;

        case APM_UPDATE_TIME:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
                apm_resume(sc, event_code, event_info);
                break;

        case APM_CRIT_SUSPEND_REQ:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
                apm_record_event(sc, event_code);
                apm_suspend(sc);
                break;

        case APM_BATTERY_LOW:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
                apm_battlow++;
                apm_record_event(sc, event_code);
                break;

        case APM_CAP_CHANGE:
                DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
                if (apm_minver < 2) {
                        DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
                } else {
                        u_int numbatts, capflags;
                        sc->ops->get_capabilities(sc->cookie,
                            &numbatts, &capflags);
                        sc->ops->get_powstat(sc->cookie, &pi); /* XXX */
                }
                break;

        default:
                switch (event_code >> 8) {
                case 0:
                        code = "reserved system";
                        break;
                case 1:
                        code = "reserved device";
                        break;
                case 2:
                        code = "OEM defined";
                        break;
                default:
                        code = "reserved";
                        break;
                }
                printf("APM: %s event code %x\n", code, event_code);
        }
}

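/*
 * Drain all pending BIOS events, then act on any suspend/standby
 * requests recorded while doing so.  Called from the APM kernel thread
 * and once directly at attach time.
 */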
static void
apm_periodic_check(struct apm_softc *sc)
{
        int error;
        u_int event_code, event_info;

        /*
         * tell the BIOS we're working on it, if asked to do a
         * suspend/standby
         */
        if (apm_op_inprog)
                sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS,
                    APM_LASTREQ_INPROG);

        while ((error = sc->ops->get_event(sc->cookie, &event_code,
            &event_info)) == 0 && !apm_damn_fool_bios)
                apm_event_handle(sc, event_code, event_info);

        if (error != APM_ERR_NOEVENTS)
                apm_perror("get event", error);
        if (apm_suspends) {
                apm_op_inprog = 0;
                apm_suspend(sc);
        } else if (apm_standbys || apm_userstandbys) {
                apm_op_inprog = 0;
                apm_standby(sc);
        }
        apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
        apm_damn_fool_bios = 0;
}

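/*
 * Pick the highest APM protocol revision that is both reported by the
 * BIOS and enabled by the kernel options, announce it, and note whether
 * the BIOS slows the clock when idle (otherwise idle calls are turned
 * off by clearing apm_do_idle).
 */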
static void
apm_set_ver(struct apm_softc *self, u_long detail)
{

        if (apm_v12_enabled &&
            APM_MAJOR_VERS(detail) == 1 &&
            APM_MINOR_VERS(detail) == 2) {
                apm_majver = 1;
                apm_minver = 2;
                goto ok;
        }

        if (apm_v11_enabled &&
            APM_MAJOR_VERS(detail) == 1 &&
            APM_MINOR_VERS(detail) == 1) {
                apm_majver = 1;
                apm_minver = 1;
        } else {
                apm_majver = 1;
                apm_minver = 0;
        }
ok:
        printf("Power Management spec V%d.%d", apm_majver, apm_minver);
        apm_inited = 1;
        if (detail & APM_IDLE_SLOWS) {
#ifdef DIAGNOSTIC
                /* not relevant often */
                printf(" (slowidle)");
#endif
                /* leave apm_do_idle at its user-configured setting */
        } else
                apm_do_idle = 0;
#ifdef DIAGNOSTIC
        if (detail & APM_BIOS_PM_DISABLED)
                printf(" (BIOS mgmt disabled)");
        if (detail & APM_BIOS_PM_DISENGAGED)
                printf(" (BIOS managing devices)");
#endif
}

static int
apmmatch(struct device *parent, struct cfdata *match, void *aux)
{

        /* There can be only one! */
        if (apm_inited)
                return 0;

        return (1);
}

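/*
 * Attach the machine-independent APM driver to the machine-dependent
 * backend: record the accessops and cookie, negotiate a protocol
 * version, enable power management and start the event-polling thread.
 */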
static void
apmattach(struct device *parent, struct device *self, void *aux)
{
        struct apm_softc *sc = (void *)self;
        struct apmdev_attach_args *aaa = aux;
        struct apm_power_info pinfo;
        u_int numbatts, capflags;
        int error;

        printf(": ");

        sc->ops = aaa->accessops;
        sc->cookie = aaa->accesscookie;

        switch ((APM_MAJOR_VERS(aaa->apm_detail) << 8) +
            APM_MINOR_VERS(aaa->apm_detail)) {
        case 0x0100:
                apm_v11_enabled = 0;
                apm_v12_enabled = 0;
                break;
        case 0x0101:
                apm_v12_enabled = 0;
                /* fall through */
        case 0x0102:
        default:
                break;
        }

        apm_set_ver(sc, aaa->apm_detail);	/* prints version info */
        printf("\n");
        if (apm_minver >= 2)
                sc->ops->get_capabilities(sc->cookie, &numbatts, &capflags);

        /*
         * enable power management
         */
        sc->ops->enable(sc->cookie, 1);

        error = sc->ops->get_powstat(sc->cookie, &pinfo);
        if (error == 0) {
#ifdef APM_POWER_PRINT
                apm_power_print(sc, &pinfo);
#endif
        } else
                apm_perror("get power status", error);
        sc->ops->cpu_busy(sc->cookie);

        lockinit(&sc->sc_lock, PWAIT, "apmlk", 0, 0);

        /* Initial state is `resumed'. */
        sc->sc_power_state = PWR_RESUME;

        /* Do an initial check. */
        apm_periodic_check(sc);

        /*
         * Create a kernel thread to periodically check for APM events,
         * and notify other subsystems when they occur.
         */
        kthread_create(apm_create_thread, sc);

        return;
}

/*
 * Print function (for parent devices).
 */
int
apmprint(void *aux, const char *pnp)
{
        if (pnp)
                aprint_normal("apm at %s", pnp);

        return (UNCONF);
}

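/*
 * Deferred kthread bootstrap, run once kernel threads can be created.
 * If the thread cannot be created, disconnect from the BIOS and leave
 * kernel APM support disabled.
 */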
void
apm_create_thread(void *arg)
{
        struct apm_softc *sc = arg;

        if (kthread_create1(apm_thread, sc, &sc->sc_thread,
            "%s", sc->sc_dev.dv_xname) == 0)
                return;

        /*
         * We were unable to create the APM thread; bail out.
         */
        sc->ops->disconnect(sc->cookie);
        printf("%s: unable to create thread, kernel APM support disabled\n",
            sc->sc_dev.dv_xname);
}

void
apm_thread(void *arg)
{
        struct apm_softc *apmsc = arg;

        /*
         * Loop forever, doing a periodic check for APM events.
         */
        for (;;) {
                APM_LOCK(apmsc);
                apm_periodic_check(apmsc);
                APM_UNLOCK(apmsc);
                (void) tsleep(apmsc, PWAIT, "apmev", (8 * hz) / 7);
        }
}

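/*
 * /dev entry points.  The control device admits a single writer at a
 * time (normally the power-management daemon); the normal device may be
 * opened read-only by any number of processes.
 */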
int
apmdevopen(dev_t dev, int flag, int mode, struct lwp *l)
{
        int unit = APMUNIT(dev);
        int ctl = APMDEV(dev);
        int error = 0;
        struct apm_softc *sc;

        if (unit >= apmdev_cd.cd_ndevs)
                return ENXIO;
        sc = apmdev_cd.cd_devs[unit];
        if (!sc)
                return ENXIO;

        if (!apm_inited)
                return ENXIO;

        DPRINTF(APMDEBUG_DEVICE,
            ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

        APM_LOCK(sc);
        switch (ctl) {
        case APMDEV_CTL:
                if (!(flag & FWRITE)) {
                        error = EINVAL;
                        break;
                }
                if (sc->sc_flags & SCFLAG_OWRITE) {
                        error = EBUSY;
                        break;
                }
                sc->sc_flags |= SCFLAG_OWRITE;
                break;
        case APMDEV_NORMAL:
                if (!(flag & FREAD) || (flag & FWRITE)) {
                        error = EINVAL;
                        break;
                }
                sc->sc_flags |= SCFLAG_OREAD;
                break;
        default:
                error = ENXIO;
                break;
        }
        APM_UNLOCK(sc);

        return (error);
}

int
apmdevclose(dev_t dev, int flag, int mode, struct lwp *l)
{
        struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
        int ctl = APMDEV(dev);

        DPRINTF(APMDEBUG_DEVICE,
            ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

        APM_LOCK(sc);
        switch (ctl) {
        case APMDEV_CTL:
                sc->sc_flags &= ~SCFLAG_OWRITE;
                break;
        case APMDEV_NORMAL:
                sc->sc_flags &= ~SCFLAG_OREAD;
                break;
        }
        if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
                sc->event_count = 0;
                sc->event_ptr = 0;
        }
        APM_UNLOCK(sc);
        return 0;
}

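/*
 * ioctl interface: request standby/suspend, dequeue events, and query
 * the battery/AC state (translating the APM 1.1+ flag word back into
 * the single-valued 1.0 battery_state for compatibility).
 *
 * A minimal userland sketch, assuming the usual apmvar.h definitions
 * and a /dev/apm node:
 *
 *	int fd = open("/dev/apm", O_RDONLY);
 *	struct apm_power_info info;
 *	if (fd >= 0 && ioctl(fd, APM_IOC_GETPOWER, &info) == 0)
 *		printf("battery life: %d%%\n", info.battery_life);
 */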
int
apmdevioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
{
        struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
        struct apm_power_info *powerp;
        struct apm_event_info *evp;
#if 0
        struct apm_ctl *actl;
#endif
        int i, error = 0;
        int batt_flags;

        APM_LOCK(sc);
        switch (cmd) {
        case APM_IOC_STANDBY:
                if (!apm_do_standby) {
                        error = EOPNOTSUPP;
                        break;
                }

                if ((flag & FWRITE) == 0) {
                        error = EBADF;
                        break;
                }
                apm_userstandbys++;
                break;

        case APM_IOC_SUSPEND:
                if ((flag & FWRITE) == 0) {
                        error = EBADF;
                        break;
                }
                apm_suspends++;
                break;

        case APM_IOC_NEXTEVENT:
                if (!sc->event_count)
                        error = EAGAIN;
                else {
                        evp = (struct apm_event_info *)data;
                        i = sc->event_ptr + APM_NEVENTS - sc->event_count;
                        i %= APM_NEVENTS;
                        *evp = sc->event_list[i];
                        sc->event_count--;
                }
                break;

        case OAPM_IOC_GETPOWER:
        case APM_IOC_GETPOWER:
                powerp = (struct apm_power_info *)data;
                if ((error = sc->ops->get_powstat(sc->cookie, powerp)) != 0) {
                        apm_perror("ioctl get power status", error);
                        error = EIO;
                        break;
                }
                switch (apm_minver) {
                case 0:
                        break;
                case 1:
                default:
                        batt_flags = powerp->battery_state;
                        powerp->battery_state = APM_BATT_UNKNOWN;
                        if (batt_flags & APM_BATT_FLAG_HIGH)
                                powerp->battery_state = APM_BATT_HIGH;
                        else if (batt_flags & APM_BATT_FLAG_LOW)
                                powerp->battery_state = APM_BATT_LOW;
                        else if (batt_flags & APM_BATT_FLAG_CRITICAL)
                                powerp->battery_state = APM_BATT_CRITICAL;
                        else if (batt_flags & APM_BATT_FLAG_CHARGING)
                                powerp->battery_state = APM_BATT_CHARGING;
                        else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
                                powerp->battery_state = APM_BATT_ABSENT;
                        break;
                }
                break;

        default:
                error = ENOTTY;
        }
        APM_UNLOCK(sc);

        return (error);
}

int
apmdevpoll(dev_t dev, int events, struct lwp *l)
{
        struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
        int revents = 0;

        APM_LOCK(sc);
        if (events & (POLLIN | POLLRDNORM)) {
                if (sc->event_count)
                        revents |= events & (POLLIN | POLLRDNORM);
                else
                        selrecord(l, &sc->sc_rsel);
        }
        APM_UNLOCK(sc);

        return (revents);
}

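/*
 * kqueue(2) support: EVFILT_READ fires while there are queued events,
 * with kn_data set to the number of events pending.
 */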
static void
filt_apmrdetach(struct knote *kn)
{
        struct apm_softc *sc = kn->kn_hook;

        APM_LOCK(sc);
        SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
        APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
        struct apm_softc *sc = kn->kn_hook;

        kn->kn_data = sc->event_count;
        return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
        { 1, NULL, filt_apmrdetach, filt_apmread };

int
apmdevkqfilter(dev_t dev, struct knote *kn)
{
        struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
        struct klist *klist;

        switch (kn->kn_filter) {
        case EVFILT_READ:
                klist = &sc->sc_rsel.sel_klist;
                kn->kn_fop = &apmread_filtops;
                break;

        default:
                return (1);
        }

        kn->kn_hook = sc;

        APM_LOCK(sc);
        SLIST_INSERT_HEAD(klist, kn, kn_selnext);
        APM_UNLOCK(sc);

        return (0);
}