/*	$NetBSD: apmdev.c,v 1.30 2014/07/25 08:10:37 dholland Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.30 2014/07/25 08:10:37 dholland Exp $");

#ifdef _KERNEL_OPT
#include "opt_apm.h"
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/hpc/apm/apmvar.h>

#ifdef APMDEBUG
#define	DPRINTF(f, x)	do { if (apmdebug & (f)) printf x; } while (0)

#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)	/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

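/*
 * The minor number encodes both the unit (upper bits) and the device
 * class: the low nibble selects either the read-only status device or
 * the read/write control device (conventionally /dev/apm and
 * /dev/apmctl).
 */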
#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define	APM_NORMAL	0
#define	APM_CTL		8

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This covers both the APM thread itself and
 * user context.
 */
#define	APM_LOCK(apmsc)							\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)						\
	(void) mutex_exit(&(apmsc)->sc_lock)

static void	apmdevattach(device_t, device_t, void *);
static int	apmdevmatch(device_t, cfdata_t, void *);

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

CFATTACH_DECL_NEW(apmdev, sizeof(struct apm_softc),
    apmdevmatch, apmdevattach, NULL, NULL);

extern struct cfdriver apmdev_cd;

dev_type_open(apmdevopen);
dev_type_close(apmdevclose);
dev_type_ioctl(apmdevioctl);
dev_type_poll(apmdevpoll);
dev_type_kqfilter(apmdevkqfilter);

const struct cdevsw apmdev_cdevsw = {
	.d_open = apmdevopen,
	.d_close = apmdevclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = apmdevioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = apmdevpoll,
	.d_mmap = nommap,
	.d_kqfilter = apmdevkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

/* configurable variables */
int	apm_bogus_bios = 0;
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap); /* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

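	/*
	 * Run the "soft" hooks while interrupts are still enabled and
	 * give things a moment to settle, then block interrupts, run
	 * the "hard" hooks, and finally ask the BIOS to suspend.  If
	 * that call fails, resume immediately.
	 */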
	dopowerhooks(PWR_SOFTSUSPEND);
	(void) tsleep(sc, PWAIT, "apmsuspend", hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_SUSPEND);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
}

static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	dopowerhooks(PWR_SOFTSTANDBY);
	(void) tsleep(sc, PWAIT, "apmstandby", hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_STANDBY);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
}

static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{

	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#if 0 /* XXX: def TIME_FREQ */
	/*
	 * Some systems require their clock to be reinitialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	dopowerhooks(PWR_RESUME);

	splx(apm_spl);

	dopowerhooks(PWR_SOFTRESUME);

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;		/* overflow */
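	/*
	 * Append the event at the ring buffer's write index
	 * (sc_event_ptr); readers locate the oldest entry from
	 * sc_event_ptr and sc_event_count (see APM_IOC_NEXTEVENT).
	 */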
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#else
		__USE(error);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
		case 0:
			code = "reserved system";
			break;
		case 1:
			code = "reserved device";
			break;
		case 2:
			code = "OEM defined";
			break;
		default:
			code = "reserved";
			break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;

	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
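	/*
	 * Act on whatever the handled events accumulated above; a
	 * pending suspend takes precedence over standby requests.
	 */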
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
}

static int
apmdevmatch(device_t parent, cfdata_t match, void *aux)
{

	return apm_match();
}

static void
apmdevattach(device_t parent, device_t self, void *aux)
{
	struct apm_softc *sc;
	struct apmdev_attach_args *aaa = aux;

	sc = device_private(self);
	sc->sc_dev = self;

	sc->sc_detail = aaa->apm_detail;
	sc->sc_vers = aaa->apm_detail & 0xffff; /* XXX: magic */

	sc->sc_ops = aaa->accessops;
	sc->sc_cookie = aaa->accesscookie;

	apm_attach(sc);
}

/*
 * Print function (for parent devices).
 */
int
apmprint(void *aux, const char *pnp)
{
	if (pnp)
		aprint_normal("apm at %s", pnp);

	return (UNCONF);
}

int
apm_match(void)
{
	static int got;
	return !got++;
}

void
apm_attach(struct apm_softc *sc)
{
	struct apm_power_info pinfo;
	u_int numbatts, capflags;
	int error;

	aprint_naive("\n");
	aprint_normal(": ");

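	/*
	 * Restrict the interface versions we will negotiate to those
	 * the BIOS actually reports supporting.
	 */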
	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pinfo);
	if (error == 0) {
#ifdef APM_POWER_PRINT
		apm_power_print(sc, &pinfo);
#endif
	} else
		apm_perror("get power status", error);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}
}

void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
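		/* Sleep (8 * hz) / 7 ticks between polls, i.e. a bit over a second. */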
		(void) tsleep(apmsc, PWAIT, "apmev", (8 * hz) / 7);
	}
}

int
apmdevopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

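	/*
	 * The control minor requires, and takes exclusive, write access;
	 * the normal minor is read-only and may be opened by several
	 * readers at once.
	 */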
	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmdevclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

int
apmdevioctl(dev_t dev, u_long cmd, void *data, int flag,
    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
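			/*
			 * Index of the oldest queued event: the write
			 * pointer minus the queue length, modulo the
			 * ring size.
			 */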
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
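		/*
		 * APM 1.1 and later report battery status as flag bits;
		 * derive the legacy 1.0-style battery_state value from
		 * them so old callers keep working.
		 */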
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmdevpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

int
apmdevkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}