/*	$NetBSD: apm.c,v 1.32 2022/02/11 04:23:18 thorpej Exp $	*/
/*	$OpenBSD: apm.c,v 1.5 2002/06/07 07:13:59 miod Exp $	*/

/*-
 * Copyright (c) 2001 Alexander Guy.  All rights reserved.
 * Copyright (c) 1998-2001 Michael Shalayeff.  All rights reserved.
 * Copyright (c) 1995 John T. Kohl.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the authors nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apm.c,v 1.32 2022/02/11 04:23:18 thorpej Exp $");

#include "apm.h"

#if NAPM > 1
#error only one APM emulation device may be configured
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/mutex.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <machine/cpu.h>
#include <machine/apmvar.h>

#include <macppc/dev/adbvar.h>
#include <macppc/dev/pm_direct.h>

#if defined(APMDEBUG)
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)	/**/
#endif

#define	APM_NEVENTS	16

struct apm_softc {
	struct selinfo sc_rsel;
	int	sc_flags;
	int	event_count;
	int	event_ptr;
	kmutex_t sc_lock;
	struct	apm_event_info event_list[APM_NEVENTS];
};

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This covers both the APM thread itself and
 * user context.
 */
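/*
 * As an illustration only (no additional functionality): every entry
 * point below follows the same pattern, using the APM_LOCK/APM_UNLOCK
 * macros defined next:
 *
 *	APM_LOCK(sc);
 *	... examine or update sc->sc_flags and the event ring ...
 *	APM_UNLOCK(sc);
 */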
#define	APM_LOCK(apmsc)		mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)	mutex_exit(&(apmsc)->sc_lock)

int apmmatch(device_t, cfdata_t, void *);
void apmattach(device_t, device_t, void *);

#if 0
static int apm_record_event(struct apm_softc *, u_int);
#endif

CFATTACH_DECL_NEW(apm, sizeof(struct apm_softc),
    apmmatch, apmattach, NULL, NULL);

extern struct cfdriver apm_cd;

dev_type_open(apmopen);
dev_type_close(apmclose);
dev_type_ioctl(apmioctl);
dev_type_poll(apmpoll);
dev_type_kqfilter(apmkqfilter);

const struct cdevsw apm_cdevsw = {
	.d_open = apmopen,
	.d_close = apmclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = apmioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = apmpoll,
	.d_mmap = nommap,
	.d_kqfilter = apmkqfilter,
	.d_discard = nodiscard,
	.d_flag = 0
};

int apm_evindex;

#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APMDEV(dev)	(minor(dev)&0x0f)
#define	APMDEV_NORMAL	0
#define	APMDEV_CTL	8

/*
 * Flags to control kernel display
 *	SCFLAG_NOPRINT:		do not output APM power messages due to
 *				a power change event.
 *
 *	SCFLAG_PCTPRINT:	do not output APM power messages due to
 *				a power change event unless the battery
 *				percentage changes.
 */

#define	SCFLAG_NOPRINT	0x0008000
#define	SCFLAG_PCTPRINT	0x0004000
#define	SCFLAG_PRINT	(SCFLAG_NOPRINT|SCFLAG_PCTPRINT)

#define	SCFLAG_OREAD	(1 << 0)
#define	SCFLAG_OWRITE	(1 << 1)
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)


int
apmmatch(device_t parent, cfdata_t match, void *aux)
{
	struct adb_attach_args *aa = (void *)aux;

	if (aa->origaddr != ADBADDR_APM ||
	    aa->handler_id != ADBADDR_APM ||
	    aa->adbaddr != ADBADDR_APM)
		return 0;

	if (adbHardware != ADB_HW_PMU)
		return 0;

	return 1;
}

void
apmattach(device_t parent, device_t self, void *aux)
{
	struct apm_softc *sc = device_private(self);
	struct pmu_battery_info info;

	pm_battery_info(0, &info);

	printf(": battery flags 0x%X, ", info.flags);
	printf("%d%% charged\n", ((info.cur_charge * 100) / info.max_charge));

	sc->sc_flags = 0;
	sc->event_ptr = 0;
	sc->event_count = 0;
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	selinit(&sc->sc_rsel);
}

int
apmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct apm_softc *sc;
	int error = 0;

	/* apm0 only */
	sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTF(("apmopen: dev %d pid %d flag %x mode %x\n",
	    APMDEV(dev), l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (APMDEV(dev)) {
	case APMDEV_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APMDEV_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);
	return error;
}
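/*
 * Hypothetical userland sketch of how the two minor devices are reached
 * (the /dev/apm and /dev/apmctl node names are an assumption taken from
 * the usual apm(4) setup; this driver does not create them itself):
 *
 *	int fd  = open("/dev/apm", O_RDONLY);	   APMDEV_NORMAL: read-only status
 *	int ctl = open("/dev/apmctl", O_WRONLY);   APMDEV_CTL: single writer at a time
 */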
int
apmclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct apm_softc *sc;

	/* apm0 only */
	sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTF(("apmclose: pid %d flag %x mode %x\n",
	    l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (APMDEV(dev)) {
	case APMDEV_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APMDEV_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	APM_UNLOCK(sc);
	return 0;
}

int
apmioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct apm_softc *sc;
	struct pmu_battery_info batt;
	struct apm_power_info *power;
	int error = 0;

	/* apm0 only */
	sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	APM_LOCK(sc);
	switch (cmd) {
		/* some ioctl names from linux */
	case APM_IOC_STANDBY:
	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0)
			error = EBADF;
		break;
	case APM_IOC_PRN_CTL:
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else {
			int op = *(int *)data;
			DPRINTF(("APM_IOC_PRN_CTL: %d\n", op));
			switch (op) {
			case APM_PRINT_ON:	/* enable printing */
				sc->sc_flags &= ~SCFLAG_PRINT;
				break;
			case APM_PRINT_OFF:	/* disable printing */
				sc->sc_flags &= ~SCFLAG_PRINT;
				sc->sc_flags |= SCFLAG_NOPRINT;
				break;
			case APM_PRINT_PCT:	/* disable some printing */
				sc->sc_flags &= ~SCFLAG_PRINT;
				sc->sc_flags |= SCFLAG_PCTPRINT;
				break;
			default:
				error = EINVAL;
				break;
			}
		}
		break;
	case APM_IOC_DEV_CTL:
		if ((flag & FWRITE) == 0)
			error = EBADF;
		break;
	case APM_IOC_GETPOWER:
		power = (struct apm_power_info *)data;

		pm_battery_info(0, &batt);

		power->ac_state = ((batt.flags & PMU_PWR_AC_PRESENT) ?
		    APM_AC_ON : APM_AC_OFF);
		power->battery_life =
		    ((batt.cur_charge * 100) / batt.max_charge);

		/*
		 * If the battery is charging, return the minutes left until
		 * charging is complete.  apmd knows this.
		 */

		if (!(batt.flags & PMU_PWR_BATT_PRESENT)) {
			power->battery_state = APM_BATT_UNKNOWN;
			power->minutes_left = 0;
			power->battery_life = 0;
		} else if ((power->ac_state == APM_AC_ON) &&
		    (batt.draw > 0)) {
			power->minutes_left = batt.secs_remaining / 60;
			power->battery_state = APM_BATT_CHARGING;
		} else {
			power->minutes_left = batt.secs_remaining / 60;

			/* XXX - Arbitrary */
			if (power->battery_life > 60) {
				power->battery_state = APM_BATT_HIGH;
			} else if (power->battery_life < 10) {
				power->battery_state = APM_BATT_CRITICAL;
			} else {
				power->battery_state = APM_BATT_LOW;
			}
		}

		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return error;
}
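/*
 * Hypothetical userland sketch of the APM_IOC_GETPOWER path handled above
 * (the /dev/apm path is an assumption; struct apm_power_info and the APM_*
 * constants come from <machine/apmvar.h>; error handling kept minimal):
 *
 *	struct apm_power_info pi;
 *	int fd = open("/dev/apm", O_RDONLY);
 *
 *	if (fd != -1 && ioctl(fd, APM_IOC_GETPOWER, &pi) == 0)
 *		printf("battery %d%%, AC %s\n", pi.battery_life,
 *		    pi.ac_state == APM_AC_ON ? "on" : "off");
 */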
#if 0
/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->event_count == APM_NEVENTS) {
		DPRINTF(("apm_record_event: queue full!\n"));
		return 1;		/* overflow */
	}
	evp = &sc->event_list[sc->event_ptr];
	sc->event_count++;
	sc->event_ptr++;
	sc->event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1;	/* user may handle */
}
#endif

int
apmpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = (struct apm_softc *)kn->kn_hook;

	APM_LOCK(sc);
	selremove_knote(&sc->sc_rsel, kn);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->event_count;
	return (kn->kn_data > 0);
}

static struct filterops apmread_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_apmrdetach,
	.f_event = filt_apmread,
};

int
apmkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &apmread_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	selrecord_knote(&sc->sc_rsel, kn);
	APM_UNLOCK(sc);

	return (0);
}
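/*
 * Hypothetical userland sketch of waiting for an APM event via the kqueue
 * filter above (assumes an fd already open on /dev/apm; error handling
 * omitted).  Note that apm_record_event() is currently compiled out, so
 * nothing ever queues events and the wait would not return:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	register the knote
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	blocks until event_count > 0
 */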