/*	$NetBSD: kern_pmf.c,v 1.12 2008/02/20 22:52:55 drochner Exp $	*/

/*-
 * Copyright (c) 2007 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Jared D. McNeill.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_pmf.c,v 1.12 2008/02/20 22:52:55 drochner Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/pmf.h>
#include <sys/queue.h>
#include <sys/syscallargs.h> /* for sys_sync */
#include <sys/workqueue.h>
#include <prop/proplib.h>

/* XXX ugly special case, but for now the only client */
#include "wsdisplay.h"
#if NWSDISPLAY > 0
#include <dev/wscons/wsdisplayvar.h>
#endif

/* #define PMF_DEBUG */

#ifdef PMF_DEBUG
int pmf_debug_event;
int pmf_debug_idle;
int pmf_debug_transition;

#define	PMF_EVENT_PRINTF(x)		do { if (pmf_debug_event) printf x; } while (0)
#define	PMF_IDLE_PRINTF(x)		do { if (pmf_debug_idle) printf x; } while (0)
#define	PMF_TRANSITION_PRINTF(x)	do { if (pmf_debug_transition) printf x; } while (0)
#define	PMF_TRANSITION_PRINTF2(y,x)	do { if (pmf_debug_transition > (y)) printf x; } while (0)
#else
#define	PMF_EVENT_PRINTF(x)		do { } while (0)
#define	PMF_IDLE_PRINTF(x)		do { } while (0)
#define	PMF_TRANSITION_PRINTF(x)	do { } while (0)
#define	PMF_TRANSITION_PRINTF2(y,x)	do { } while (0)
#endif

MALLOC_DEFINE(M_PMF, "pmf", "device pmf messaging memory");

static prop_dictionary_t pmf_platform = NULL;
static struct workqueue *pmf_event_workqueue;

typedef struct pmf_event_handler {
	TAILQ_ENTRY(pmf_event_handler) pmf_link;
	pmf_generic_event_t pmf_event;
	void (*pmf_handler)(device_t);
	device_t pmf_device;
	bool pmf_global;
} pmf_event_handler_t;

static TAILQ_HEAD(, pmf_event_handler) pmf_all_events =
    TAILQ_HEAD_INITIALIZER(pmf_all_events);

typedef struct pmf_event_workitem {
	struct work pew_work;
	pmf_generic_event_t pew_event;
	device_t pew_device;
} pmf_event_workitem_t;

static void
pmf_event_worker(struct work *wk, void *dummy)
{
	pmf_event_workitem_t *pew;
	pmf_event_handler_t *event;

	pew = (void *)wk;
	KASSERT(pew != NULL);
	KASSERT(wk == &pew->pew_work);

	TAILQ_FOREACH(event, &pmf_all_events, pmf_link) {
		if (event->pmf_event != pew->pew_event)
			continue;
		if (event->pmf_device == pew->pew_device || event->pmf_global)
			(*event->pmf_handler)(event->pmf_device);
	}

	free(pew, M_TEMP);

	return;
}

static bool
pmf_check_system_drivers(void)
{
	device_t curdev;
	bool unsupported_devs;

	unsupported_devs = false;
	TAILQ_FOREACH(curdev, &alldevs, dv_list) {
		if (device_pmf_is_registered(curdev))
			continue;
		if (!unsupported_devs)
			printf("Devices without power management support:");
		printf(" %s", device_xname(curdev));
		unsupported_devs = true;
	}
	if (unsupported_devs) {
		printf("\n");
		return false;
	}
	return true;
}

bool
pmf_system_bus_resume(void)
{
	int depth, maxdepth;
	bool rv;
	device_t curdev;

	maxdepth = 0;
	TAILQ_FOREACH(curdev, &alldevs, dv_list) {
		if (curdev->dv_depth > maxdepth)
			maxdepth = curdev->dv_depth;
	}
	++maxdepth;

	aprint_debug("Powering devices:");
	/* D0 handlers are run in order */
	rv = true;
	for (depth = 0; depth < maxdepth; ++depth) {
		TAILQ_FOREACH(curdev, &alldevs, dv_list) {
			if (!device_pmf_is_registered(curdev))
				continue;
			if (device_is_active(curdev) ||
			    !device_is_enabled(curdev))
				continue;
			if (curdev->dv_depth != depth)
				continue;

			aprint_debug(" %s", device_xname(curdev));

			if (!device_pmf_bus_resume(curdev))
				aprint_debug("(failed)");
		}
	}
	aprint_debug("\n");

	return rv;
}

bool
pmf_system_resume(void)
{
	int depth, maxdepth;
	bool rv;
	device_t curdev, parent;

	if (!pmf_check_system_drivers())
		return false;

	maxdepth = 0;
	TAILQ_FOREACH(curdev, &alldevs, dv_list) {
		if (curdev->dv_depth > maxdepth)
			maxdepth = curdev->dv_depth;
	}
	++maxdepth;

	aprint_debug("Resuming devices:");
	/* D0 handlers are run in order */
	rv = true;
	for (depth = 0; depth < maxdepth; ++depth) {
		TAILQ_FOREACH(curdev, &alldevs, dv_list) {
			if (device_is_active(curdev) ||
			    !device_is_enabled(curdev))
				continue;
			if (curdev->dv_depth != depth)
				continue;
			parent = device_parent(curdev);
			if (parent != NULL &&
			    !device_is_active(parent))
				continue;

			aprint_debug(" %s", device_xname(curdev));

			if (!pmf_device_resume(curdev)) {
				rv = false;
				aprint_debug("(failed)");
			}
		}
	}
	aprint_debug(".\n");

	KERNEL_UNLOCK_ONE(0);
#if NWSDISPLAY > 0
	if (rv)
		wsdisplay_handlex(1);
#endif
	return rv;
}

bool
pmf_system_suspend(void)
{
	int depth, maxdepth;
	device_t curdev;

	if (!pmf_check_system_drivers())
		return false;
#if NWSDISPLAY > 0
	if (wsdisplay_handlex(0))
		return false;
#endif
	KERNEL_LOCK(1, 0);

	/*
	 * Flush buffers only if the shutdown didn't do so
	 * already and if there was no panic.
	 */
	if (doing_shutdown == 0 && panicstr == NULL) {
		printf("Flushing disk caches: ");
		sys_sync(NULL, NULL, NULL);
		if (buf_syncwait() != 0)
			printf("giving up\n");
		else
			printf("done\n");
	}

	aprint_debug("Suspending devices:");

	maxdepth = 0;
	TAILQ_FOREACH(curdev, &alldevs, dv_list) {
		if (curdev->dv_depth > maxdepth)
			maxdepth = curdev->dv_depth;
	}

	for (depth = maxdepth; depth >= 0; --depth) {
		TAILQ_FOREACH_REVERSE(curdev, &alldevs, devicelist, dv_list) {
			if (curdev->dv_depth != depth)
				continue;
			if (!device_is_active(curdev))
				continue;

			aprint_debug(" %s", device_xname(curdev));

			/* XXX joerg check return value and abort suspend */
			if (!pmf_device_suspend(curdev))
				aprint_debug("(failed)");
		}
	}

	aprint_debug(".\n");

	return true;
}

void
pmf_system_shutdown(void)
{
	int depth, maxdepth;
	device_t curdev;

	aprint_debug("Shutting down devices:");

	maxdepth = 0;
	TAILQ_FOREACH(curdev, &alldevs, dv_list) {
		if (curdev->dv_depth > maxdepth)
			maxdepth = curdev->dv_depth;
	}

	for (depth = maxdepth; depth >= 0; --depth) {
		TAILQ_FOREACH_REVERSE(curdev, &alldevs, devicelist, dv_list) {
			if (curdev->dv_depth != depth)
				continue;
			if (!device_is_active(curdev))
				continue;

			aprint_debug(" %s", device_xname(curdev));

			if (!device_pmf_is_registered(curdev))
				continue;
			if (!device_pmf_class_suspend(curdev)) {
				aprint_debug("(failed)");
				continue;
			}
			if (!device_pmf_driver_suspend(curdev)) {
				aprint_debug("(failed)");
				continue;
			}
		}
	}

	aprint_debug(".\n");
}

bool
pmf_set_platform(const char *key, const char *value)
{
	if (pmf_platform == NULL)
		pmf_platform = prop_dictionary_create();
	if (pmf_platform == NULL)
		return false;

	return prop_dictionary_set_cstring(pmf_platform, key, value);
}

const char *
pmf_get_platform(const char *key)
{
	const char *value;

	if (pmf_platform == NULL)
		return NULL;

	if (!prop_dictionary_get_cstring_nocopy(pmf_platform, key, &value))
		return NULL;

	return value;
}
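
/*
 * Illustration only (the key names below are hypothetical, not ones any
 * platform code is known to use): firmware glue could stash a string fact
 * here and another consumer could read it back later, e.g.
 *
 *	pmf_set_platform("bios-vendor", "ExampleCorp");
 *	...
 *	const char *vendor = pmf_get_platform("bios-vendor");
 *
 * Values live in a proplib dictionary, so setting an existing key simply
 * replaces its previous value.
 */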

bool
pmf_device_register(device_t dev,
    bool (*suspend)(device_t), bool (*resume)(device_t))
{
	device_pmf_driver_register(dev, suspend, resume);

	if (!device_pmf_driver_child_register(dev)) {
		device_pmf_driver_deregister(dev);
		return false;
	}

	return true;
}
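
/*
 * Sketch of typical driver usage (hypothetical attach routine): drivers
 * without special power handling register with NULL hooks and report, but
 * do not abort on, failure:
 *
 *	if (!pmf_device_register(self, NULL, NULL))
 *		aprint_error_dev(self, "couldn't establish power handler\n");
 *
 * Drivers that must save and restore hardware state pass their own
 * bool (*)(device_t) suspend/resume callbacks instead of NULL.
 */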

void
pmf_device_deregister(device_t dev)
{
	device_pmf_class_deregister(dev);
	device_pmf_bus_deregister(dev);
	device_pmf_driver_deregister(dev);
}

bool
pmf_device_suspend(device_t dev)
{
	PMF_TRANSITION_PRINTF(("%s: suspend enter\n", device_xname(dev)));
	if (!device_pmf_is_registered(dev))
		return false;
	PMF_TRANSITION_PRINTF2(1, ("%s: class suspend\n", device_xname(dev)));
	if (!device_pmf_class_suspend(dev))
		return false;
	PMF_TRANSITION_PRINTF2(1, ("%s: driver suspend\n", device_xname(dev)));
	if (!device_pmf_driver_suspend(dev))
		return false;
	PMF_TRANSITION_PRINTF2(1, ("%s: bus suspend\n", device_xname(dev)));
	if (!device_pmf_bus_suspend(dev))
		return false;
	PMF_TRANSITION_PRINTF(("%s: suspend exit\n", device_xname(dev)));
	return true;
}

bool
pmf_device_resume(device_t dev)
{
	PMF_TRANSITION_PRINTF(("%s: resume enter\n", device_xname(dev)));
	if (!device_pmf_is_registered(dev))
		return false;
	PMF_TRANSITION_PRINTF2(1, ("%s: bus resume\n", device_xname(dev)));
	if (!device_pmf_bus_resume(dev))
		return false;
	PMF_TRANSITION_PRINTF2(1, ("%s: driver resume\n", device_xname(dev)));
	if (!device_pmf_driver_resume(dev))
		return false;
	PMF_TRANSITION_PRINTF2(1, ("%s: class resume\n", device_xname(dev)));
	if (!device_pmf_class_resume(dev))
		return false;
	PMF_TRANSITION_PRINTF(("%s: resume exit\n", device_xname(dev)));
	return true;
}

bool
pmf_device_recursive_suspend(device_t dv)
{
	device_t curdev;

	if (!device_is_active(dv))
		return true;

	TAILQ_FOREACH(curdev, &alldevs, dv_list) {
		if (device_parent(curdev) != dv)
			continue;
		if (!pmf_device_recursive_suspend(curdev))
			return false;
	}

	return pmf_device_suspend(dv);
}

bool
pmf_device_recursive_resume(device_t dv)
{
	device_t parent;

	if (device_is_active(dv))
		return true;

	parent = device_parent(dv);
	if (parent != NULL) {
		if (!pmf_device_recursive_resume(parent))
			return false;
	}

	return pmf_device_resume(dv);
}

bool
pmf_device_resume_subtree(device_t dv)
{
	device_t curdev;

	if (!pmf_device_recursive_resume(dv))
		return false;

	TAILQ_FOREACH(curdev, &alldevs, dv_list) {
		if (device_parent(curdev) != dv)
			continue;
		if (!pmf_device_resume_subtree(curdev))
			return false;
	}
	return true;
}
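
/*
 * The recursive variants are for partial transitions outside a full system
 * suspend: pmf_device_recursive_suspend() quiesces all children before the
 * device itself, pmf_device_recursive_resume() powers up the chain of
 * parents first, and pmf_device_resume_subtree() additionally wakes every
 * descendant.  A sketch (hypothetical caller):
 *
 *	if (!pmf_device_resume_subtree(dev))
 *		aprint_error_dev(dev, "subtree resume failed\n");
 */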

#include <net/if.h>

static bool
pmf_class_network_suspend(device_t dev)
{
	struct ifnet *ifp = device_pmf_class_private(dev);
	int s;

	s = splnet();
	(*ifp->if_stop)(ifp, 1);
	splx(s);

	return true;
}

static bool
pmf_class_network_resume(device_t dev)
{
	struct ifnet *ifp = device_pmf_class_private(dev);
	int s;

	s = splnet();
	if (ifp->if_flags & IFF_UP) {
		ifp->if_flags &= ~IFF_RUNNING;
		(*ifp->if_init)(ifp);
		(*ifp->if_start)(ifp);
	}
	splx(s);

	return true;
}

void
pmf_class_network_register(device_t dev, struct ifnet *ifp)
{
	device_pmf_class_register(dev, ifp, pmf_class_network_suspend,
	    pmf_class_network_resume, NULL);
}
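
/*
 * Sketch of typical use (assuming an ordinary Ethernet driver): after the
 * interface has been attached, the driver hands its ifnet to the network
 * class so generic suspend/resume can stop and reinitialize it:
 *
 *	pmf_class_network_register(self, ifp);
 *
 * The class hooks above call ifp->if_stop, if_init and if_start directly,
 * so those must be valid before registering.
 */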

bool
pmf_event_inject(device_t dv, pmf_generic_event_t ev)
{
	pmf_event_workitem_t *pew;

	pew = malloc(sizeof(pmf_event_workitem_t), M_TEMP, M_NOWAIT);
	if (pew == NULL) {
		PMF_EVENT_PRINTF(("%s: PMF event %d dropped (no memory)\n",
		    dv ? device_xname(dv) : "<anonymous>", ev));
		return false;
	}

	pew->pew_event = ev;
	pew->pew_device = dv;

	workqueue_enqueue(pmf_event_workqueue, &pew->pew_work, NULL);
	PMF_EVENT_PRINTF(("%s: PMF event %d injected\n",
	    dv ? device_xname(dv) : "<anonymous>", ev));

	return true;
}
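
/*
 * Injection only queues a work item (hence M_NOWAIT above); the handlers
 * run later from the pmfevent workqueue.  The simplest caller is this
 * file's own idle handler, which broadcasts a global event with no
 * originating device:
 *
 *	pmf_event_inject(NULL, PMFE_DISPLAY_OFF);
 */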

bool
pmf_event_register(device_t dv, pmf_generic_event_t ev,
    void (*handler)(device_t), bool global)
{
	pmf_event_handler_t *event;

	event = malloc(sizeof(*event), M_DEVBUF, M_WAITOK);
	event->pmf_event = ev;
	event->pmf_handler = handler;
	event->pmf_device = dv;
	event->pmf_global = global;
	TAILQ_INSERT_TAIL(&pmf_all_events, event, pmf_link);

	return true;
}

void
pmf_event_deregister(device_t dv, pmf_generic_event_t ev,
    void (*handler)(device_t), bool global)
{
	pmf_event_handler_t *event;

	TAILQ_FOREACH(event, &pmf_all_events, pmf_link) {
		if (event->pmf_event != ev)
			continue;
		if (event->pmf_device != dv)
			continue;
		if (event->pmf_global != global)
			continue;
		if (event->pmf_handler != handler)
			continue;
		TAILQ_REMOVE(&pmf_all_events, event, pmf_link);
		free(event, M_DEVBUF);
		return;
	}
}
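
/*
 * Sketch of a hypothetical consumer: a display driver could register a
 * per-device handler at attach time and remove it again at detach:
 *
 *	pmf_event_register(self, PMFE_DISPLAY_OFF, mydrv_display_off, false);
 *	...
 *	pmf_event_deregister(self, PMFE_DISPLAY_OFF, mydrv_display_off, false);
 *
 * With global == true the handler also fires for events injected on behalf
 * of other devices or with no device at all (see pmf_event_worker() above).
 */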

struct display_class_softc {
	TAILQ_ENTRY(display_class_softc) dc_link;
	device_t dc_dev;
};

static TAILQ_HEAD(, display_class_softc) all_displays =
    TAILQ_HEAD_INITIALIZER(all_displays);
static callout_t global_idle_counter;
static int idle_timeout = 30;

static void
input_idle(void *dummy)
{
	PMF_IDLE_PRINTF(("Input idle handler called\n"));
	pmf_event_inject(NULL, PMFE_DISPLAY_OFF);
}

static void
input_activity_handler(device_t dv, devactive_t type)
{
	if (!TAILQ_EMPTY(&all_displays))
		callout_schedule(&global_idle_counter, idle_timeout * hz);
}

static void
pmf_class_input_deregister(device_t dv)
{
	device_active_deregister(dv, input_activity_handler);
}

bool
pmf_class_input_register(device_t dv)
{
	if (!device_active_register(dv, input_activity_handler))
		return false;

	device_pmf_class_register(dv, NULL, NULL, NULL,
	    pmf_class_input_deregister);

	return true;
}

static void
pmf_class_display_deregister(device_t dv)
{
	struct display_class_softc *sc = device_pmf_class_private(dv);
	int s;

	s = splsoftclock();
	TAILQ_REMOVE(&all_displays, sc, dc_link);
	if (TAILQ_EMPTY(&all_displays))
		callout_stop(&global_idle_counter);
	splx(s);

	free(sc, M_DEVBUF);
}

bool
pmf_class_display_register(device_t dv)
{
	struct display_class_softc *sc;
	int s;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK);

	s = splsoftclock();
	if (TAILQ_EMPTY(&all_displays))
		callout_schedule(&global_idle_counter, idle_timeout * hz);

	TAILQ_INSERT_HEAD(&all_displays, sc, dc_link);
	splx(s);

	device_pmf_class_register(dv, sc, NULL, NULL,
	    pmf_class_display_deregister);

	return true;
}
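
/*
 * Display class registration (sketch): a display driver calls
 * pmf_class_display_register(self) from its attach routine.  The first
 * registered display arms the global idle callout; input activity (see
 * pmf_class_input_register() above) keeps pushing the callout back, so
 * PMFE_DISPLAY_OFF is only injected after idle_timeout seconds without
 * input.
 */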

void
pmf_init(void)
{
	int err;

	KASSERT(pmf_event_workqueue == NULL);
	err = workqueue_create(&pmf_event_workqueue, "pmfevent",
	    pmf_event_worker, NULL, PRI_NONE, IPL_VM, 0);
	if (err)
		panic("couldn't create pmfevent workqueue");

	callout_init(&global_idle_counter, 0);
	callout_setfunc(&global_idle_counter, input_idle, NULL);
}
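
/*
 * pmf_init() is assumed to run once during early kernel startup, before
 * autoconfiguration attaches devices, so that the workqueue and callout
 * set up here exist by the time drivers register classes or inject events.
 */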