/* $NetBSD: kern_pmf.c,v 1.11 2008/01/30 00:50:17 jmcneill Exp $ */

/*-
 * Copyright (c) 2007 Jared D. McNeill <jmcneill (at) invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Jared D. McNeill.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_pmf.c,v 1.11 2008/01/30 00:50:17 jmcneill Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/pmf.h>
#include <sys/queue.h>
#include <sys/syscallargs.h> /* for sys_sync */
#include <sys/workqueue.h>
#include <prop/proplib.h>

/* #define PMF_DEBUG */

#ifdef PMF_DEBUG
int pmf_debug_event;
int pmf_debug_idle;
int pmf_debug_transition;

#define PMF_EVENT_PRINTF(x)             if (pmf_debug_event) printf x
#define PMF_IDLE_PRINTF(x)              if (pmf_debug_idle) printf x
#define PMF_TRANSITION_PRINTF(x)        if (pmf_debug_transition) printf x
#define PMF_TRANSITION_PRINTF2(y,x)     if (pmf_debug_transition>y) printf x
#else
#define PMF_EVENT_PRINTF(x)             do { } while (0)
#define PMF_IDLE_PRINTF(x)              do { } while (0)
#define PMF_TRANSITION_PRINTF(x)        do { } while (0)
#define PMF_TRANSITION_PRINTF2(y,x)     do { } while (0)
#endif

MALLOC_DEFINE(M_PMF, "pmf", "device pmf messaging memory");

static prop_dictionary_t pmf_platform = NULL;
static struct workqueue *pmf_event_workqueue;

typedef struct pmf_event_handler {
        TAILQ_ENTRY(pmf_event_handler) pmf_link;
        pmf_generic_event_t pmf_event;
        void (*pmf_handler)(device_t);
        device_t pmf_device;
        bool pmf_global;
} pmf_event_handler_t;

static TAILQ_HEAD(, pmf_event_handler) pmf_all_events =
    TAILQ_HEAD_INITIALIZER(pmf_all_events);

typedef struct pmf_event_workitem {
        struct work pew_work;
        pmf_generic_event_t pew_event;
        device_t pew_device;
} pmf_event_workitem_t;

/*
 * Deliver one queued pmf event to every handler registered for it,
 * either for the originating device or globally.
 */
static void
pmf_event_worker(struct work *wk, void *dummy)
{
        pmf_event_workitem_t *pew;
        pmf_event_handler_t *event;

        pew = (void *)wk;
        KASSERT(pew != NULL);
        KASSERT(wk == &pew->pew_work);

        TAILQ_FOREACH(event, &pmf_all_events, pmf_link) {
                if (event->pmf_event != pew->pew_event)
                        continue;
                if (event->pmf_device == pew->pew_device || event->pmf_global)
                        (*event->pmf_handler)(event->pmf_device);
        }

        free(pew, M_TEMP);
}

/*
 * Check that every attached device has registered with pmf(9).
 * Devices without power management support are reported and cause
 * this to return false, which blocks a system-wide transition.
 */
static bool
pmf_check_system_drivers(void)
{
        device_t curdev;
        bool unsupported_devs;

        unsupported_devs = false;
        TAILQ_FOREACH(curdev, &alldevs, dv_list) {
                if (device_pmf_is_registered(curdev))
                        continue;
                if (!unsupported_devs)
                        printf("Devices without power management support:");
                printf(" %s", device_xname(curdev));
                unsupported_devs = true;
        }
        if (unsupported_devs) {
                printf("\n");
                return false;
        }
        return true;
}

/*
 * Run the bus-level resume handlers for every registered, enabled but
 * inactive device, shallowest devices first (parents before children).
 */
bool
pmf_system_bus_resume(void)
{
        int depth, maxdepth;
        bool rv;
        device_t curdev;

        maxdepth = 0;
        TAILQ_FOREACH(curdev, &alldevs, dv_list) {
                if (curdev->dv_depth > maxdepth)
                        maxdepth = curdev->dv_depth;
        }
        ++maxdepth;

        aprint_debug("Powering devices:");
        /* D0 handlers are run in order of increasing depth */
        rv = true;
        for (depth = 0; depth < maxdepth; ++depth) {
                TAILQ_FOREACH(curdev, &alldevs, dv_list) {
                        if (!device_pmf_is_registered(curdev))
                                continue;
                        if (device_is_active(curdev) ||
                            !device_is_enabled(curdev))
                                continue;
                        if (curdev->dv_depth != depth)
                                continue;

                        aprint_debug(" %s", device_xname(curdev));

                        if (!device_pmf_bus_resume(curdev))
                                aprint_debug("(failed)");
                }
        }
        aprint_debug("\n");

        return rv;
}

/*
 * Resume every enabled, inactive device whose parent is already active,
 * parents before children.  Fails if any device lacks pmf support or
 * refuses to resume.
 */
bool
pmf_system_resume(void)
{
        int depth, maxdepth;
        bool rv;
        device_t curdev, parent;

        if (!pmf_check_system_drivers())
                return false;

        maxdepth = 0;
        TAILQ_FOREACH(curdev, &alldevs, dv_list) {
                if (curdev->dv_depth > maxdepth)
                        maxdepth = curdev->dv_depth;
        }
        ++maxdepth;

        aprint_debug("Resuming devices:");
        /* D0 handlers are run in order of increasing depth */
        rv = true;
        for (depth = 0; depth < maxdepth; ++depth) {
                TAILQ_FOREACH(curdev, &alldevs, dv_list) {
                        if (device_is_active(curdev) ||
                            !device_is_enabled(curdev))
                                continue;
                        if (curdev->dv_depth != depth)
                                continue;
                        parent = device_parent(curdev);
                        if (parent != NULL &&
                            !device_is_active(parent))
                                continue;

                        aprint_debug(" %s", device_xname(curdev));

                        if (!pmf_device_resume(curdev)) {
                                rv = false;
                                aprint_debug("(failed)");
                        }
                }
        }
        aprint_debug(".\n");

        return rv;
}

/*
 * Suspend every active device, deepest devices first (children before
 * parents).  Unless the system is already shutting down or has panicked,
 * disk caches are flushed before the devices are touched.
 */
bool
pmf_system_suspend(void)
{
        int depth, maxdepth;
        device_t curdev;

        if (!pmf_check_system_drivers())
                return false;

        /*
         * Flush buffers only if the shutdown didn't do so
         * already and if there was no panic.
         */
        if (doing_shutdown == 0 && panicstr == NULL) {
                printf("Flushing disk caches: ");
                sys_sync(NULL, NULL, NULL);
                if (buf_syncwait() != 0)
                        printf("giving up\n");
                else
                        printf("done\n");
        }

        aprint_debug("Suspending devices:");

        maxdepth = 0;
        TAILQ_FOREACH(curdev, &alldevs, dv_list) {
                if (curdev->dv_depth > maxdepth)
                        maxdepth = curdev->dv_depth;
        }

        for (depth = maxdepth; depth >= 0; --depth) {
                TAILQ_FOREACH_REVERSE(curdev, &alldevs, devicelist, dv_list) {
                        if (curdev->dv_depth != depth)
                                continue;
                        if (!device_is_active(curdev))
                                continue;

                        aprint_debug(" %s", device_xname(curdev));

                        /* XXX joerg check return value and abort suspend */
                        if (!pmf_device_suspend(curdev))
                                aprint_debug("(failed)");
                }
        }

        aprint_debug(".\n");

        return true;
}

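/*
 * Sketch (not part of this file) of how machine-dependent sleep code
 * might drive the system-wide entry points above; the real callers live
 * in MD or ACPI code and may differ in detail:
 *
 *      if (!pmf_system_suspend())
 *              return false;   (a driver refused to suspend)
 *      (enter the platform sleep state, then wake up)
 *      pmf_system_bus_resume();
 *      pmf_system_resume();
 */
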
/*
 * Power down devices at shutdown time, deepest devices first.  Unlike
 * pmf_system_suspend(), failures are only reported, not propagated.
 */
void
pmf_system_shutdown(void)
{
        int depth, maxdepth;
        device_t curdev;

        aprint_debug("Shutting down devices:");

        maxdepth = 0;
        TAILQ_FOREACH(curdev, &alldevs, dv_list) {
                if (curdev->dv_depth > maxdepth)
                        maxdepth = curdev->dv_depth;
        }

        for (depth = maxdepth; depth >= 0; --depth) {
                TAILQ_FOREACH_REVERSE(curdev, &alldevs, devicelist, dv_list) {
                        if (curdev->dv_depth != depth)
                                continue;
                        if (!device_is_active(curdev))
                                continue;

                        aprint_debug(" %s", device_xname(curdev));

                        if (!device_pmf_is_registered(curdev))
                                continue;
                        if (!device_pmf_class_suspend(curdev)) {
                                aprint_debug("(failed)");
                                continue;
                        }
                        if (!device_pmf_driver_suspend(curdev)) {
                                aprint_debug("(failed)");
                                continue;
                        }
                }
        }

        aprint_debug(".\n");
}

bool
pmf_set_platform(const char *key, const char *value)
{
        if (pmf_platform == NULL)
                pmf_platform = prop_dictionary_create();
        if (pmf_platform == NULL)
                return false;

        return prop_dictionary_set_cstring(pmf_platform, key, value);
}

const char *
pmf_get_platform(const char *key)
{
        const char *value;

        if (pmf_platform == NULL)
                return NULL;

        if (!prop_dictionary_get_cstring_nocopy(pmf_platform, key, &value))
                return NULL;

        return value;
}

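/*
 * Example (sketch, not part of this file): platform code can record
 * string properties here for drivers to query later.  The key and value
 * below are made up for illustration:
 *
 *      pmf_set_platform("bios-version", "1.02");
 *      ...
 *      const char *ver = pmf_get_platform("bios-version");
 *      if (ver != NULL)
 *              printf("BIOS version %s\n", ver);
 */
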
/*
 * Register a device with pmf(9), together with its driver-level
 * suspend and resume handlers.
 */
bool
pmf_device_register(device_t dev,
    bool (*suspend)(device_t), bool (*resume)(device_t))
{
        device_pmf_driver_register(dev, suspend, resume);

        if (!device_pmf_driver_child_register(dev)) {
                device_pmf_driver_deregister(dev);
                return false;
        }

        return true;
}

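/*
 * Example (sketch, with made-up driver names): a driver normally
 * registers from its attach function and deregisters from detach:
 *
 *      static bool foo_suspend(device_t);
 *      static bool foo_resume(device_t);
 *
 *      if (!pmf_device_register(self, foo_suspend, foo_resume))
 *              aprint_error("%s: couldn't establish power handler\n",
 *                  device_xname(self));
 */
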
void
pmf_device_deregister(device_t dev)
{
        device_pmf_class_deregister(dev);
        device_pmf_bus_deregister(dev);
        device_pmf_driver_deregister(dev);
}

/*
 * Suspend a single device: class handlers run first, then the driver,
 * then the bus.
 */
bool
pmf_device_suspend(device_t dev)
{
        PMF_TRANSITION_PRINTF(("%s: suspend enter\n", device_xname(dev)));
        if (!device_pmf_is_registered(dev))
                return false;
        PMF_TRANSITION_PRINTF2(1, ("%s: class suspend\n", device_xname(dev)));
        if (!device_pmf_class_suspend(dev))
                return false;
        PMF_TRANSITION_PRINTF2(1, ("%s: driver suspend\n", device_xname(dev)));
        if (!device_pmf_driver_suspend(dev))
                return false;
        PMF_TRANSITION_PRINTF2(1, ("%s: bus suspend\n", device_xname(dev)));
        if (!device_pmf_bus_suspend(dev))
                return false;
        PMF_TRANSITION_PRINTF(("%s: suspend exit\n", device_xname(dev)));
        return true;
}

/*
 * Resume a single device in the opposite order: bus first, then the
 * driver, then the class handlers.
 */
bool
pmf_device_resume(device_t dev)
{
        PMF_TRANSITION_PRINTF(("%s: resume enter\n", device_xname(dev)));
        if (!device_pmf_is_registered(dev))
                return false;
        PMF_TRANSITION_PRINTF2(1, ("%s: bus resume\n", device_xname(dev)));
        if (!device_pmf_bus_resume(dev))
                return false;
        PMF_TRANSITION_PRINTF2(1, ("%s: driver resume\n", device_xname(dev)));
        if (!device_pmf_driver_resume(dev))
                return false;
        PMF_TRANSITION_PRINTF2(1, ("%s: class resume\n", device_xname(dev)));
        if (!device_pmf_class_resume(dev))
                return false;
        PMF_TRANSITION_PRINTF(("%s: resume exit\n", device_xname(dev)));
        return true;
}

/* Suspend a device, but only after all of its children are suspended. */
bool
pmf_device_recursive_suspend(device_t dv)
{
        device_t curdev;

        if (!device_is_active(dv))
                return true;

        TAILQ_FOREACH(curdev, &alldevs, dv_list) {
                if (device_parent(curdev) != dv)
                        continue;
                if (!pmf_device_recursive_suspend(curdev))
                        return false;
        }

        return pmf_device_suspend(dv);
}

/* Resume a device, resuming its ancestors first if necessary. */
bool
pmf_device_recursive_resume(device_t dv)
{
        device_t parent;

        if (device_is_active(dv))
                return true;

        parent = device_parent(dv);
        if (parent != NULL) {
                if (!pmf_device_recursive_resume(parent))
                        return false;
        }

        return pmf_device_resume(dv);
}

/* Resume a device (and its ancestors), then all of its descendants. */
bool
pmf_device_resume_subtree(device_t dv)
{
        device_t curdev;

        if (!pmf_device_recursive_resume(dv))
                return false;

        TAILQ_FOREACH(curdev, &alldevs, dv_list) {
                if (device_parent(curdev) != dv)
                        continue;
                if (!pmf_device_resume_subtree(curdev))
                        return false;
        }
        return true;
}

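/*
 * Example (sketch): a driver that needs to touch its hardware while the
 * device tree may be partly powered down can make sure its whole
 * ancestry is awake first:
 *
 *      if (!pmf_device_recursive_resume(self)) {
 *              aprint_error("%s: unable to power up device\n",
 *                  device_xname(self));
 *              return EIO;
 *      }
 */
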
#include <net/if.h>

static bool
pmf_class_network_suspend(device_t dev)
{
        struct ifnet *ifp = device_pmf_class_private(dev);
        int s;

        s = splnet();
        (*ifp->if_stop)(ifp, 1);
        splx(s);

        return true;
}

static bool
pmf_class_network_resume(device_t dev)
{
        struct ifnet *ifp = device_pmf_class_private(dev);
        int s;

        s = splnet();
        if (ifp->if_flags & IFF_UP) {
                ifp->if_flags &= ~IFF_RUNNING;
                (*ifp->if_init)(ifp);
                (*ifp->if_start)(ifp);
        }
        splx(s);

        return true;
}

/*
 * Register a network interface with the network device class.  The
 * generic class handlers stop the interface on suspend and, if it was
 * up, reinitialize and restart it on resume.
 */
void
pmf_class_network_register(device_t dev, struct ifnet *ifp)
{
        device_pmf_class_register(dev, ifp, pmf_class_network_suspend,
            pmf_class_network_resume, NULL);
}

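/*
 * Example (sketch): a network driver typically registers both the
 * generic driver hooks and the network class handlers at attach time,
 * once the interface has been attached:
 *
 *      if_attach(ifp);
 *      ether_ifattach(ifp, enaddr);
 *      if (!pmf_device_register(self, NULL, NULL))
 *              aprint_error("%s: couldn't establish power handler\n",
 *                  device_xname(self));
 *      else
 *              pmf_class_network_register(self, ifp);
 */
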
/*
 * Queue a pmf event for asynchronous delivery to registered handlers.
 * dv may be NULL to inject an anonymous event, which is picked up by
 * handlers registered as global.
 */
bool
pmf_event_inject(device_t dv, pmf_generic_event_t ev)
{
        pmf_event_workitem_t *pew;

        pew = malloc(sizeof(pmf_event_workitem_t), M_TEMP, M_NOWAIT);
        if (pew == NULL) {
                PMF_EVENT_PRINTF(("%s: PMF event %d dropped (no memory)\n",
                    dv ? device_xname(dv) : "<anonymous>", ev));
                return false;
        }

        pew->pew_event = ev;
        pew->pew_device = dv;

        workqueue_enqueue(pmf_event_workqueue, (void *)pew, NULL);
        PMF_EVENT_PRINTF(("%s: PMF event %d injected\n",
            dv ? device_xname(dv) : "<anonymous>", ev));

        return true;
}

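/*
 * Example (sketch): a lid switch or hotkey driver could blank the
 * displays by injecting the corresponding event; delivery happens later
 * from the pmf workqueue, not in the caller's context:
 *
 *      (void)pmf_event_inject(NULL, PMFE_DISPLAY_OFF);
 */
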
bool
pmf_event_register(device_t dv, pmf_generic_event_t ev,
    void (*handler)(device_t), bool global)
{
        pmf_event_handler_t *event;

        event = malloc(sizeof(*event), M_DEVBUF, M_WAITOK);
        event->pmf_event = ev;
        event->pmf_handler = handler;
        event->pmf_device = dv;
        event->pmf_global = global;
        TAILQ_INSERT_TAIL(&pmf_all_events, event, pmf_link);

        return true;
}

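/*
 * Example (sketch, with a made-up handler name): a display driver can
 * register for the idle "display off" event; with global set to true
 * the handler also runs for anonymously injected events:
 *
 *      static void foo_display_off(device_t);
 *
 *      pmf_event_register(self, PMFE_DISPLAY_OFF, foo_display_off, true);
 *      ...
 *      pmf_event_deregister(self, PMFE_DISPLAY_OFF, foo_display_off, true);
 */
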
void
pmf_event_deregister(device_t dv, pmf_generic_event_t ev,
    void (*handler)(device_t), bool global)
{
        pmf_event_handler_t *event;

        TAILQ_FOREACH(event, &pmf_all_events, pmf_link) {
                if (event->pmf_event != ev)
                        continue;
                if (event->pmf_device != dv)
                        continue;
                if (event->pmf_global != global)
                        continue;
                if (event->pmf_handler != handler)
                        continue;
                TAILQ_REMOVE(&pmf_all_events, event, pmf_link);
                free(event, M_DEVBUF);
                return;
        }
}

struct display_class_softc {
        TAILQ_ENTRY(display_class_softc) dc_link;
        device_t dc_dev;
};

static TAILQ_HEAD(, display_class_softc) all_displays =
    TAILQ_HEAD_INITIALIZER(all_displays);
static callout_t global_idle_counter;
static int idle_timeout = 30;

/* Idle callout: ask the displays to power down after idle_timeout seconds. */
static void
input_idle(void *dummy)
{
        PMF_IDLE_PRINTF(("Input idle handler called\n"));
        pmf_event_inject(NULL, PMFE_DISPLAY_OFF);
}

/* Restart the display idle timer whenever a registered input device is used. */
static void
input_activity_handler(device_t dv, devactive_t type)
{
        if (!TAILQ_EMPTY(&all_displays))
                callout_schedule(&global_idle_counter, idle_timeout * hz);
}

static void
pmf_class_input_deregister(device_t dv)
{
        device_active_deregister(dv, input_activity_handler);
}

/*
 * Register an input device with the input device class; activity on it
 * re-arms the display idle timer.
 */
bool
pmf_class_input_register(device_t dv)
{
        if (!device_active_register(dv, input_activity_handler))
                return false;

        device_pmf_class_register(dv, NULL, NULL, NULL,
            pmf_class_input_deregister);

        return true;
}

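/*
 * Example (sketch): a keyboard or mouse driver opts into the display
 * idle machinery at attach time; its activity reports then keep the
 * displays awake:
 *
 *      if (!pmf_device_register(self, NULL, NULL) ||
 *          !pmf_class_input_register(self))
 *              aprint_error("%s: couldn't establish power handler\n",
 *                  device_xname(self));
 */
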
static void
pmf_class_display_deregister(device_t dv)
{
        struct display_class_softc *sc = device_pmf_class_private(dv);
        int s;

        s = splsoftclock();
        TAILQ_REMOVE(&all_displays, sc, dc_link);
        if (TAILQ_EMPTY(&all_displays))
                callout_stop(&global_idle_counter);
        splx(s);

        free(sc, M_DEVBUF);
}

/*
 * Register a display device with the display device class.  While at
 * least one display is registered, the idle callout injects
 * PMFE_DISPLAY_OFF after idle_timeout seconds without input activity.
 */
bool
pmf_class_display_register(device_t dv)
{
        struct display_class_softc *sc;
        int s;

        sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK);

        s = splsoftclock();
        if (TAILQ_EMPTY(&all_displays))
                callout_schedule(&global_idle_counter, idle_timeout * hz);

        TAILQ_INSERT_HEAD(&all_displays, sc, dc_link);
        splx(s);

        device_pmf_class_register(dv, sc, NULL, NULL,
            pmf_class_display_deregister);

        return true;
}

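/*
 * Example (sketch, with a made-up handler name): a display driver
 * usually pairs class registration with an event handler so it can act
 * on the idle timeout:
 *
 *      pmf_class_display_register(self);
 *      pmf_event_register(self, PMFE_DISPLAY_OFF, foo_display_off, true);
 */
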
/*
 * Called once during boot to create the pmf event workqueue and the
 * display idle callout.
 */
void
pmf_init(void)
{
        int err;

        KASSERT(pmf_event_workqueue == NULL);
        err = workqueue_create(&pmf_event_workqueue, "pmfevent",
            pmf_event_worker, NULL, PRI_NONE, IPL_VM, 0);
        if (err)
                panic("couldn't create pmfevent workqueue");

        callout_init(&global_idle_counter, 0);
        callout_setfunc(&global_idle_counter, input_idle, NULL);
}