1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
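/*
 * For reference, the "embedded device_pm_info object" mentioned above is
 * the "power" member of struct device; roughly (see <linux/device.h> and
 * <linux/pm.h> for the full definitions):
 *
 *      struct device {
 *              ...
 *              struct dev_pm_info      power;
 *              struct dev_pm_domain    *pm_domain;
 *              ...
 *      };
 */
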
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/timer.h>
36
37 #include "../base.h"
38 #include "power.h"
39
40 typedef int (*pm_callback_t)(struct device *);
41
42 /*
43  * The entries in the dpm_list list are in a depth first order, simply
44  * because children are guaranteed to be discovered after parents, and
45  * are inserted at the back of the list on discovery.
46  *
47  * Since device_pm_add() may be called with a device lock held,
48  * we must never try to acquire a device lock while holding
49  * dpm_list_mutex.
50  */
51
52 LIST_HEAD(dpm_list);
53 static LIST_HEAD(dpm_prepared_list);
54 static LIST_HEAD(dpm_suspended_list);
55 static LIST_HEAD(dpm_late_early_list);
56 static LIST_HEAD(dpm_noirq_list);
57
58 struct suspend_stats suspend_stats;
59 static DEFINE_MUTEX(dpm_list_mtx);
60 static pm_message_t pm_transition;
61
62 static void dpm_drv_timeout(unsigned long data);
63 struct dpm_drv_wd_data {
64         struct device *dev;
65         struct task_struct *tsk;
66 };
67
68 static int async_error;
69
70 static char *pm_verb(int event)
71 {
72         switch (event) {
73         case PM_EVENT_SUSPEND:
74                 return "suspend";
75         case PM_EVENT_RESUME:
76                 return "resume";
77         case PM_EVENT_FREEZE:
78                 return "freeze";
79         case PM_EVENT_QUIESCE:
80                 return "quiesce";
81         case PM_EVENT_HIBERNATE:
82                 return "hibernate";
83         case PM_EVENT_THAW:
84                 return "thaw";
85         case PM_EVENT_RESTORE:
86                 return "restore";
87         case PM_EVENT_RECOVER:
88                 return "recover";
89         default:
90                 return "(unknown PM event)";
91         }
92 }
93
94 /**
95  * device_pm_sleep_init - Initialize system suspend-related device fields.
96  * @dev: Device object being initialized.
97  */
98 void device_pm_sleep_init(struct device *dev)
99 {
100         dev->power.is_prepared = false;
101         dev->power.is_suspended = false;
102         dev->power.is_noirq_suspended = false;
103         dev->power.is_late_suspended = false;
104         init_completion(&dev->power.completion);
105         complete_all(&dev->power.completion);
106         dev->power.wakeup = NULL;
107         INIT_LIST_HEAD(&dev->power.entry);
108 }
109
110 /**
111  * device_pm_lock - Lock the list of active devices used by the PM core.
112  */
113 void device_pm_lock(void)
114 {
115         mutex_lock(&dpm_list_mtx);
116 }
117
118 /**
119  * device_pm_unlock - Unlock the list of active devices used by the PM core.
120  */
121 void device_pm_unlock(void)
122 {
123         mutex_unlock(&dpm_list_mtx);
124 }
125
126 /**
127  * device_pm_add - Add a device to the PM core's list of active devices.
128  * @dev: Device to add to the list.
129  */
130 void device_pm_add(struct device *dev)
131 {
132         pr_debug("PM: Adding info for %s:%s\n",
133                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
134         mutex_lock(&dpm_list_mtx);
135         if (dev->parent && dev->parent->power.is_prepared)
136                 dev_warn(dev, "parent %s should not be sleeping\n",
137                         dev_name(dev->parent));
138         list_add_tail(&dev->power.entry, &dpm_list);
139         mutex_unlock(&dpm_list_mtx);
140 }
141
142 /**
143  * device_pm_remove - Remove a device from the PM core's list of active devices.
144  * @dev: Device to be removed from the list.
145  */
146 void device_pm_remove(struct device *dev)
147 {
148         pr_debug("PM: Removing info for %s:%s\n",
149                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
150         complete_all(&dev->power.completion);
151         mutex_lock(&dpm_list_mtx);
152         list_del_init(&dev->power.entry);
153         mutex_unlock(&dpm_list_mtx);
154         device_wakeup_disable(dev);
155         pm_runtime_remove(dev);
156 }
157
158 /**
159  * device_pm_move_before - Move device in the PM core's list of active devices.
160  * @deva: Device to move in dpm_list.
161  * @devb: Device @deva should come before.
162  */
163 void device_pm_move_before(struct device *deva, struct device *devb)
164 {
165         pr_debug("PM: Moving %s:%s before %s:%s\n",
166                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
167                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
168         /* Delete deva from dpm_list and reinsert before devb. */
169         list_move_tail(&deva->power.entry, &devb->power.entry);
170 }
171
172 /**
173  * device_pm_move_after - Move device in the PM core's list of active devices.
174  * @deva: Device to move in dpm_list.
175  * @devb: Device @deva should come after.
176  */
177 void device_pm_move_after(struct device *deva, struct device *devb)
178 {
179         pr_debug("PM: Moving %s:%s after %s:%s\n",
180                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
181                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
182         /* Delete deva from dpm_list and reinsert after devb. */
183         list_move(&deva->power.entry, &devb->power.entry);
184 }
185
186 /**
187  * device_pm_move_last - Move device to end of the PM core's list of devices.
188  * @dev: Device to move in dpm_list.
189  */
190 void device_pm_move_last(struct device *dev)
191 {
192         pr_debug("PM: Moving %s:%s to end of list\n",
193                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
194         list_move_tail(&dev->power.entry, &dpm_list);
195 }
196
197 static ktime_t initcall_debug_start(struct device *dev)
198 {
199         ktime_t calltime = ktime_set(0, 0);
200
201         if (pm_print_times_enabled) {
202                 pr_info("calling  %s+ @ %i, parent: %s\n",
203                         dev_name(dev), task_pid_nr(current),
204                         dev->parent ? dev_name(dev->parent) : "none");
205                 calltime = ktime_get();
206         }
207
208         return calltime;
209 }
210
211 static void initcall_debug_report(struct device *dev, ktime_t calltime,
212                                   int error, pm_message_t state, char *info)
213 {
214         ktime_t rettime;
215         s64 nsecs;
216
217         rettime = ktime_get();
218         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
219
220         if (pm_print_times_enabled) {
221                 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
222                         error, (unsigned long long)nsecs >> 10);
223         }
224 }
225
226 /**
227  * dpm_wait - Wait for a PM operation to complete.
228  * @dev: Device to wait for.
229  * @async: If unset, wait only if the device's power.async_suspend flag is set.
230  */
231 static void dpm_wait(struct device *dev, bool async)
232 {
233         if (!dev)
234                 return;
235
236         if (async || (pm_async_enabled && dev->power.async_suspend))
237                 wait_for_completion(&dev->power.completion);
238 }
239
240 static int dpm_wait_fn(struct device *dev, void *async_ptr)
241 {
242         dpm_wait(dev, *((bool *)async_ptr));
243         return 0;
244 }
245
246 static void dpm_wait_for_children(struct device *dev, bool async)
247 {
248        device_for_each_child(dev, &async, dpm_wait_fn);
249 }
250
251 /**
252  * pm_op - Return the PM operation appropriate for given PM event.
253  * @ops: PM operations to choose from.
254  * @state: PM transition of the system being carried out.
255  */
256 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
257 {
258         switch (state.event) {
259 #ifdef CONFIG_SUSPEND
260         case PM_EVENT_SUSPEND:
261                 return ops->suspend;
262         case PM_EVENT_RESUME:
263                 return ops->resume;
264 #endif /* CONFIG_SUSPEND */
265 #ifdef CONFIG_HIBERNATE_CALLBACKS
266         case PM_EVENT_FREEZE:
267         case PM_EVENT_QUIESCE:
268                 return ops->freeze;
269         case PM_EVENT_HIBERNATE:
270                 return ops->poweroff;
271         case PM_EVENT_THAW:
272         case PM_EVENT_RECOVER:
273                 return ops->thaw;
275         case PM_EVENT_RESTORE:
276                 return ops->restore;
277 #endif /* CONFIG_HIBERNATE_CALLBACKS */
278         }
279
280         return NULL;
281 }
282
283 /**
284  * pm_late_early_op - Return the PM operation appropriate for given PM event.
285  * @ops: PM operations to choose from.
286  * @state: PM transition of the system being carried out.
287  *
288  * The returned callback is run while runtime PM is disabled for the device.
289  */
290 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
291                                       pm_message_t state)
292 {
293         switch (state.event) {
294 #ifdef CONFIG_SUSPEND
295         case PM_EVENT_SUSPEND:
296                 return ops->suspend_late;
297         case PM_EVENT_RESUME:
298                 return ops->resume_early;
299 #endif /* CONFIG_SUSPEND */
300 #ifdef CONFIG_HIBERNATE_CALLBACKS
301         case PM_EVENT_FREEZE:
302         case PM_EVENT_QUIESCE:
303                 return ops->freeze_late;
304         case PM_EVENT_HIBERNATE:
305                 return ops->poweroff_late;
306         case PM_EVENT_THAW:
307         case PM_EVENT_RECOVER:
308                 return ops->thaw_early;
309         case PM_EVENT_RESTORE:
310                 return ops->restore_early;
311 #endif /* CONFIG_HIBERNATE_CALLBACKS */
312         }
313
314         return NULL;
315 }
316
317 /**
318  * pm_noirq_op - Return the PM operation appropriate for given PM event.
319  * @ops: PM operations to choose from.
320  * @state: PM transition of the system being carried out.
321  *
322  * The returned callback is run while the device's driver is not receiving
323  * interrupts.
324  */
325 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
326 {
327         switch (state.event) {
328 #ifdef CONFIG_SUSPEND
329         case PM_EVENT_SUSPEND:
330                 return ops->suspend_noirq;
331         case PM_EVENT_RESUME:
332                 return ops->resume_noirq;
333 #endif /* CONFIG_SUSPEND */
334 #ifdef CONFIG_HIBERNATE_CALLBACKS
335         case PM_EVENT_FREEZE:
336         case PM_EVENT_QUIESCE:
337                 return ops->freeze_noirq;
338         case PM_EVENT_HIBERNATE:
339                 return ops->poweroff_noirq;
340         case PM_EVENT_THAW:
341         case PM_EVENT_RECOVER:
342                 return ops->thaw_noirq;
343         case PM_EVENT_RESTORE:
344                 return ops->restore_noirq;
345 #endif /* CONFIG_HIBERNATE_CALLBACKS */
346         }
347
348         return NULL;
349 }
350
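/*
 * The three selectors above simply index into a driver-supplied
 * struct dev_pm_ops.  Illustrative sketch only (the foo_* names are
 * hypothetical, not part of this file): a driver that needs distinct
 * work in each suspend phase might provide
 *
 *      static int foo_suspend(struct device *dev)       { return 0; }
 *      static int foo_suspend_late(struct device *dev)  { return 0; }
 *      static int foo_suspend_noirq(struct device *dev) { return 0; }
 *      static int foo_resume_noirq(struct device *dev)  { return 0; }
 *      static int foo_resume_early(struct device *dev)  { return 0; }
 *      static int foo_resume(struct device *dev)        { return 0; }
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              .suspend        = foo_suspend,
 *              .suspend_late   = foo_suspend_late,
 *              .suspend_noirq  = foo_suspend_noirq,
 *              .resume_noirq   = foo_resume_noirq,
 *              .resume_early   = foo_resume_early,
 *              .resume         = foo_resume,
 *      };
 *
 * For PM_EVENT_SUSPEND, pm_op(), pm_late_early_op() and pm_noirq_op()
 * would then return foo_suspend, foo_suspend_late and foo_suspend_noirq
 * respectively.
 */
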
351 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
352 {
353         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
354                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
355                 ", may wakeup" : "");
356 }
357
358 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
359                         int error)
360 {
361         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
362                 dev_name(dev), pm_verb(state.event), info, error);
363 }
364
365 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
366 {
367         ktime_t calltime;
368         u64 usecs64;
369         int usecs;
370
371         calltime = ktime_get();
372         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
373         do_div(usecs64, NSEC_PER_USEC);
374         usecs = usecs64;
375         if (usecs == 0)
376                 usecs = 1;
377         pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
378                 info ?: "", info ? " " : "", pm_verb(state.event),
379                 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
380 }
381
382 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
383                             pm_message_t state, char *info)
384 {
385         ktime_t calltime;
386         int error;
387
388         if (!cb)
389                 return 0;
390
391         calltime = initcall_debug_start(dev);
392
393         pm_dev_dbg(dev, state, info);
394         trace_device_pm_callback_start(dev, info, state.event);
395         error = cb(dev);
396         trace_device_pm_callback_end(dev, error);
397         suspend_report_result(cb, error);
398
399         initcall_debug_report(dev, calltime, error, state, info);
400
401         return error;
402 }
403
404 #ifdef CONFIG_DPM_WATCHDOG
405 struct dpm_watchdog {
406         struct device           *dev;
407         struct task_struct      *tsk;
408         struct timer_list       timer;
409 };
410
411 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
412         struct dpm_watchdog wd
413
414 /**
415  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
416  * @data: Watchdog object address.
417  *
418  * Called when a driver has timed out suspending or resuming.
419  * There's not much we can do here to recover so panic() to
420  * capture a crash-dump in pstore.
421  */
422 static void dpm_watchdog_handler(unsigned long data)
423 {
424         struct dpm_watchdog *wd = (void *)data;
425
426         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
427         show_stack(wd->tsk, NULL);
428         panic("%s %s: unrecoverable failure\n",
429                 dev_driver_string(wd->dev), dev_name(wd->dev));
430 }
431
432 /**
433  * dpm_watchdog_set - Enable pm watchdog for given device.
434  * @wd: Watchdog. Must be allocated on the stack.
435  * @dev: Device to handle.
436  */
437 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
438 {
439         struct timer_list *timer = &wd->timer;
440
441         wd->dev = dev;
442         wd->tsk = current;
443
444         init_timer_on_stack(timer);
445         /* use same timeout value for both suspend and resume */
446         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
447         timer->function = dpm_watchdog_handler;
448         timer->data = (unsigned long)wd;
449         add_timer(timer);
450 }
451
452 /**
453  * dpm_watchdog_clear - Disable suspend/resume watchdog.
454  * @wd: Watchdog to disable.
455  */
456 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
457 {
458         struct timer_list *timer = &wd->timer;
459
460         del_timer_sync(timer);
461         destroy_timer_on_stack(timer);
462 }
463 #else
464 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
465 #define dpm_watchdog_set(x, y)
466 #define dpm_watchdog_clear(x)
467 #endif
468
469 /*------------------------- Resume routines -------------------------*/
470
471 /**
472  * device_resume_noirq - Execute a "noirq resume" callback for given device.
473  * @dev: Device to handle.
474  * @state: PM transition of the system being carried out.
475  * @async: If true, the device is being resumed asynchronously.
476  *
477  * The driver of @dev will not receive interrupts while this function is being
478  * executed.
479  */
480 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
481 {
482         pm_callback_t callback = NULL;
483         char *info = NULL;
484         int error = 0;
485
486         TRACE_DEVICE(dev);
487         TRACE_RESUME(0);
488
489         if (dev->power.syscore || dev->power.direct_complete)
490                 goto Out;
491
492         if (!dev->power.is_noirq_suspended)
493                 goto Out;
494
495         dpm_wait(dev->parent, async);
496
497         if (dev->pm_domain) {
498                 info = "noirq power domain ";
499                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
500         } else if (dev->type && dev->type->pm) {
501                 info = "noirq type ";
502                 callback = pm_noirq_op(dev->type->pm, state);
503         } else if (dev->class && dev->class->pm) {
504                 info = "noirq class ";
505                 callback = pm_noirq_op(dev->class->pm, state);
506         } else if (dev->bus && dev->bus->pm) {
507                 info = "noirq bus ";
508                 callback = pm_noirq_op(dev->bus->pm, state);
509         }
510
511         if (!callback && dev->driver && dev->driver->pm) {
512                 info = "noirq driver ";
513                 callback = pm_noirq_op(dev->driver->pm, state);
514         }
515
516         error = dpm_run_callback(callback, dev, state, info);
517         dev->power.is_noirq_suspended = false;
518
519  Out:
520         complete_all(&dev->power.completion);
521         TRACE_RESUME(error);
522         return error;
523 }
524
525 static bool is_async(struct device *dev)
526 {
527         return dev->power.async_suspend && pm_async_enabled
528                 && !pm_trace_is_enabled();
529 }
530
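/*
 * A device only takes the asynchronous paths below if it has opted in.
 * Minimal sketch, assuming a platform driver (foo_probe() is hypothetical):
 * a driver whose device has no strict ordering requirements against its
 * siblings can mark it at probe time, after which the PM core suspends and
 * resumes it from an async_schedule() thread, subject to the global
 * pm_async_enabled knob exposed as /sys/power/pm_async:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              device_enable_async_suspend(&pdev->dev);
 *              return 0;
 *      }
 */
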
531 static void async_resume_noirq(void *data, async_cookie_t cookie)
532 {
533         struct device *dev = (struct device *)data;
534         int error;
535
536         error = device_resume_noirq(dev, pm_transition, true);
537         if (error)
538                 pm_dev_err(dev, pm_transition, " async", error);
539
540         put_device(dev);
541 }
542
543 /**
544  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
545  * @state: PM transition of the system being carried out.
546  *
547  * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
548  * enable device drivers to receive interrupts.
549  */
550 void dpm_resume_noirq(pm_message_t state)
551 {
552         struct device *dev;
553         ktime_t starttime = ktime_get();
554
555         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
556         mutex_lock(&dpm_list_mtx);
557         pm_transition = state;
558
559         /*
560          * Advance the async threads upfront,
561          * in case the starting of async threads is
562          * delayed by non-async resuming devices.
563          */
564         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
565                 reinit_completion(&dev->power.completion);
566                 if (is_async(dev)) {
567                         get_device(dev);
568                         async_schedule(async_resume_noirq, dev);
569                 }
570         }
571
572         while (!list_empty(&dpm_noirq_list)) {
573                 dev = to_device(dpm_noirq_list.next);
574                 get_device(dev);
575                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
576                 mutex_unlock(&dpm_list_mtx);
577
578                 if (!is_async(dev)) {
579                         int error;
580
581                         error = device_resume_noirq(dev, state, false);
582                         if (error) {
583                                 suspend_stats.failed_resume_noirq++;
584                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
585                                 dpm_save_failed_dev(dev_name(dev));
586                                 pm_dev_err(dev, state, " noirq", error);
587                         }
588                 }
589
590                 mutex_lock(&dpm_list_mtx);
591                 put_device(dev);
592         }
593         mutex_unlock(&dpm_list_mtx);
594         async_synchronize_full();
595         dpm_show_time(starttime, state, "noirq");
596         resume_device_irqs();
597         device_wakeup_disarm_wake_irqs();
598         cpuidle_resume();
599         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
600 }
601
602 /**
603  * device_resume_early - Execute an "early resume" callback for given device.
604  * @dev: Device to handle.
605  * @state: PM transition of the system being carried out.
606  * @async: If true, the device is being resumed asynchronously.
607  *
608  * Runtime PM is disabled for @dev while this function is being executed.
609  */
610 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
611 {
612         pm_callback_t callback = NULL;
613         char *info = NULL;
614         int error = 0;
615
616         TRACE_DEVICE(dev);
617         TRACE_RESUME(0);
618
619         if (dev->power.syscore || dev->power.direct_complete)
620                 goto Out;
621
622         if (!dev->power.is_late_suspended)
623                 goto Out;
624
625         dpm_wait(dev->parent, async);
626
627         if (dev->pm_domain) {
628                 info = "early power domain ";
629                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
630         } else if (dev->type && dev->type->pm) {
631                 info = "early type ";
632                 callback = pm_late_early_op(dev->type->pm, state);
633         } else if (dev->class && dev->class->pm) {
634                 info = "early class ";
635                 callback = pm_late_early_op(dev->class->pm, state);
636         } else if (dev->bus && dev->bus->pm) {
637                 info = "early bus ";
638                 callback = pm_late_early_op(dev->bus->pm, state);
639         }
640
641         if (!callback && dev->driver && dev->driver->pm) {
642                 info = "early driver ";
643                 callback = pm_late_early_op(dev->driver->pm, state);
644         }
645
646         error = dpm_run_callback(callback, dev, state, info);
647         dev->power.is_late_suspended = false;
648
649  Out:
650         TRACE_RESUME(error);
651
652         pm_runtime_enable(dev);
653         complete_all(&dev->power.completion);
654         return error;
655 }
656
657 static void async_resume_early(void *data, async_cookie_t cookie)
658 {
659         struct device *dev = (struct device *)data;
660         int error;
661
662         error = device_resume_early(dev, pm_transition, true);
663         if (error)
664                 pm_dev_err(dev, pm_transition, " async", error);
665
666         put_device(dev);
667 }
668
669 /**
670  * dpm_resume_early - Execute "early resume" callbacks for all devices.
671  * @state: PM transition of the system being carried out.
672  */
673 void dpm_resume_early(pm_message_t state)
674 {
675         struct device *dev;
676         ktime_t starttime = ktime_get();
677
678         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
679         mutex_lock(&dpm_list_mtx);
680         pm_transition = state;
681
682         /*
683          * Advance the async threads upfront,
684          * in case the starting of async threads is
685          * delayed by non-async resuming devices.
686          */
687         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
688                 reinit_completion(&dev->power.completion);
689                 if (is_async(dev)) {
690                         get_device(dev);
691                         async_schedule(async_resume_early, dev);
692                 }
693         }
694
695         while (!list_empty(&dpm_late_early_list)) {
696                 dev = to_device(dpm_late_early_list.next);
697                 get_device(dev);
698                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
699                 mutex_unlock(&dpm_list_mtx);
700
701                 if (!is_async(dev)) {
702                         int error;
703
704                         error = device_resume_early(dev, state, false);
705                         if (error) {
706                                 suspend_stats.failed_resume_early++;
707                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
708                                 dpm_save_failed_dev(dev_name(dev));
709                                 pm_dev_err(dev, state, " early", error);
710                         }
711                 }
712                 mutex_lock(&dpm_list_mtx);
713                 put_device(dev);
714         }
715         mutex_unlock(&dpm_list_mtx);
716         async_synchronize_full();
717         dpm_show_time(starttime, state, "early");
718         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
719 }
720
721 /**
722  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
723  * @state: PM transition of the system being carried out.
724  */
725 void dpm_resume_start(pm_message_t state)
726 {
727         dpm_resume_noirq(state);
728         dpm_resume_early(state);
729 }
730 EXPORT_SYMBOL_GPL(dpm_resume_start);
731
732 /**
733  * device_resume - Execute "resume" callbacks for given device.
734  * @dev: Device to handle.
735  * @state: PM transition of the system being carried out.
736  * @async: If true, the device is being resumed asynchronously.
737  */
738 static int device_resume(struct device *dev, pm_message_t state, bool async)
739 {
740         pm_callback_t callback = NULL;
741         char *info = NULL;
742         int error = 0;
743         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
744
745         TRACE_DEVICE(dev);
746         TRACE_RESUME(0);
747
748         if (dev->power.syscore)
749                 goto Complete;
750
751         if (dev->power.direct_complete) {
752                 /* Match the pm_runtime_disable() in __device_suspend(). */
753                 pm_runtime_enable(dev);
754                 goto Complete;
755         }
756
757         dpm_wait(dev->parent, async);
758         dpm_watchdog_set(&wd, dev);
759         device_lock(dev);
760
761         /*
762          * This is a fib.  But we'll allow new children to be added below
763          * a resumed device, even if the device hasn't been completed yet.
764          */
765         dev->power.is_prepared = false;
766
767         if (!dev->power.is_suspended)
768                 goto Unlock;
769
770         if (dev->pm_domain) {
771                 info = "power domain ";
772                 callback = pm_op(&dev->pm_domain->ops, state);
773                 goto Driver;
774         }
775
776         if (dev->type && dev->type->pm) {
777                 info = "type ";
778                 callback = pm_op(dev->type->pm, state);
779                 goto Driver;
780         }
781
782         if (dev->class) {
783                 if (dev->class->pm) {
784                         info = "class ";
785                         callback = pm_op(dev->class->pm, state);
786                         goto Driver;
787                 } else if (dev->class->resume) {
788                         info = "legacy class ";
789                         callback = dev->class->resume;
790                         goto End;
791                 }
792         }
793
794         if (dev->bus) {
795                 if (dev->bus->pm) {
796                         info = "bus ";
797                         callback = pm_op(dev->bus->pm, state);
798                 } else if (dev->bus->resume) {
799                         info = "legacy bus ";
800                         callback = dev->bus->resume;
801                         goto End;
802                 }
803         }
804
805  Driver:
806         if (!callback && dev->driver && dev->driver->pm) {
807                 info = "driver ";
808                 callback = pm_op(dev->driver->pm, state);
809         }
810
811  End:
812         error = dpm_run_callback(callback, dev, state, info);
813         dev->power.is_suspended = false;
814
815  Unlock:
816         device_unlock(dev);
817         dpm_watchdog_clear(&wd);
818
819  Complete:
820         complete_all(&dev->power.completion);
821
822         TRACE_RESUME(error);
823
824         return error;
825 }
826
827 static void async_resume(void *data, async_cookie_t cookie)
828 {
829         struct device *dev = (struct device *)data;
830         int error;
831
832         error = device_resume(dev, pm_transition, true);
833         if (error)
834                 pm_dev_err(dev, pm_transition, " async", error);
835         put_device(dev);
836 }
837
838 /**
839  * dpm_drv_timeout - Driver suspend / resume watchdog handler.
840  * @data: Address of the struct dpm_drv_wd_data for the device that timed out.
841  *
842  * Called when a driver has timed out suspending or resuming.
843  * There's not much we can do here to recover, so BUG() out
844  * for a crash-dump.
845  */
847 static void dpm_drv_timeout(unsigned long data)
848 {
849         struct dpm_drv_wd_data *wd_data = (void *)data;
850         struct device *dev = wd_data->dev;
851         struct task_struct *tsk = wd_data->tsk;
852
853         printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
854                (dev->driver ? dev->driver->name : "no driver"));
855
856         printk(KERN_EMERG "dpm suspend stack:\n");
857         show_stack(tsk, NULL);
858
859         BUG();
860 }
861
862 /**
863  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
864  * @state: PM transition of the system being carried out.
865  *
866  * Execute the appropriate "resume" callback for all devices whose status
867  * indicates that they are suspended.
868  */
869 void dpm_resume(pm_message_t state)
870 {
871         struct device *dev;
872         ktime_t starttime = ktime_get();
873
874         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
875         might_sleep();
876
877         mutex_lock(&dpm_list_mtx);
878         pm_transition = state;
879         async_error = 0;
880
881         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
882                 reinit_completion(&dev->power.completion);
883                 if (is_async(dev)) {
884                         get_device(dev);
885                         async_schedule(async_resume, dev);
886                 }
887         }
888
889         while (!list_empty(&dpm_suspended_list)) {
890                 dev = to_device(dpm_suspended_list.next);
891                 get_device(dev);
892                 if (!is_async(dev)) {
893                         int error;
894
895                         mutex_unlock(&dpm_list_mtx);
896
897                         error = device_resume(dev, state, false);
898                         if (error) {
899                                 suspend_stats.failed_resume++;
900                                 dpm_save_failed_step(SUSPEND_RESUME);
901                                 dpm_save_failed_dev(dev_name(dev));
902                                 pm_dev_err(dev, state, "", error);
903                         }
904
905                         mutex_lock(&dpm_list_mtx);
906                 }
907                 if (!list_empty(&dev->power.entry))
908                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
909                 put_device(dev);
910         }
911         mutex_unlock(&dpm_list_mtx);
912         async_synchronize_full();
913         dpm_show_time(starttime, state, NULL);
914
915         cpufreq_resume();
916         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
917 }
918
919 /**
920  * device_complete - Complete a PM transition for given device.
921  * @dev: Device to handle.
922  * @state: PM transition of the system being carried out.
923  */
924 static void device_complete(struct device *dev, pm_message_t state)
925 {
926         void (*callback)(struct device *) = NULL;
927         char *info = NULL;
928
929         if (dev->power.syscore)
930                 return;
931
932         device_lock(dev);
933
934         if (dev->pm_domain) {
935                 info = "completing power domain ";
936                 callback = dev->pm_domain->ops.complete;
937         } else if (dev->type && dev->type->pm) {
938                 info = "completing type ";
939                 callback = dev->type->pm->complete;
940         } else if (dev->class && dev->class->pm) {
941                 info = "completing class ";
942                 callback = dev->class->pm->complete;
943         } else if (dev->bus && dev->bus->pm) {
944                 info = "completing bus ";
945                 callback = dev->bus->pm->complete;
946         }
947
948         if (!callback && dev->driver && dev->driver->pm) {
949                 info = "completing driver ";
950                 callback = dev->driver->pm->complete;
951         }
952
953         if (callback) {
954                 pm_dev_dbg(dev, state, info);
955                 callback(dev);
956         }
957
958         device_unlock(dev);
959
960         pm_runtime_put(dev);
961 }
962
963 /**
964  * dpm_complete - Complete a PM transition for all non-sysdev devices.
965  * @state: PM transition of the system being carried out.
966  *
967  * Execute the ->complete() callbacks for all devices whose PM status is not
968  * DPM_ON (this allows new devices to be registered).
969  */
970 void dpm_complete(pm_message_t state)
971 {
972         struct list_head list;
973
974         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
975         might_sleep();
976
977         INIT_LIST_HEAD(&list);
978         mutex_lock(&dpm_list_mtx);
979         while (!list_empty(&dpm_prepared_list)) {
980                 struct device *dev = to_device(dpm_prepared_list.prev);
981
982                 get_device(dev);
983                 dev->power.is_prepared = false;
984                 list_move(&dev->power.entry, &list);
985                 mutex_unlock(&dpm_list_mtx);
986
987                 trace_device_pm_callback_start(dev, "", state.event);
988                 device_complete(dev, state);
989                 trace_device_pm_callback_end(dev, 0);
990
991                 mutex_lock(&dpm_list_mtx);
992                 put_device(dev);
993         }
994         list_splice(&list, &dpm_list);
995         mutex_unlock(&dpm_list_mtx);
996         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
997 }
998
999 /**
1000  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1001  * @state: PM transition of the system being carried out.
1002  *
1003  * Execute "resume" callbacks for all devices and complete the PM transition of
1004  * the system.
1005  */
1006 void dpm_resume_end(pm_message_t state)
1007 {
1008         dpm_resume(state);
1009         dpm_complete(state);
1010 }
1011 EXPORT_SYMBOL_GPL(dpm_resume_end);
1012
1013
1014 /*------------------------- Suspend routines -------------------------*/
1015
1016 /**
1017  * resume_event - Return a "resume" message for given "suspend" sleep state.
1018  * @sleep_state: PM message representing a sleep state.
1019  *
1020  * Return a PM message representing the resume event corresponding to given
1021  * sleep state.
1022  */
1023 static pm_message_t resume_event(pm_message_t sleep_state)
1024 {
1025         switch (sleep_state.event) {
1026         case PM_EVENT_SUSPEND:
1027                 return PMSG_RESUME;
1028         case PM_EVENT_FREEZE:
1029         case PM_EVENT_QUIESCE:
1030                 return PMSG_RECOVER;
1031         case PM_EVENT_HIBERNATE:
1032                 return PMSG_RESTORE;
1033         }
1034         return PMSG_ON;
1035 }
1036
1037 /**
1038  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1039  * @dev: Device to handle.
1040  * @state: PM transition of the system being carried out.
1041  * @async: If true, the device is being suspended asynchronously.
1042  *
1043  * The driver of @dev will not receive interrupts while this function is being
1044  * executed.
1045  */
1046 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1047 {
1048         pm_callback_t callback = NULL;
1049         char *info = NULL;
1050         int error = 0;
1051
1052         TRACE_DEVICE(dev);
1053         TRACE_SUSPEND(0);
1054
1055         if (async_error)
1056                 goto Complete;
1057
1058         if (pm_wakeup_pending()) {
1059                 async_error = -EBUSY;
1060                 goto Complete;
1061         }
1062
1063         if (dev->power.syscore || dev->power.direct_complete)
1064                 goto Complete;
1065
1066         dpm_wait_for_children(dev, async);
1067
1068         if (dev->pm_domain) {
1069                 info = "noirq power domain ";
1070                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1071         } else if (dev->type && dev->type->pm) {
1072                 info = "noirq type ";
1073                 callback = pm_noirq_op(dev->type->pm, state);
1074         } else if (dev->class && dev->class->pm) {
1075                 info = "noirq class ";
1076                 callback = pm_noirq_op(dev->class->pm, state);
1077         } else if (dev->bus && dev->bus->pm) {
1078                 info = "noirq bus ";
1079                 callback = pm_noirq_op(dev->bus->pm, state);
1080         }
1081
1082         if (!callback && dev->driver && dev->driver->pm) {
1083                 info = "noirq driver ";
1084                 callback = pm_noirq_op(dev->driver->pm, state);
1085         }
1086
1087         error = dpm_run_callback(callback, dev, state, info);
1088         if (!error)
1089                 dev->power.is_noirq_suspended = true;
1090         else
1091                 async_error = error;
1092
1093 Complete:
1094         complete_all(&dev->power.completion);
1095         TRACE_SUSPEND(error);
1096         return error;
1097 }
1098
1099 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1100 {
1101         struct device *dev = (struct device *)data;
1102         int error;
1103
1104         error = __device_suspend_noirq(dev, pm_transition, true);
1105         if (error) {
1106                 dpm_save_failed_dev(dev_name(dev));
1107                 pm_dev_err(dev, pm_transition, " async", error);
1108         }
1109
1110         put_device(dev);
1111 }
1112
1113 static int device_suspend_noirq(struct device *dev)
1114 {
1115         reinit_completion(&dev->power.completion);
1116
1117         if (is_async(dev)) {
1118                 get_device(dev);
1119                 async_schedule(async_suspend_noirq, dev);
1120                 return 0;
1121         }
1122         return __device_suspend_noirq(dev, pm_transition, false);
1123 }
1124
1125 /**
1126  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1127  * @state: PM transition of the system being carried out.
1128  *
1129  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1130  * handlers for all non-sysdev devices.
1131  */
1132 int dpm_suspend_noirq(pm_message_t state)
1133 {
1134         ktime_t starttime = ktime_get();
1135         int error = 0;
1136
1137         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1138         cpuidle_pause();
1139         device_wakeup_arm_wake_irqs();
1140         suspend_device_irqs();
1141         mutex_lock(&dpm_list_mtx);
1142         pm_transition = state;
1143         async_error = 0;
1144
1145         while (!list_empty(&dpm_late_early_list)) {
1146                 struct device *dev = to_device(dpm_late_early_list.prev);
1147
1148                 get_device(dev);
1149                 mutex_unlock(&dpm_list_mtx);
1150
1151                 error = device_suspend_noirq(dev);
1152
1153                 mutex_lock(&dpm_list_mtx);
1154                 if (error) {
1155                         pm_dev_err(dev, state, " noirq", error);
1156                         dpm_save_failed_dev(dev_name(dev));
1157                         put_device(dev);
1158                         break;
1159                 }
1160                 if (!list_empty(&dev->power.entry))
1161                         list_move(&dev->power.entry, &dpm_noirq_list);
1162                 put_device(dev);
1163
1164                 if (async_error)
1165                         break;
1166         }
1167         mutex_unlock(&dpm_list_mtx);
1168         async_synchronize_full();
1169         if (!error)
1170                 error = async_error;
1171
1172         if (error) {
1173                 suspend_stats.failed_suspend_noirq++;
1174                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1175                 dpm_resume_noirq(resume_event(state));
1176         } else {
1177                 dpm_show_time(starttime, state, "noirq");
1178         }
1179         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1180         return error;
1181 }
1182
1183 /**
1184  * __device_suspend_late - Execute a "late suspend" callback for given device.
1185  * @dev: Device to handle.
1186  * @state: PM transition of the system being carried out.
1187  * @async: If true, the device is being suspended asynchronously.
1188  *
1189  * Runtime PM is disabled for @dev while this function is being executed.
1190  */
1191 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1192 {
1193         pm_callback_t callback = NULL;
1194         char *info = NULL;
1195         int error = 0;
1196
1197         TRACE_DEVICE(dev);
1198         TRACE_SUSPEND(0);
1199
1200         __pm_runtime_disable(dev, false);
1201
1202         if (async_error)
1203                 goto Complete;
1204
1205         if (pm_wakeup_pending()) {
1206                 async_error = -EBUSY;
1207                 goto Complete;
1208         }
1209
1210         if (dev->power.syscore || dev->power.direct_complete)
1211                 goto Complete;
1212
1213         dpm_wait_for_children(dev, async);
1214
1215         if (dev->pm_domain) {
1216                 info = "late power domain ";
1217                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1218         } else if (dev->type && dev->type->pm) {
1219                 info = "late type ";
1220                 callback = pm_late_early_op(dev->type->pm, state);
1221         } else if (dev->class && dev->class->pm) {
1222                 info = "late class ";
1223                 callback = pm_late_early_op(dev->class->pm, state);
1224         } else if (dev->bus && dev->bus->pm) {
1225                 info = "late bus ";
1226                 callback = pm_late_early_op(dev->bus->pm, state);
1227         }
1228
1229         if (!callback && dev->driver && dev->driver->pm) {
1230                 info = "late driver ";
1231                 callback = pm_late_early_op(dev->driver->pm, state);
1232         }
1233
1234         error = dpm_run_callback(callback, dev, state, info);
1235         if (!error)
1236                 dev->power.is_late_suspended = true;
1237         else
1238                 async_error = error;
1239
1240 Complete:
1241         TRACE_SUSPEND(error);
1242         complete_all(&dev->power.completion);
1243         return error;
1244 }
1245
1246 static void async_suspend_late(void *data, async_cookie_t cookie)
1247 {
1248         struct device *dev = (struct device *)data;
1249         int error;
1250
1251         error = __device_suspend_late(dev, pm_transition, true);
1252         if (error) {
1253                 dpm_save_failed_dev(dev_name(dev));
1254                 pm_dev_err(dev, pm_transition, " async", error);
1255         }
1256         put_device(dev);
1257 }
1258
1259 static int device_suspend_late(struct device *dev)
1260 {
1261         reinit_completion(&dev->power.completion);
1262
1263         if (is_async(dev)) {
1264                 get_device(dev);
1265                 async_schedule(async_suspend_late, dev);
1266                 return 0;
1267         }
1268
1269         return __device_suspend_late(dev, pm_transition, false);
1270 }
1271
1272 /**
1273  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1274  * @state: PM transition of the system being carried out.
1275  */
1276 int dpm_suspend_late(pm_message_t state)
1277 {
1278         ktime_t starttime = ktime_get();
1279         int error = 0;
1280
1281         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1282         mutex_lock(&dpm_list_mtx);
1283         pm_transition = state;
1284         async_error = 0;
1285
1286         while (!list_empty(&dpm_suspended_list)) {
1287                 struct device *dev = to_device(dpm_suspended_list.prev);
1288
1289                 get_device(dev);
1290                 mutex_unlock(&dpm_list_mtx);
1291
1292                 error = device_suspend_late(dev);
1293
1294                 mutex_lock(&dpm_list_mtx);
1295                 if (error) {
1296                         pm_dev_err(dev, state, " late", error);
1297                         dpm_save_failed_dev(dev_name(dev));
1298                         put_device(dev);
1299                         break;
1300                 }
1301                 if (!list_empty(&dev->power.entry))
1302                         list_move(&dev->power.entry, &dpm_late_early_list);
1303                 put_device(dev);
1304
1305                 if (async_error)
1306                         break;
1307         }
1308         mutex_unlock(&dpm_list_mtx);
1309         async_synchronize_full();
1310         if (!error)
1311                 error = async_error;
1312         if (error) {
1313                 suspend_stats.failed_suspend_late++;
1314                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1315                 dpm_resume_early(resume_event(state));
1316         } else {
1317                 dpm_show_time(starttime, state, "late");
1318         }
1319         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1320         return error;
1321 }
1322
1323 /**
1324  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1325  * @state: PM transition of the system being carried out.
1326  */
1327 int dpm_suspend_end(pm_message_t state)
1328 {
1329         int error = dpm_suspend_late(state);
1330         if (error)
1331                 return error;
1332
1333         error = dpm_suspend_noirq(state);
1334         if (error) {
1335                 dpm_resume_early(resume_event(state));
1336                 return error;
1337         }
1338
1339         return 0;
1340 }
1341 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1342
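/*
 * Simplified sketch of how a system sleep core strings the exported phase
 * entry points together (the real sequencing lives in kernel/power/ and
 * also handles platform hooks, wakeup checks and error unwinding;
 * enter_the_sleep_state() is hypothetical):
 *
 *      error = dpm_suspend_start(PMSG_SUSPEND);        // prepare + suspend
 *      if (!error) {
 *              error = dpm_suspend_end(PMSG_SUSPEND);  // late + noirq
 *              if (!error) {
 *                      enter_the_sleep_state();
 *                      dpm_resume_start(PMSG_RESUME);  // noirq + early
 *              }
 *      }
 *      dpm_resume_end(PMSG_RESUME);                    // resume + complete
 */
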
1343 /**
1344  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1345  * @dev: Device to suspend.
1346  * @state: PM transition of the system being carried out.
1347  * @cb: Suspend callback to execute.
1348  * @info: string description of caller.
1349  */
1350 static int legacy_suspend(struct device *dev, pm_message_t state,
1351                           int (*cb)(struct device *dev, pm_message_t state),
1352                           char *info)
1353 {
1354         int error;
1355         ktime_t calltime;
1356
1357         calltime = initcall_debug_start(dev);
1358
1359         trace_device_pm_callback_start(dev, info, state.event);
1360         error = cb(dev, state);
1361         trace_device_pm_callback_end(dev, error);
1362         suspend_report_result(cb, error);
1363
1364         initcall_debug_report(dev, calltime, error, state, info);
1365
1366         return error;
1367 }
1368
1369 /**
1370  * __device_suspend - Execute "suspend" callbacks for given device.
1371  * @dev: Device to handle.
1372  * @state: PM transition of the system being carried out.
1373  * @async: If true, the device is being suspended asynchronously.
1374  */
1375 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1376 {
1377         pm_callback_t callback = NULL;
1378         char *info = NULL;
1379         int error = 0;
1380         struct timer_list timer;
1381         struct dpm_drv_wd_data data;
1382         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1383
1384         TRACE_DEVICE(dev);
1385         TRACE_SUSPEND(0);
1386
1387         dpm_wait_for_children(dev, async);
1388
1389         if (async_error)
1390                 goto Complete;
1391
1392         /*
1393          * If a device configured to wake up the system from sleep states
1394          * has been suspended at run time and there's a resume request pending
1395          * for it, this is equivalent to the device signaling wakeup, so the
1396          * system suspend operation should be aborted.
1397          */
1398         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1399                 pm_wakeup_event(dev, 0);
1400
1401         if (pm_wakeup_pending()) {
1402                 async_error = -EBUSY;
1403                 goto Complete;
1404         }
1405
1406         if (dev->power.syscore)
1407                 goto Complete;
1408
1409         data.dev = dev;
1410         data.tsk = get_current();
1411         init_timer_on_stack(&timer);
1412         timer.expires = jiffies + HZ * 12;
1413         timer.function = dpm_drv_timeout;
1414         timer.data = (unsigned long)&data;
1415         add_timer(&timer);
1416
1417         if (dev->power.direct_complete) {
1418                 if (pm_runtime_status_suspended(dev)) {
1419                         pm_runtime_disable(dev);
1420                         if (pm_runtime_status_suspended(dev))
1421                                 goto Complete;
1422
1423                         pm_runtime_enable(dev);
1424                 }
1425                 dev->power.direct_complete = false;
1426         }
1427
1428         dpm_watchdog_set(&wd, dev);
1429         device_lock(dev);
1430
1431         if (dev->pm_domain) {
1432                 info = "power domain ";
1433                 callback = pm_op(&dev->pm_domain->ops, state);
1434                 goto Run;
1435         }
1436
1437         if (dev->type && dev->type->pm) {
1438                 info = "type ";
1439                 callback = pm_op(dev->type->pm, state);
1440                 goto Run;
1441         }
1442
1443         if (dev->class) {
1444                 if (dev->class->pm) {
1445                         info = "class ";
1446                         callback = pm_op(dev->class->pm, state);
1447                         goto Run;
1448                 } else if (dev->class->suspend) {
1449                         pm_dev_dbg(dev, state, "legacy class ");
1450                         error = legacy_suspend(dev, state, dev->class->suspend,
1451                                                 "legacy class ");
1452                         goto End;
1453                 }
1454         }
1455
1456         if (dev->bus) {
1457                 if (dev->bus->pm) {
1458                         info = "bus ";
1459                         callback = pm_op(dev->bus->pm, state);
1460                 } else if (dev->bus->suspend) {
1461                         pm_dev_dbg(dev, state, "legacy bus ");
1462                         error = legacy_suspend(dev, state, dev->bus->suspend,
1463                                                 "legacy bus ");
1464                         goto End;
1465                 }
1466         }
1467
1468  Run:
1469         if (!callback && dev->driver && dev->driver->pm) {
1470                 info = "driver ";
1471                 callback = pm_op(dev->driver->pm, state);
1472         }
1473
1474         error = dpm_run_callback(callback, dev, state, info);
1475
1476  End:
1477         if (!error) {
1478                 struct device *parent = dev->parent;
1479
1480                 dev->power.is_suspended = true;
1481                 if (parent) {
1482                         spin_lock_irq(&parent->power.lock);
1483
1484                         dev->parent->power.direct_complete = false;
1485                         if (dev->power.wakeup_path
1486                             && !dev->parent->power.ignore_children)
1487                                 dev->parent->power.wakeup_path = true;
1488
1489                         spin_unlock_irq(&parent->power.lock);
1490                 }
1491         }
1492
1493         device_unlock(dev);
1494         dpm_watchdog_clear(&wd);
1495
1496         del_timer_sync(&timer);
1497         destroy_timer_on_stack(&timer);
1498
1499  Complete:
1500         complete_all(&dev->power.completion);
1501         if (error)
1502                 async_error = error;
1503
1504         TRACE_SUSPEND(error);
1505         return error;
1506 }
1507
1508 static void async_suspend(void *data, async_cookie_t cookie)
1509 {
1510         struct device *dev = (struct device *)data;
1511         int error;
1512
1513         error = __device_suspend(dev, pm_transition, true);
1514         if (error) {
1515                 dpm_save_failed_dev(dev_name(dev));
1516                 pm_dev_err(dev, pm_transition, " async", error);
1517         }
1518
1519         put_device(dev);
1520 }
1521
1522 static int device_suspend(struct device *dev)
1523 {
1524         reinit_completion(&dev->power.completion);
1525
1526         if (is_async(dev)) {
1527                 get_device(dev);
1528                 async_schedule(async_suspend, dev);
1529                 return 0;
1530         }
1531
1532         return __device_suspend(dev, pm_transition, false);
1533 }
1534
1535 /**
1536  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1537  * @state: PM transition of the system being carried out.
1538  */
1539 int dpm_suspend(pm_message_t state)
1540 {
1541         ktime_t starttime = ktime_get();
1542         int error = 0;
1543
1544         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1545         might_sleep();
1546
1547         cpufreq_suspend();
1548
1549         mutex_lock(&dpm_list_mtx);
1550         pm_transition = state;
1551         async_error = 0;
1552         while (!list_empty(&dpm_prepared_list)) {
1553                 struct device *dev = to_device(dpm_prepared_list.prev);
1554
1555                 get_device(dev);
1556                 mutex_unlock(&dpm_list_mtx);
1557
1558                 error = device_suspend(dev);
1559
1560                 mutex_lock(&dpm_list_mtx);
1561                 if (error) {
1562                         pm_dev_err(dev, state, "", error);
1563                         dpm_save_failed_dev(dev_name(dev));
1564                         put_device(dev);
1565                         break;
1566                 }
1567                 if (!list_empty(&dev->power.entry))
1568                         list_move(&dev->power.entry, &dpm_suspended_list);
1569                 put_device(dev);
1570                 if (async_error)
1571                         break;
1572         }
1573         mutex_unlock(&dpm_list_mtx);
1574         async_synchronize_full();
1575         if (!error)
1576                 error = async_error;
1577         if (error) {
1578                 suspend_stats.failed_suspend++;
1579                 dpm_save_failed_step(SUSPEND_SUSPEND);
1580         } else
1581                 dpm_show_time(starttime, state, NULL);
1582         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1583         return error;
1584 }
1585
1586 /**
1587  * device_prepare - Prepare a device for system power transition.
1588  * @dev: Device to handle.
1589  * @state: PM transition of the system being carried out.
1590  *
1591  * Execute the ->prepare() callback(s) for given device.  No new children of the
1592  * device may be registered after this function has returned.
1593  */
1594 static int device_prepare(struct device *dev, pm_message_t state)
1595 {
1596         int (*callback)(struct device *) = NULL;
1597         char *info = NULL;
1598         int ret = 0;
1599
1600         if (dev->power.syscore)
1601                 return 0;
1602
1603         /*
1604          * If a device's parent goes into runtime suspend at the wrong time,
1605          * it won't be possible to resume the device.  To prevent this we
1606          * block runtime suspend here, during the prepare phase, and allow
1607          * it again during the complete phase.
1608          */
1609         pm_runtime_get_noresume(dev);
1610
1611         device_lock(dev);
1612
1613         dev->power.wakeup_path = device_may_wakeup(dev);
1614
1615         if (dev->pm_domain) {
1616                 info = "preparing power domain ";
1617                 callback = dev->pm_domain->ops.prepare;
1618         } else if (dev->type && dev->type->pm) {
1619                 info = "preparing type ";
1620                 callback = dev->type->pm->prepare;
1621         } else if (dev->class && dev->class->pm) {
1622                 info = "preparing class ";
1623                 callback = dev->class->pm->prepare;
1624         } else if (dev->bus && dev->bus->pm) {
1625                 info = "preparing bus ";
1626                 callback = dev->bus->pm->prepare;
1627         }
1628
1629         if (!callback && dev->driver && dev->driver->pm) {
1630                 info = "preparing driver ";
1631                 callback = dev->driver->pm->prepare;
1632         }
1633
1634         if (callback)
1635                 ret = callback(dev);
1636
1637         device_unlock(dev);
1638
1639         if (ret < 0) {
1640                 suspend_report_result(callback, ret);
1641                 pm_runtime_put(dev);
1642                 return ret;
1643         }
1644         /*
1645          * A positive return value from ->prepare() means "this device appears
1646          * to be runtime-suspended and its state is fine, so if it really is
1647          * runtime-suspended, you can leave it in that state provided that you
1648          * will do the same thing with all of its descendants".  This only
1649          * applies to suspend transitions, however.
1650          */
1651         spin_lock_irq(&dev->power.lock);
1652         dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1653         spin_unlock_irq(&dev->power.lock);
1654         return 0;
1655 }
1656
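/*
 * Minimal sketch of a ->prepare() callback that opts into the
 * direct_complete path described above (foo_prepare() is hypothetical):
 * a positive return value tells the PM core that, if the device really is
 * runtime-suspended, it may be left in that state for the whole system
 * suspend, provided all of its descendants are treated the same way.
 *
 *      static int foo_prepare(struct device *dev)
 *      {
 *              return pm_runtime_suspended(dev);
 *      }
 */
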
1657 /**
1658  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1659  * @state: PM transition of the system being carried out.
1660  *
1661  * Execute the ->prepare() callback(s) for all devices.
1662  */
1663 int dpm_prepare(pm_message_t state)
1664 {
1665         int error = 0;
1666
1667         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1668         might_sleep();
1669
1670         mutex_lock(&dpm_list_mtx);
1671         while (!list_empty(&dpm_list)) {
1672                 struct device *dev = to_device(dpm_list.next);
1673
1674                 get_device(dev);
1675                 mutex_unlock(&dpm_list_mtx);
1676
1677                 trace_device_pm_callback_start(dev, "", state.event);
1678                 error = device_prepare(dev, state);
1679                 trace_device_pm_callback_end(dev, error);
1680
1681                 mutex_lock(&dpm_list_mtx);
1682                 if (error) {
1683                         if (error == -EAGAIN) {
1684                                 put_device(dev);
1685                                 error = 0;
1686                                 continue;
1687                         }
1688                         printk(KERN_INFO "PM: Device %s not prepared "
1689                                 "for power transition: code %d\n",
1690                                 dev_name(dev), error);
1691                         put_device(dev);
1692                         break;
1693                 }
1694                 dev->power.is_prepared = true;
1695                 if (!list_empty(&dev->power.entry))
1696                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1697                 put_device(dev);
1698         }
1699         mutex_unlock(&dpm_list_mtx);
1700         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1701         return error;
1702 }
1703
1704 /**
1705  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1706  * @state: PM transition of the system being carried out.
1707  *
1708  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1709  * callbacks for them.
1710  */
1711 int dpm_suspend_start(pm_message_t state)
1712 {
1713         int error;
1714
1715         error = dpm_prepare(state);
1716         if (error) {
1717                 suspend_stats.failed_prepare++;
1718                 dpm_save_failed_step(SUSPEND_PREPARE);
1719         } else
1720                 error = dpm_suspend(state);
1721         return error;
1722 }
1723 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1724
1725 void __suspend_report_result(const char *function, void *fn, int ret)
1726 {
1727         if (ret)
1728                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1729 }
1730 EXPORT_SYMBOL_GPL(__suspend_report_result);
1731
1732 /**
1733  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1734  * @dev: Device to wait for.
1735  * @subordinate: Device that needs to wait for @dev.
1736  */
1737 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1738 {
1739         dpm_wait(dev, subordinate->power.async_suspend);
1740         return async_error;
1741 }
1742 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1743
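/*
 * Illustrative sketch (foo_resume() and the companion pointer are
 * hypothetical): a driver whose device depends on another device that is
 * not its parent can call device_pm_wait_for_dev() from its own callback,
 * so that asynchronous suspend/resume keeps the functional dependency
 * ordered:
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              struct device *companion = dev_get_drvdata(dev);
 *
 *              return device_pm_wait_for_dev(dev, companion);
 *      }
 */
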
1744 /**
1745  * dpm_for_each_dev - device iterator.
1746  * @data: data for the callback.
1747  * @fn: function to be called for each device.
1748  *
1749  * Iterate over devices in dpm_list, and call @fn for each device,
1750  * passing it @data.
1751  */
1752 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1753 {
1754         struct device *dev;
1755
1756         if (!fn)
1757                 return;
1758
1759         device_pm_lock();
1760         list_for_each_entry(dev, &dpm_list, power.entry)
1761                 fn(dev, data);
1762         device_pm_unlock();
1763 }
1764 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
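/*
 * Minimal usage sketch (foo_show_async() is hypothetical): a caller can
 * walk every device known to the PM core, e.g. to log which devices are
 * flagged for asynchronous suspend:
 *
 *      static void foo_show_async(struct device *dev, void *data)
 *      {
 *              if (dev->power.async_suspend)
 *                      dev_info(dev, "async suspend enabled\n");
 *      }
 *
 *      dpm_for_each_dev(NULL, foo_show_async);
 */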