/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/power.h>

#include "power.h"

/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled __read_mostly;

/* If set and the system is suspending, terminate the suspend. */
static bool pm_abort_suspend __read_mostly;

/*
 * Combined counters of registered wakeup events and wakeup events in progress.
 * They need to be modified together atomically, so it's better to use one
 * atomic variable to hold them both.
 */
static atomic_t combined_event_count = ATOMIC_INIT(0);

#define IN_PROGRESS_BITS        (sizeof(int) * 4)
#define MAX_IN_PROGRESS         ((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
        unsigned int comb = atomic_read(&combined_event_count);

        *cnt = (comb >> IN_PROGRESS_BITS);
        *inpr = comb & MAX_IN_PROGRESS;
}
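
/*
 * Worked example (illustrative, not part of the original code): with a 32-bit
 * int, IN_PROGRESS_BITS is 16, so a combined value of 0x00030002 splits into
 * cnt = 3 (registered events) and inpr = 2 (events still in progress).
 * wakeup_source_activate() increments the low half with atomic_inc_return(),
 * while wakeup_source_deactivate() adds MAX_IN_PROGRESS, which moves one unit
 * from the "in progress" half into the "registered" half.
 */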

/* A preserved old value of the events counter. */
static unsigned int saved_count;

static DEFINE_SPINLOCK(events_lock);

static void pm_wakeup_timer_fn(unsigned long data);

static LIST_HEAD(wakeup_sources);

static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);

/**
 * wakeup_source_prepare - Prepare a new wakeup source for initialization.
 * @ws: Wakeup source to prepare.
 * @name: Pointer to the name of the new wakeup source.
 *
 * Callers must ensure that the @name string won't be freed when @ws is still in
 * use.
 */
void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
{
        if (ws) {
                memset(ws, 0, sizeof(*ws));
                ws->name = name;
        }
}
EXPORT_SYMBOL_GPL(wakeup_source_prepare);

/**
 * wakeup_source_create - Create a struct wakeup_source object.
 * @name: Name of the new wakeup source.
 */
struct wakeup_source *wakeup_source_create(const char *name)
{
        struct wakeup_source *ws;

        ws = kmalloc(sizeof(*ws), GFP_KERNEL);
        if (!ws)
                return NULL;

        wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
        return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);

/**
 * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
 * @ws: Wakeup source to prepare for destruction.
 *
 * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
 * be run in parallel with this function for the same wakeup source object.
 */
void wakeup_source_drop(struct wakeup_source *ws)
{
        if (!ws)
                return;

        del_timer_sync(&ws->timer);
        __pm_relax(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_drop);

/**
 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 * @ws: Wakeup source to destroy.
 *
 * Use only for wakeup source objects created with wakeup_source_create().
 */
void wakeup_source_destroy(struct wakeup_source *ws)
{
        if (!ws)
                return;

        wakeup_source_drop(ws);
        kfree(ws->name);
        kfree(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);

/**
 * wakeup_source_add - Add given object to the list of wakeup sources.
 * @ws: Wakeup source object to add to the list.
 */
void wakeup_source_add(struct wakeup_source *ws)
{
        unsigned long flags;

        if (WARN_ON(!ws))
                return;

        spin_lock_init(&ws->lock);
        setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
        ws->active = false;
        ws->last_time = ktime_get();

        spin_lock_irqsave(&events_lock, flags);
        list_add_rcu(&ws->entry, &wakeup_sources);
        spin_unlock_irqrestore(&events_lock, flags);
}
EXPORT_SYMBOL_GPL(wakeup_source_add);

/**
 * wakeup_source_remove - Remove given object from the wakeup sources list.
 * @ws: Wakeup source object to remove from the list.
 */
void wakeup_source_remove(struct wakeup_source *ws)
{
        unsigned long flags;

        if (WARN_ON(!ws))
                return;

        spin_lock_irqsave(&events_lock, flags);
        list_del_rcu(&ws->entry);
        spin_unlock_irqrestore(&events_lock, flags);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

/**
 * wakeup_source_register - Create wakeup source and add it to the list.
 * @name: Name of the wakeup source to register.
 */
struct wakeup_source *wakeup_source_register(const char *name)
{
        struct wakeup_source *ws;

        ws = wakeup_source_create(name);
        if (ws)
                wakeup_source_add(ws);

        return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);

/**
 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
 * @ws: Wakeup source object to unregister.
 */
void wakeup_source_unregister(struct wakeup_source *ws)
{
        if (ws) {
                wakeup_source_remove(ws);
                wakeup_source_destroy(ws);
        }
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
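
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * that needs a wakeup source which is not tied to a struct device can manage
 * one directly.  The name "my_driver_event" below is purely hypothetical.
 *
 *      ws = wakeup_source_register("my_driver_event");
 *      if (!ws)
 *              return -ENOMEM;
 *
 *      __pm_stay_awake(ws);            (event detected, keep the system awake)
 *      __pm_relax(ws);                 (processing done, suspend may proceed)
 *
 *      wakeup_source_unregister(ws);   (on driver teardown)
 */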

/**
 * device_wakeup_attach - Attach a wakeup source object to a device object.
 * @dev: Device to handle.
 * @ws: Wakeup source object to attach to @dev.
 *
 * This causes @dev to be treated as a wakeup device.
 */
static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                spin_unlock_irq(&dev->power.lock);
                return -EEXIST;
        }
        dev->power.wakeup = ws;
        spin_unlock_irq(&dev->power.lock);
        return 0;
}

/**
 * device_wakeup_enable - Enable given device to be a wakeup source.
 * @dev: Device to handle.
 *
 * Create a wakeup source object, register it and attach it to @dev.
 */
int device_wakeup_enable(struct device *dev)
{
        struct wakeup_source *ws;
        int ret;

        if (!dev || !dev->power.can_wakeup)
                return -EINVAL;

        ws = wakeup_source_register(dev_name(dev));
        if (!ws)
                return -ENOMEM;

        ret = device_wakeup_attach(dev, ws);
        if (ret)
                wakeup_source_unregister(ws);

        return ret;
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);

/**
 * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
 * @dev: Device to handle
 * @wakeirq: Device specific wakeirq entry
 *
 * Attach a device wakeirq to the wakeup source so the device
 * wake IRQ can be configured automatically for suspend and
 * resume.
 */
int device_wakeup_attach_irq(struct device *dev,
                             struct wake_irq *wakeirq)
{
        struct wakeup_source *ws;
        int ret = 0;

        spin_lock_irq(&dev->power.lock);
        ws = dev->power.wakeup;
        if (!ws) {
                dev_err(dev, "forgot to call device_init_wakeup?\n");
                ret = -EINVAL;
                goto unlock;
        }

        if (ws->wakeirq) {
                ret = -EEXIST;
                goto unlock;
        }

        ws->wakeirq = wakeirq;

unlock:
        spin_unlock_irq(&dev->power.lock);

        return ret;
}
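
/*
 * Note: drivers are not expected to call device_wakeup_attach_irq() directly;
 * it is reached via the dev_pm_set_wake_irq() and
 * dev_pm_set_dedicated_wake_irq() helpers declared in <linux/pm_wakeirq.h>,
 * which a driver calls after device_init_wakeup(dev, true).
 */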

/**
 * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
 * @dev: Device to handle
 *
 * Removes a device wakeirq from the wakeup source.
 */
void device_wakeup_detach_irq(struct device *dev)
{
        struct wakeup_source *ws;

        spin_lock_irq(&dev->power.lock);
        ws = dev->power.wakeup;
        if (!ws)
                goto unlock;

        ws->wakeirq = NULL;

unlock:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * device_wakeup_arm_wake_irqs(void)
 *
 * Iterates over the list of device wakeirqs to arm them.
 */
void device_wakeup_arm_wake_irqs(void)
{
        struct wakeup_source *ws;

        rcu_read_lock();
        list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
                if (ws->wakeirq)
                        dev_pm_arm_wake_irq(ws->wakeirq);
        }
        rcu_read_unlock();
}

/**
 * device_wakeup_disarm_wake_irqs(void)
 *
 * Iterates over the list of device wakeirqs to disarm them.
 */
void device_wakeup_disarm_wake_irqs(void)
{
        struct wakeup_source *ws;

        rcu_read_lock();
        list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
                if (ws->wakeirq)
                        dev_pm_disarm_wake_irq(ws->wakeirq);
        }
        rcu_read_unlock();
}

/**
 * device_wakeup_detach - Detach a device's wakeup source object from it.
 * @dev: Device to detach the wakeup source object from.
 *
 * After it returns, @dev will not be treated as a wakeup device any more.
 */
static struct wakeup_source *device_wakeup_detach(struct device *dev)
{
        struct wakeup_source *ws;

        spin_lock_irq(&dev->power.lock);
        ws = dev->power.wakeup;
        dev->power.wakeup = NULL;
        spin_unlock_irq(&dev->power.lock);
        return ws;
}

/**
 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 * @dev: Device to handle.
 *
 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 * object and destroy it.
 */
int device_wakeup_disable(struct device *dev)
{
        struct wakeup_source *ws;

        if (!dev || !dev->power.can_wakeup)
                return -EINVAL;

        ws = device_wakeup_detach(dev);
        if (ws)
                wakeup_source_unregister(ws);

        return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);

/**
 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 * @dev: Device to handle.
 * @capable: Whether or not @dev is capable of waking up the system from sleep.
 *
 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 *
 * This function may sleep and it can't be called from any context where
 * sleeping is not allowed.
 */
void device_set_wakeup_capable(struct device *dev, bool capable)
{
        if (!!dev->power.can_wakeup == !!capable)
                return;

        if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
                if (capable) {
                        if (wakeup_sysfs_add(dev))
                                return;
                } else {
                        wakeup_sysfs_remove(dev);
                }
        }
        dev->power.can_wakeup = capable;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);

/**
 * device_init_wakeup - Device wakeup initialization.
 * @dev: Device to handle.
 * @enable: Whether or not to enable @dev as a wakeup device.
 *
 * By default, most devices should leave wakeup disabled.  The exceptions are
 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
 * possibly network interfaces, etc.  Also, devices that don't generate their
 * own wakeup requests but merely forward requests from one bus to another
 * (like PCI bridges) should have wakeup enabled by default.
 */
int device_init_wakeup(struct device *dev, bool enable)
{
        int ret = 0;

        if (!dev)
                return -EINVAL;

        if (enable) {
                device_set_wakeup_capable(dev, true);
                ret = device_wakeup_enable(dev);
        } else {
                if (dev->power.can_wakeup)
                        device_wakeup_disable(dev);

                device_set_wakeup_capable(dev, false);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(device_init_wakeup);
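
/*
 * Usage sketch (illustrative only): a probe routine for a device that is
 * expected to wake the system, e.g. a hypothetical keypad driver, would do
 *
 *      device_init_wakeup(&pdev->dev, true);
 *
 * and undo it with device_init_wakeup(&pdev->dev, false) on remove.
 */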

/**
 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 * @dev: Device to handle.
 */
int device_set_wakeup_enable(struct device *dev, bool enable)
{
        if (!dev || !dev->power.can_wakeup)
                return -EINVAL;

        return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);

/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended.  When this period ends
 * depends on how the wakeup event is going to be processed after being
 * detected, and all of the possible cases can be divided into two distinct
 * groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly will pass it to user space
 * for further processing.  In that case the functional unit that has detected
 * the event may later "close" the "no suspend" period associated with it
 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
 * pm_relax(), balanced with each other, is supposed to be used in such
 * situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one.  In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing.  This
 * knowledge, however, may not be available to it, so it can simply specify time
 * to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 *
 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 * function executed when the timer expires, whichever comes first.
 */
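
/*
 * Usage sketch (illustrative only): the two patterns above as they might look
 * in a hypothetical interrupt handler.  If the handler itself completes the
 * processing of the event:
 *
 *      pm_stay_awake(dev);
 *      handle_event(dev);              (hypothetical helper)
 *      pm_relax(dev);
 *
 * If the event is handed off to another unit (e.g. queued for user space) and
 * the handler cannot tell when processing will end, it passes an estimate:
 *
 *      pm_wakeup_event(dev, 100);      (hold off suspend for about 100 ms)
 */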

/**
 * wakeup_source_activate - Mark given wakeup source as active.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
 * core of the event by incrementing the counter of wakeup events being
 * processed.
 */
static void wakeup_source_activate(struct wakeup_source *ws)
{
        unsigned int cec;

        /*
         * active wakeup source should bring the system
         * out of PM_SUSPEND_FREEZE state
         */
        freeze_wake();

        ws->active = true;
        ws->active_count++;
        ws->last_time = ktime_get();
        if (ws->autosleep_enabled)
                ws->start_prevent_time = ws->last_time;

        /* Increment the counter of events in progress. */
        cec = atomic_inc_return(&combined_event_count);

        trace_wakeup_source_activate(ws->name, cec);
}

/**
 * wakeup_source_report_event - Report wakeup event using the given source.
 * @ws: Wakeup source to report the event for.
 */
static void wakeup_source_report_event(struct wakeup_source *ws)
{
        ws->event_count++;
        /* This is racy, but the counter is approximate anyway. */
        if (events_check_enabled)
                ws->wakeup_count++;

        if (!ws->active)
                wakeup_source_activate(ws);
}

/**
 * __pm_stay_awake - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_stay_awake(struct wakeup_source *ws)
{
        unsigned long flags;

        if (!ws)
                return;

        spin_lock_irqsave(&ws->lock, flags);

        wakeup_source_report_event(ws);
        del_timer(&ws->timer);
        ws->timer_expires = 0;

        spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);

/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to.
 *
 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 * __pm_stay_awake for the @dev's wakeup source object.
 *
 * Call this function after detecting a wakeup event if pm_relax() is going
 * to be called directly after processing the event (and possibly passing it to
 * user space for further processing).
 */
void pm_stay_awake(struct device *dev)
{
        unsigned long flags;

        if (!dev)
                return;

        spin_lock_irqsave(&dev->power.lock, flags);
        __pm_stay_awake(dev->power.wakeup);
        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_stay_awake);

#ifdef CONFIG_PM_AUTOSLEEP
static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
{
        ktime_t delta = ktime_sub(now, ws->start_prevent_time);
        ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
}
#else
static inline void update_prevent_sleep_time(struct wakeup_source *ws,
                                             ktime_t now) {}
#endif

/**
 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and notify the PM core that the wakeup source has
 * become inactive by decrementing the counter of wakeup events being processed
 * and incrementing the counter of registered wakeup events.
 */
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
        unsigned int cnt, inpr, cec;
        ktime_t duration;
        ktime_t now;

        ws->relax_count++;
        /*
         * __pm_relax() may be called directly or from a timer function.
         * If it is called directly right after the timer function has been
         * started, but before the timer function calls __pm_relax(), it is
         * possible that __pm_stay_awake() will be called in the meantime and
         * will set ws->active.  Then, ws->active may be cleared immediately
         * by the __pm_relax() called from the timer function, but in such a
         * case ws->relax_count will be different from ws->active_count.
         */
        if (ws->relax_count != ws->active_count) {
                ws->relax_count--;
                return;
        }

        ws->active = false;

        now = ktime_get();
        duration = ktime_sub(now, ws->last_time);
        ws->total_time = ktime_add(ws->total_time, duration);
        if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
                ws->max_time = duration;

        ws->last_time = now;
        del_timer(&ws->timer);
        ws->timer_expires = 0;

        if (ws->autosleep_enabled)
                update_prevent_sleep_time(ws, now);

        /*
         * Increment the counter of registered wakeup events and decrement the
         * counter of wakeup events in progress simultaneously.
         */
        cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
        trace_wakeup_source_deactivate(ws->name, cec);

        split_counters(&cnt, &inpr);
        if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
                wake_up(&wakeup_count_wait_queue);
}

/**
 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * Call this function for wakeup events whose processing started with calling
 * __pm_stay_awake().
 *
 * It is safe to call it from interrupt context.
 */
void __pm_relax(struct wakeup_source *ws)
{
        unsigned long flags;

        if (!ws)
                return;

        spin_lock_irqsave(&ws->lock, flags);
        if (ws->active)
                wakeup_source_deactivate(ws);
        spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_relax);

/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @dev: Device that signaled the event.
 *
 * Execute __pm_relax() for the @dev's wakeup source object.
 */
void pm_relax(struct device *dev)
{
        unsigned long flags;

        if (!dev)
                return;

        spin_lock_irqsave(&dev->power.lock, flags);
        __pm_relax(dev->power.wakeup);
        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_relax);

/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 * @data: Address of the wakeup source object associated with the event source.
 *
 * Call wakeup_source_deactivate() for the wakeup source whose address is stored
 * in @data if it is currently active and its timer has not been canceled and
 * the expiration time of the timer is not in the future.
 */
static void pm_wakeup_timer_fn(unsigned long data)
{
        struct wakeup_source *ws = (struct wakeup_source *)data;
        unsigned long flags;

        spin_lock_irqsave(&ws->lock, flags);

        if (ws->active && ws->timer_expires
            && time_after_eq(jiffies, ws->timer_expires)) {
                wakeup_source_deactivate(ws);
                ws->expire_count++;
        }

        spin_unlock_irqrestore(&ws->lock, flags);
}

/**
 * __pm_wakeup_event - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the event source.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Notify the PM core of a wakeup event whose source is @ws that will take
 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
 * execute pm_wakeup_timer_fn() in the future.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
        unsigned long flags;
        unsigned long expires;

        if (!ws)
                return;

        spin_lock_irqsave(&ws->lock, flags);

        wakeup_source_report_event(ws);

        if (!msec) {
                wakeup_source_deactivate(ws);
                goto unlock;
        }

        expires = jiffies + msecs_to_jiffies(msec);
        if (!expires)
                expires = 1;

        if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
                mod_timer(&ws->timer, expires);
                ws->timer_expires = expires;
        }

 unlock:
        spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_wakeup_event);

/**
 * pm_wakeup_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Call __pm_wakeup_event() for the @dev's wakeup source object.
 */
void pm_wakeup_event(struct device *dev, unsigned int msec)
{
        unsigned long flags;

        if (!dev)
                return;

        spin_lock_irqsave(&dev->power.lock, flags);
        __pm_wakeup_event(dev->power.wakeup, msec);
        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_event);

void pm_print_active_wakeup_sources(void)
{
        struct wakeup_source *ws;
        int active = 0;
        struct wakeup_source *last_activity_ws = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
                if (ws->active) {
                        pr_info("active wakeup source: %s\n", ws->name);
                        active = 1;
                } else if (!active &&
                           (!last_activity_ws ||
                            ktime_to_ns(ws->last_time) >
                            ktime_to_ns(last_activity_ws->last_time))) {
                        last_activity_ws = ws;
                }
        }

        if (!active && last_activity_ws)
                pr_info("last active wakeup source: %s\n",
                        last_activity_ws->name);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);

/**
 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 *
 * Compare the current number of registered wakeup events with its preserved
 * value from the past and return true if new wakeup events have been registered
 * since the old value was stored.  Also return true if the current number of
 * wakeup events being processed is different from zero.
 */
bool pm_wakeup_pending(void)
{
        unsigned long flags;
        bool ret = false;

        spin_lock_irqsave(&events_lock, flags);
        if (events_check_enabled) {
                unsigned int cnt, inpr;

                split_counters(&cnt, &inpr);
                ret = (cnt != saved_count || inpr > 0);
                events_check_enabled = !ret;
        }
        spin_unlock_irqrestore(&events_lock, flags);

        if (ret) {
                pr_info("PM: Wakeup pending, aborting suspend\n");
                pm_print_active_wakeup_sources();
        }

        return ret || pm_abort_suspend;
}

void pm_system_wakeup(void)
{
        pm_abort_suspend = true;
        freeze_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);

void pm_wakeup_clear(void)
{
        pm_abort_suspend = false;
}

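/*
 * The two helpers below back the /sys/power/wakeup_count interface: user space
 * reads that file (pm_get_wakeup_count()), optionally blocking until no wakeup
 * events are in progress, and writes the value back (pm_save_wakeup_count())
 * right before writing to /sys/power/state.  If any wakeup event is registered
 * in between, pm_wakeup_pending() will notice the mismatch and the transition
 * to the sleep state will be aborted.
 */
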
/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 * @block: Whether or not to block.
 *
 * Store the number of registered wakeup events at the address in @count.  If
 * @block is set, block until the current number of wakeup events being
 * processed is zero.
 *
 * Return 'false' if the current number of wakeup events being processed is
 * nonzero.  Otherwise return 'true'.
 */
bool pm_get_wakeup_count(unsigned int *count, bool block)
{
        unsigned int cnt, inpr;

        if (block) {
                DEFINE_WAIT(wait);

                for (;;) {
                        prepare_to_wait(&wakeup_count_wait_queue, &wait,
                                        TASK_INTERRUPTIBLE);
                        split_counters(&cnt, &inpr);
                        if (inpr == 0 || signal_pending(current))
                                break;

                        schedule();
                }
                finish_wait(&wakeup_count_wait_queue, &wait);
        }

        split_counters(&cnt, &inpr);
        *count = cnt;
        return !inpr;
}

/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup events.
 *
 * If @count is equal to the current number of registered wakeup events and the
 * current number of wakeup events being processed is zero, store @count as the
 * old number of registered wakeup events for pm_wakeup_pending(), enable
 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
 * detection and return 'false'.
 */
bool pm_save_wakeup_count(unsigned int count)
{
        unsigned int cnt, inpr;
        unsigned long flags;

        events_check_enabled = false;
        spin_lock_irqsave(&events_lock, flags);
        split_counters(&cnt, &inpr);
        if (cnt == count && inpr == 0) {
                saved_count = count;
                events_check_enabled = true;
        }
        spin_unlock_irqrestore(&events_lock, flags);
        return events_check_enabled;
}

#ifdef CONFIG_PM_AUTOSLEEP
/**
 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
 * @set: Whether to set or to clear the autosleep_enabled flags.
 */
void pm_wakep_autosleep_enabled(bool set)
{
        struct wakeup_source *ws;
        ktime_t now = ktime_get();

        rcu_read_lock();
        list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
                spin_lock_irq(&ws->lock);
                if (ws->autosleep_enabled != set) {
                        ws->autosleep_enabled = set;
                        if (ws->active) {
                                if (set)
                                        ws->start_prevent_time = now;
                                else
                                        update_prevent_sleep_time(ws, now);
                        }
                }
                spin_unlock_irq(&ws->lock);
        }
        rcu_read_unlock();
}
#endif /* CONFIG_PM_AUTOSLEEP */

static struct dentry *wakeup_sources_stats_dentry;

/**
 * print_wakeup_source_stats - Print wakeup source statistics information.
 * @m: seq_file to print the statistics into.
 * @ws: Wakeup source object to print the statistics for.
 */
static int print_wakeup_source_stats(struct seq_file *m,
                                     struct wakeup_source *ws)
{
        unsigned long flags;
        ktime_t total_time;
        ktime_t max_time;
        unsigned long active_count;
        ktime_t active_time;
        ktime_t prevent_sleep_time;

        spin_lock_irqsave(&ws->lock, flags);

        total_time = ws->total_time;
        max_time = ws->max_time;
        prevent_sleep_time = ws->prevent_sleep_time;
        active_count = ws->active_count;
        if (ws->active) {
                ktime_t now = ktime_get();

                active_time = ktime_sub(now, ws->last_time);
                total_time = ktime_add(total_time, active_time);
                if (active_time.tv64 > max_time.tv64)
                        max_time = active_time;

                if (ws->autosleep_enabled)
                        prevent_sleep_time = ktime_add(prevent_sleep_time,
                                ktime_sub(now, ws->start_prevent_time));
        } else {
                active_time = ktime_set(0, 0);
        }

        seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
                   ws->name, active_count, ws->event_count,
                   ws->wakeup_count, ws->expire_count,
                   ktime_to_ms(active_time), ktime_to_ms(total_time),
                   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
                   ktime_to_ms(prevent_sleep_time));

        spin_unlock_irqrestore(&ws->lock, flags);

        return 0;
}

/**
 * wakeup_sources_stats_show - Print wakeup sources statistics information.
 * @m: seq_file to print the statistics into.
 */
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
        struct wakeup_source *ws;

        seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
                "expire_count\tactive_since\ttotal_time\tmax_time\t"
                "last_change\tprevent_suspend_time\n");

        rcu_read_lock();
        list_for_each_entry_rcu(ws, &wakeup_sources, entry)
                print_wakeup_source_stats(m, ws);
        rcu_read_unlock();

        return 0;
}

static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, wakeup_sources_stats_show, NULL);
}

static const struct file_operations wakeup_sources_stats_fops = {
        .owner = THIS_MODULE,
        .open = wakeup_sources_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init wakeup_sources_debugfs_init(void)
{
        wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
                        S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
        return 0;
}

postcore_initcall(wakeup_sources_debugfs_init);