xref: /linux-6.15/drivers/base/power/runtime.c (revision 72263869)
15de363b6SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
25e928f77SRafael J. Wysocki /*
362052ab1SRafael J. Wysocki  * drivers/base/power/runtime.c - Helper functions for device runtime PM
45e928f77SRafael J. Wysocki  *
55e928f77SRafael J. Wysocki  * Copyright (c) 2009 Rafael J. Wysocki <[email protected]>, Novell Inc.
61bfee5bcSAlan Stern  * Copyright (C) 2010 Alan Stern <[email protected]>
75e928f77SRafael J. Wysocki  */
85b3cc15aSIngo Molnar #include <linux/sched/mm.h>
98234f673SVincent Guittot #include <linux/ktime.h>
108234f673SVincent Guittot #include <linux/hrtimer.h>
111b6bc32fSPaul Gortmaker #include <linux/export.h>
125e928f77SRafael J. Wysocki #include <linux/pm_runtime.h>
134990d4feSTony Lindgren #include <linux/pm_wakeirq.h>
14ed509c7eSKent Overstreet #include <linux/rculist.h>
15c3dc2f14SMing Lei #include <trace/events/rpm.h>
1621d5c57bSRafael J. Wysocki 
1721d5c57bSRafael J. Wysocki #include "../base.h"
187490e442SAlan Stern #include "power.h"
195e928f77SRafael J. Wysocki 
20dbcd2d72SAndrzej Hajda typedef int (*pm_callback_t)(struct device *);
215f59df79SUlf Hansson 
22dbcd2d72SAndrzej Hajda static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
235f59df79SUlf Hansson {
24dbcd2d72SAndrzej Hajda 	pm_callback_t cb;
25dbcd2d72SAndrzej Hajda 	const struct dev_pm_ops *ops;
26dbcd2d72SAndrzej Hajda 
27dbcd2d72SAndrzej Hajda 	if (dev->pm_domain)
28dbcd2d72SAndrzej Hajda 		ops = &dev->pm_domain->ops;
29dbcd2d72SAndrzej Hajda 	else if (dev->type && dev->type->pm)
30dbcd2d72SAndrzej Hajda 		ops = dev->type->pm;
31dbcd2d72SAndrzej Hajda 	else if (dev->class && dev->class->pm)
32dbcd2d72SAndrzej Hajda 		ops = dev->class->pm;
33dbcd2d72SAndrzej Hajda 	else if (dev->bus && dev->bus->pm)
34dbcd2d72SAndrzej Hajda 		ops = dev->bus->pm;
35dbcd2d72SAndrzej Hajda 	else
36dbcd2d72SAndrzej Hajda 		ops = NULL;
37dbcd2d72SAndrzej Hajda 
38dbcd2d72SAndrzej Hajda 	if (ops)
39dbcd2d72SAndrzej Hajda 		cb = *(pm_callback_t *)((void *)ops + cb_offset);
40dbcd2d72SAndrzej Hajda 	else
41dbcd2d72SAndrzej Hajda 		cb = NULL;
42dbcd2d72SAndrzej Hajda 
43dbcd2d72SAndrzej Hajda 	if (!cb && dev->driver && dev->driver->pm)
44dbcd2d72SAndrzej Hajda 		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
45dbcd2d72SAndrzej Hajda 
46dbcd2d72SAndrzej Hajda 	return cb;
475f59df79SUlf Hansson }
485f59df79SUlf Hansson 
49dbcd2d72SAndrzej Hajda #define RPM_GET_CALLBACK(dev, callback) \
50dbcd2d72SAndrzej Hajda 		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
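/*
 * Illustrative note (not taken from this file): RPM_GET_CALLBACK() is how the
 * rpm_idle/rpm_suspend/rpm_resume paths below pick the highest-priority
 * callback (PM domain, then device type, class, bus, and finally the driver).
 * A typical use, as seen later in rpm_suspend(), is:
 *
 *	int (*callback)(struct device *);
 *
 *	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 *	if (callback)
 *		retval = callback(dev);
 */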
515f59df79SUlf Hansson 
52140a6c94SAlan Stern static int rpm_resume(struct device *dev, int rpmflags);
537490e442SAlan Stern static int rpm_suspend(struct device *dev, int rpmflags);
545e928f77SRafael J. Wysocki 
555e928f77SRafael J. Wysocki /**
564769373cSAlan Stern  * update_pm_runtime_accounting - Update the time accounting of power states
574769373cSAlan Stern  * @dev: Device to update the accounting for
584769373cSAlan Stern  *
594769373cSAlan Stern  * In order to be able to have time accounting of the various power states
604769373cSAlan Stern  * (as used by programs such as PowerTOP to show the effectiveness of runtime
614769373cSAlan Stern  * PM), we need to track the time spent in each state.
624769373cSAlan Stern  * update_pm_runtime_accounting must be called each time before the
634769373cSAlan Stern  * runtime_status field is updated, to account the time in the old state
644769373cSAlan Stern  * correctly.
654769373cSAlan Stern  */
660996584bSUlf Hansson static void update_pm_runtime_accounting(struct device *dev)
674769373cSAlan Stern {
68fed7e88cSVincent Guittot 	u64 now, last, delta;
694769373cSAlan Stern 
704769373cSAlan Stern 	if (dev->power.disable_depth > 0)
714769373cSAlan Stern 		return;
724769373cSAlan Stern 
73fed7e88cSVincent Guittot 	last = dev->power.accounting_timestamp;
74fed7e88cSVincent Guittot 
75fed7e88cSVincent Guittot 	now = ktime_get_mono_fast_ns();
764769373cSAlan Stern 	dev->power.accounting_timestamp = now;
774769373cSAlan Stern 
78c155f649SVincent Guittot 	/*
79c155f649SVincent Guittot 	 * Because ktime_get_mono_fast_ns() is not monotonic during
80c155f649SVincent Guittot 	 * timekeeping updates, ensure that 'now' is after the last saved
81c155f649SVincent Guittot 	 * timestamp.
82c155f649SVincent Guittot 	 */
83c155f649SVincent Guittot 	if (now < last)
84c155f649SVincent Guittot 		return;
85c155f649SVincent Guittot 
86c155f649SVincent Guittot 	delta = now - last;
87c155f649SVincent Guittot 
884769373cSAlan Stern 	if (dev->power.runtime_status == RPM_SUSPENDED)
89a08c2a5aSThara Gopinath 		dev->power.suspended_time += delta;
904769373cSAlan Stern 	else
91a08c2a5aSThara Gopinath 		dev->power.active_time += delta;
924769373cSAlan Stern }
934769373cSAlan Stern 
944769373cSAlan Stern static void __update_runtime_status(struct device *dev, enum rpm_status status)
954769373cSAlan Stern {
964769373cSAlan Stern 	update_pm_runtime_accounting(dev);
97015abee4SVilas Bhat 	trace_rpm_status(dev, status);
984769373cSAlan Stern 	dev->power.runtime_status = status;
994769373cSAlan Stern }
1004769373cSAlan Stern 
101fdc56c07SUlf Hansson static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
1028a62ffe2SVincent Guittot {
103a08c2a5aSThara Gopinath 	u64 time;
104a08c2a5aSThara Gopinath 	unsigned long flags;
1058a62ffe2SVincent Guittot 
1068a62ffe2SVincent Guittot 	spin_lock_irqsave(&dev->power.lock, flags);
1078a62ffe2SVincent Guittot 
1088a62ffe2SVincent Guittot 	update_pm_runtime_accounting(dev);
109fdc56c07SUlf Hansson 	time = suspended ? dev->power.suspended_time : dev->power.active_time;
1108a62ffe2SVincent Guittot 
1118a62ffe2SVincent Guittot 	spin_unlock_irqrestore(&dev->power.lock, flags);
1128a62ffe2SVincent Guittot 
113a08c2a5aSThara Gopinath 	return time;
1148a62ffe2SVincent Guittot }
115fdc56c07SUlf Hansson 
116fdc56c07SUlf Hansson u64 pm_runtime_active_time(struct device *dev)
117fdc56c07SUlf Hansson {
118fdc56c07SUlf Hansson 	return rpm_get_accounted_time(dev, false);
119fdc56c07SUlf Hansson }
120fdc56c07SUlf Hansson 
121fdc56c07SUlf Hansson u64 pm_runtime_suspended_time(struct device *dev)
122fdc56c07SUlf Hansson {
123fdc56c07SUlf Hansson 	return rpm_get_accounted_time(dev, true);
124fdc56c07SUlf Hansson }
1258a62ffe2SVincent Guittot EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
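/*
 * Illustrative sketch (not part of the original file; foo_suspended_ms() is a
 * hypothetical helper): a consumer such as a sysfs attribute or a governor
 * could report the accumulated suspended time in milliseconds like this:
 *
 *	static u64 foo_suspended_ms(struct device *dev)
 *	{
 *		return div_u64(pm_runtime_suspended_time(dev), NSEC_PER_MSEC);
 *	}
 */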
1268a62ffe2SVincent Guittot 
1274769373cSAlan Stern /**
1285e928f77SRafael J. Wysocki  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
1295e928f77SRafael J. Wysocki  * @dev: Device to handle.
1305e928f77SRafael J. Wysocki  */
1315e928f77SRafael J. Wysocki static void pm_runtime_deactivate_timer(struct device *dev)
1325e928f77SRafael J. Wysocki {
1335e928f77SRafael J. Wysocki 	if (dev->power.timer_expires > 0) {
13474fb4486SVincent Guittot 		hrtimer_try_to_cancel(&dev->power.suspend_timer);
1355e928f77SRafael J. Wysocki 		dev->power.timer_expires = 0;
1365e928f77SRafael J. Wysocki 	}
1375e928f77SRafael J. Wysocki }
1385e928f77SRafael J. Wysocki 
1395e928f77SRafael J. Wysocki /**
1405e928f77SRafael J. Wysocki  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
1415e928f77SRafael J. Wysocki  * @dev: Device to handle.
1425e928f77SRafael J. Wysocki  */
1435e928f77SRafael J. Wysocki static void pm_runtime_cancel_pending(struct device *dev)
1445e928f77SRafael J. Wysocki {
1455e928f77SRafael J. Wysocki 	pm_runtime_deactivate_timer(dev);
1465e928f77SRafael J. Wysocki 	/*
1475e928f77SRafael J. Wysocki 	 * In case there's a request pending, make sure its work function will
1485e928f77SRafael J. Wysocki 	 * return without doing anything.
1495e928f77SRafael J. Wysocki 	 */
1505e928f77SRafael J. Wysocki 	dev->power.request = RPM_REQ_NONE;
1515e928f77SRafael J. Wysocki }
1525e928f77SRafael J. Wysocki 
15315bcb91dSAlan Stern /*
15415bcb91dSAlan Stern  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
15515bcb91dSAlan Stern  * @dev: Device to handle.
15615bcb91dSAlan Stern  *
15715bcb91dSAlan Stern  * Compute the autosuspend-delay expiration time based on the device's
15815bcb91dSAlan Stern  * power.last_busy time.  If the delay has already expired or is disabled
15915bcb91dSAlan Stern  * (negative) or the power.use_autosuspend flag isn't set, return 0.
1601f7b7081SLadislav Michl  * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
16115bcb91dSAlan Stern  *
16215bcb91dSAlan Stern  * This function may be called either with or without dev->power.lock held.
16315bcb91dSAlan Stern  * Either way it can be racy, since power.last_busy may be updated at any time.
16415bcb91dSAlan Stern  */
1658234f673SVincent Guittot u64 pm_runtime_autosuspend_expiration(struct device *dev)
16615bcb91dSAlan Stern {
16715bcb91dSAlan Stern 	int autosuspend_delay;
168f800ea32SLadislav Michl 	u64 expires;
16915bcb91dSAlan Stern 
17015bcb91dSAlan Stern 	if (!dev->power.use_autosuspend)
171f800ea32SLadislav Michl 		return 0;
17215bcb91dSAlan Stern 
1736aa7de05SMark Rutland 	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
17415bcb91dSAlan Stern 	if (autosuspend_delay < 0)
175f800ea32SLadislav Michl 		return 0;
17615bcb91dSAlan Stern 
177f800ea32SLadislav Michl 	expires  = READ_ONCE(dev->power.last_busy);
178f800ea32SLadislav Michl 	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
179f800ea32SLadislav Michl 	if (expires > ktime_get_mono_fast_ns())
180f800ea32SLadislav Michl 		return expires;	/* Expires in the future */
18115bcb91dSAlan Stern 
182f800ea32SLadislav Michl 	return 0;
18315bcb91dSAlan Stern }
18415bcb91dSAlan Stern EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
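/*
 * Worked example with assumed numbers, for illustration only: with
 * power.autosuspend_delay set to 2000 ms and power.last_busy == 1000000000 ns
 * (1 s), the expiration computed above is
 * 1000000000 + 2000 * NSEC_PER_MSEC = 3000000000 ns (3 s).  While
 * ktime_get_mono_fast_ns() is still below that value the function returns
 * 3000000000 (expiration is in the future); once the clock passes it, the
 * function returns 0 and rpm_suspend() may proceed immediately.
 */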
18515bcb91dSAlan Stern 
186e823407fSMing Lei static int dev_memalloc_noio(struct device *dev, void *data)
187e823407fSMing Lei {
188e823407fSMing Lei 	return dev->power.memalloc_noio;
189e823407fSMing Lei }
190e823407fSMing Lei 
191e823407fSMing Lei /*
192e823407fSMing Lei  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
193e823407fSMing Lei  * @dev: Device to handle.
194e823407fSMing Lei  * @enable: True for setting the flag and False for clearing the flag.
195e823407fSMing Lei  *
196e823407fSMing Lei  * Set the flag for all devices in the path from the device to the
197e823407fSMing Lei  * root device in the device tree if @enable is true, otherwise clear
198e823407fSMing Lei  * the flag for devices in the path whose siblings don't set the flag.
199e823407fSMing Lei  *
200e823407fSMing Lei  * The function should only be called by block device or network
201e823407fSMing Lei  * device drivers to solve the deadlock problem during runtime
202e823407fSMing Lei  * resume/suspend:
203e823407fSMing Lei  *
204e823407fSMing Lei  *     If memory allocation with GFP_KERNEL is called inside runtime
205e823407fSMing Lei  *     resume/suspend callback of any one of its ancestors (or the
206e823407fSMing Lei  *     block device itself), a deadlock may be triggered inside the
207e823407fSMing Lei  *     memory allocation, since it might not complete until the block
208e823407fSMing Lei  *     device becomes active and the involved page I/O finishes. This
209e823407fSMing Lei  *     situation was first pointed out by Alan Stern. Network devices
210e823407fSMing Lei  *     are involved in the iSCSI kind of situation.
211e823407fSMing Lei  *
212e823407fSMing Lei  * The dev_hotplug_mutex lock is held in the function to handle the
213e823407fSMing Lei  * hotplug race, because pm_runtime_set_memalloc_noio() may be called
214e823407fSMing Lei  * in async probe().
215e823407fSMing Lei  *
216e823407fSMing Lei  * The function should be called between device_add() and device_del()
217e823407fSMing Lei  * on the affected device (block/network device).
218e823407fSMing Lei  */
219e823407fSMing Lei void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
220e823407fSMing Lei {
221e823407fSMing Lei 	static DEFINE_MUTEX(dev_hotplug_mutex);
222e823407fSMing Lei 
223e823407fSMing Lei 	mutex_lock(&dev_hotplug_mutex);
224e823407fSMing Lei 	for (;;) {
225e823407fSMing Lei 		bool enabled;
226e823407fSMing Lei 
227e823407fSMing Lei 		/* hold power lock since bitfield is not SMP-safe. */
228e823407fSMing Lei 		spin_lock_irq(&dev->power.lock);
229e823407fSMing Lei 		enabled = dev->power.memalloc_noio;
230e823407fSMing Lei 		dev->power.memalloc_noio = enable;
231e823407fSMing Lei 		spin_unlock_irq(&dev->power.lock);
232e823407fSMing Lei 
233e823407fSMing Lei 		/*
234e823407fSMing Lei 		 * No need to enable the ancestors any more if the device
235e823407fSMing Lei 		 * has already been enabled.
236e823407fSMing Lei 		 */
237e823407fSMing Lei 		if (enabled && enable)
238e823407fSMing Lei 			break;
239e823407fSMing Lei 
240e823407fSMing Lei 		dev = dev->parent;
241e823407fSMing Lei 
242e823407fSMing Lei 		/*
243e823407fSMing Lei 		 * Clear the flag of the parent device only if none of the
244e823407fSMing Lei 		 * children set the flag, because an ancestor's flag was
245e823407fSMing Lei 		 * set by one of its descendants.
246e823407fSMing Lei 		 */
247e823407fSMing Lei 		if (!dev || (!enable &&
248dbfa4478SRafael J. Wysocki 		    device_for_each_child(dev, NULL, dev_memalloc_noio)))
249e823407fSMing Lei 			break;
250e823407fSMing Lei 	}
251e823407fSMing Lei 	mutex_unlock(&dev_hotplug_mutex);
252e823407fSMing Lei }
253e823407fSMing Lei EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
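/*
 * Illustrative sketch (hypothetical foo_* names, not part of the original
 * file): per the comment above, a block or network driver would typically set
 * the flag right after registering the device and clear it again before
 * unregistering, e.g.:
 *
 *	ret = device_add(&foo->dev);
 *	if (ret)
 *		return ret;
 *	pm_runtime_set_memalloc_noio(&foo->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&foo->dev, false);
 *	device_del(&foo->dev);
 */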
254e823407fSMing Lei 
2555e928f77SRafael J. Wysocki /**
2561bfee5bcSAlan Stern  * rpm_check_suspend_allowed - Test whether a device may be suspended.
2571bfee5bcSAlan Stern  * @dev: Device to test.
2585e928f77SRafael J. Wysocki  */
2591bfee5bcSAlan Stern static int rpm_check_suspend_allowed(struct device *dev)
2605e928f77SRafael J. Wysocki {
2615e928f77SRafael J. Wysocki 	int retval = 0;
2625e928f77SRafael J. Wysocki 
2635e928f77SRafael J. Wysocki 	if (dev->power.runtime_error)
2645e928f77SRafael J. Wysocki 		retval = -EINVAL;
265632e270eSRafael J. Wysocki 	else if (dev->power.disable_depth > 0)
266632e270eSRafael J. Wysocki 		retval = -EACCES;
26782586a72SRafael J. Wysocki 	else if (atomic_read(&dev->power.usage_count))
2685e928f77SRafael J. Wysocki 		retval = -EAGAIN;
269dbfa4478SRafael J. Wysocki 	else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
2705e928f77SRafael J. Wysocki 		retval = -EBUSY;
2711bfee5bcSAlan Stern 
2721bfee5bcSAlan Stern 	/* Pending resume requests take precedence over suspends. */
273dbfa4478SRafael J. Wysocki 	else if ((dev->power.deferred_resume &&
274dbfa4478SRafael J. Wysocki 	    dev->power.runtime_status == RPM_SUSPENDING) ||
275dbfa4478SRafael J. Wysocki 	    (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
2761bfee5bcSAlan Stern 		retval = -EAGAIN;
2778262331eSViresh Kumar 	else if (__dev_pm_qos_resume_latency(dev) == 0)
27855d7ec45SRafael J. Wysocki 		retval = -EPERM;
2791bfee5bcSAlan Stern 	else if (dev->power.runtime_status == RPM_SUSPENDED)
2801bfee5bcSAlan Stern 		retval = 1;
2811bfee5bcSAlan Stern 
2821bfee5bcSAlan Stern 	return retval;
2831bfee5bcSAlan Stern }
2841bfee5bcSAlan Stern 
28521d5c57bSRafael J. Wysocki static int rpm_get_suppliers(struct device *dev)
28621d5c57bSRafael J. Wysocki {
28721d5c57bSRafael J. Wysocki 	struct device_link *link;
28821d5c57bSRafael J. Wysocki 
289c2fa1e1bSJoel Fernandes (Google) 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
290c2fa1e1bSJoel Fernandes (Google) 				device_links_read_lock_held()) {
29121d5c57bSRafael J. Wysocki 		int retval;
29221d5c57bSRafael J. Wysocki 
293d12544fbSXiang Chen 		if (!(link->flags & DL_FLAG_PM_RUNTIME))
29421d5c57bSRafael J. Wysocki 			continue;
29521d5c57bSRafael J. Wysocki 
29621d5c57bSRafael J. Wysocki 		retval = pm_runtime_get_sync(link->supplier);
29731eb7431SRafael J. Wysocki 		/* Ignore suppliers with disabled runtime PM. */
29831eb7431SRafael J. Wysocki 		if (retval < 0 && retval != -EACCES) {
29921d5c57bSRafael J. Wysocki 			pm_runtime_put_noidle(link->supplier);
30021d5c57bSRafael J. Wysocki 			return retval;
30121d5c57bSRafael J. Wysocki 		}
302e2f3cd83SRafael J. Wysocki 		refcount_inc(&link->rpm_active);
30321d5c57bSRafael J. Wysocki 	}
30421d5c57bSRafael J. Wysocki 	return 0;
30521d5c57bSRafael J. Wysocki }
30621d5c57bSRafael J. Wysocki 
307d1579e61SRafael J. Wysocki /**
308d1579e61SRafael J. Wysocki  * pm_runtime_release_supplier - Drop references to device link's supplier.
309d1579e61SRafael J. Wysocki  * @link: Target device link.
310d1579e61SRafael J. Wysocki  *
31107358194SRafael J. Wysocki  * Drop all runtime PM references associated with @link to its supplier device.
312d1579e61SRafael J. Wysocki  */
31307358194SRafael J. Wysocki void pm_runtime_release_supplier(struct device_link *link)
314d1579e61SRafael J. Wysocki {
315d1579e61SRafael J. Wysocki 	struct device *supplier = link->supplier;
316d1579e61SRafael J. Wysocki 
317d1579e61SRafael J. Wysocki 	/*
318d1579e61SRafael J. Wysocki 	 * The additional power.usage_count check is a safety net in case
319d1579e61SRafael J. Wysocki 	 * the rpm_active refcount becomes saturated, in which case
320d1579e61SRafael J. Wysocki 	 * refcount_dec_not_one() would return true forever, but it is not
321d1579e61SRafael J. Wysocki 	 * strictly necessary.
322d1579e61SRafael J. Wysocki 	 */
323d1579e61SRafael J. Wysocki 	while (refcount_dec_not_one(&link->rpm_active) &&
324d1579e61SRafael J. Wysocki 	       atomic_read(&supplier->power.usage_count) > 0)
325d1579e61SRafael J. Wysocki 		pm_runtime_put_noidle(supplier);
326d1579e61SRafael J. Wysocki }
327d1579e61SRafael J. Wysocki 
3285244f5e2SRafael J. Wysocki static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
32921d5c57bSRafael J. Wysocki {
33021d5c57bSRafael J. Wysocki 	struct device_link *link;
33121d5c57bSRafael J. Wysocki 
332c2fa1e1bSJoel Fernandes (Google) 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
33307358194SRafael J. Wysocki 				device_links_read_lock_held()) {
33407358194SRafael J. Wysocki 		pm_runtime_release_supplier(link);
33507358194SRafael J. Wysocki 		if (try_to_suspend)
33607358194SRafael J. Wysocki 			pm_request_idle(link->supplier);
33707358194SRafael J. Wysocki 	}
33821d5c57bSRafael J. Wysocki }
33921d5c57bSRafael J. Wysocki 
3405244f5e2SRafael J. Wysocki static void rpm_put_suppliers(struct device *dev)
3415244f5e2SRafael J. Wysocki {
3425244f5e2SRafael J. Wysocki 	__rpm_put_suppliers(dev, true);
3435244f5e2SRafael J. Wysocki }
3445244f5e2SRafael J. Wysocki 
3455244f5e2SRafael J. Wysocki static void rpm_suspend_suppliers(struct device *dev)
3465244f5e2SRafael J. Wysocki {
3475244f5e2SRafael J. Wysocki 	struct device_link *link;
3485244f5e2SRafael J. Wysocki 	int idx = device_links_read_lock();
3495244f5e2SRafael J. Wysocki 
3505244f5e2SRafael J. Wysocki 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
3515244f5e2SRafael J. Wysocki 				device_links_read_lock_held())
3525244f5e2SRafael J. Wysocki 		pm_request_idle(link->supplier);
3535244f5e2SRafael J. Wysocki 
3545244f5e2SRafael J. Wysocki 	device_links_read_unlock(idx);
3555244f5e2SRafael J. Wysocki }
3565244f5e2SRafael J. Wysocki 
3571bfee5bcSAlan Stern /**
358ad3c36a5SRafael J. Wysocki  * __rpm_callback - Run a given runtime PM callback for a given device.
359ad3c36a5SRafael J. Wysocki  * @cb: Runtime PM callback to run.
360ad3c36a5SRafael J. Wysocki  * @dev: Device to run the callback for.
361ad3c36a5SRafael J. Wysocki  */
362ad3c36a5SRafael J. Wysocki static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
363ad3c36a5SRafael J. Wysocki 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
364ad3c36a5SRafael J. Wysocki {
36563d00be6SUlf Hansson 	int retval = 0, idx;
3660cab893fSRafael J. Wysocki 	bool use_links = dev->power.links_count > 0;
367ad3c36a5SRafael J. Wysocki 
36821d5c57bSRafael J. Wysocki 	if (dev->power.irq_safe) {
369ad3c36a5SRafael J. Wysocki 		spin_unlock(&dev->power.lock);
37021d5c57bSRafael J. Wysocki 	} else {
371ad3c36a5SRafael J. Wysocki 		spin_unlock_irq(&dev->power.lock);
372ad3c36a5SRafael J. Wysocki 
3730cab893fSRafael J. Wysocki 		/*
3740cab893fSRafael J. Wysocki 		 * Resume suppliers if necessary.
3750cab893fSRafael J. Wysocki 		 *
3760cab893fSRafael J. Wysocki 		 * The device's runtime PM status cannot change until this
3770cab893fSRafael J. Wysocki 		 * routine returns, so it is safe to read the status outside of
3780cab893fSRafael J. Wysocki 		 * the lock.
3790cab893fSRafael J. Wysocki 		 */
3800cab893fSRafael J. Wysocki 		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
38121d5c57bSRafael J. Wysocki 			idx = device_links_read_lock();
38221d5c57bSRafael J. Wysocki 
38321d5c57bSRafael J. Wysocki 			retval = rpm_get_suppliers(dev);
3845244f5e2SRafael J. Wysocki 			if (retval) {
3855244f5e2SRafael J. Wysocki 				rpm_put_suppliers(dev);
38621d5c57bSRafael J. Wysocki 				goto fail;
3875244f5e2SRafael J. Wysocki 			}
38821d5c57bSRafael J. Wysocki 
38921d5c57bSRafael J. Wysocki 			device_links_read_unlock(idx);
39021d5c57bSRafael J. Wysocki 		}
39121d5c57bSRafael J. Wysocki 	}
39221d5c57bSRafael J. Wysocki 
39363d00be6SUlf Hansson 	if (cb)
394ad3c36a5SRafael J. Wysocki 		retval = cb(dev);
395ad3c36a5SRafael J. Wysocki 
39621d5c57bSRafael J. Wysocki 	if (dev->power.irq_safe) {
397ad3c36a5SRafael J. Wysocki 		spin_lock(&dev->power.lock);
3980cab893fSRafael J. Wysocki 	} else {
39921d5c57bSRafael J. Wysocki 		/*
4000cab893fSRafael J. Wysocki 		 * If the device is suspending and the callback has returned
4010cab893fSRafael J. Wysocki 		 * success, drop the usage counters of the suppliers that have
4020cab893fSRafael J. Wysocki 		 * been reference counted on its resume.
40321d5c57bSRafael J. Wysocki 		 *
4040cab893fSRafael J. Wysocki 		 * Do that if resume fails too.
40521d5c57bSRafael J. Wysocki 		 */
406dbfa4478SRafael J. Wysocki 		if (use_links &&
407dbfa4478SRafael J. Wysocki 		    ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
408dbfa4478SRafael J. Wysocki 		    (dev->power.runtime_status == RPM_RESUMING && retval))) {
40921d5c57bSRafael J. Wysocki 			idx = device_links_read_lock();
41021d5c57bSRafael J. Wysocki 
4115244f5e2SRafael J. Wysocki 			__rpm_put_suppliers(dev, false);
41221d5c57bSRafael J. Wysocki 
4135244f5e2SRafael J. Wysocki fail:
41421d5c57bSRafael J. Wysocki 			device_links_read_unlock(idx);
4150cab893fSRafael J. Wysocki 		}
41621d5c57bSRafael J. Wysocki 
417ad3c36a5SRafael J. Wysocki 		spin_lock_irq(&dev->power.lock);
41821d5c57bSRafael J. Wysocki 	}
419ad3c36a5SRafael J. Wysocki 
420ad3c36a5SRafael J. Wysocki 	return retval;
421ad3c36a5SRafael J. Wysocki }
422ad3c36a5SRafael J. Wysocki 
423ad3c36a5SRafael J. Wysocki /**
4240307f4e8SRafael J. Wysocki  * rpm_callback - Run a given runtime PM callback for a given device.
4250307f4e8SRafael J. Wysocki  * @cb: Runtime PM callback to run.
4260307f4e8SRafael J. Wysocki  * @dev: Device to run the callback for.
4270307f4e8SRafael J. Wysocki  */
4280307f4e8SRafael J. Wysocki static int rpm_callback(int (*cb)(struct device *), struct device *dev)
4290307f4e8SRafael J. Wysocki {
4300307f4e8SRafael J. Wysocki 	int retval;
4310307f4e8SRafael J. Wysocki 
4320307f4e8SRafael J. Wysocki 	if (dev->power.memalloc_noio) {
4330307f4e8SRafael J. Wysocki 		unsigned int noio_flag;
4340307f4e8SRafael J. Wysocki 
4350307f4e8SRafael J. Wysocki 		/*
4360307f4e8SRafael J. Wysocki 		 * A deadlock might be caused if a memory allocation with
4370307f4e8SRafael J. Wysocki 		 * GFP_KERNEL happens inside the runtime_suspend and
4380307f4e8SRafael J. Wysocki 		 * runtime_resume callbacks of a block device's
4390307f4e8SRafael J. Wysocki 		 * ancestor or of the block device itself. A network
4400307f4e8SRafael J. Wysocki 		 * device might be thought of as part of an iSCSI block
4410307f4e8SRafael J. Wysocki 		 * device, so network devices and their ancestors should
4420307f4e8SRafael J. Wysocki 		 * be marked as memalloc_noio too.
4430307f4e8SRafael J. Wysocki 		 */
4440307f4e8SRafael J. Wysocki 		noio_flag = memalloc_noio_save();
4450307f4e8SRafael J. Wysocki 		retval = __rpm_callback(cb, dev);
4460307f4e8SRafael J. Wysocki 		memalloc_noio_restore(noio_flag);
4470307f4e8SRafael J. Wysocki 	} else {
4480307f4e8SRafael J. Wysocki 		retval = __rpm_callback(cb, dev);
4490307f4e8SRafael J. Wysocki 	}
4500307f4e8SRafael J. Wysocki 
451*72263869SRafael J. Wysocki 	/*
452*72263869SRafael J. Wysocki 	 * Since -EACCES means that runtime PM is disabled for the given device,
453*72263869SRafael J. Wysocki 	 * it should not be returned by runtime PM callbacks.  If it is returned
454*72263869SRafael J. Wysocki 	 * nevertheless, assume it to be a transient error and convert it to
455*72263869SRafael J. Wysocki 	 * -EAGAIN.
456*72263869SRafael J. Wysocki 	 */
457*72263869SRafael J. Wysocki 	if (retval == -EACCES)
458*72263869SRafael J. Wysocki 		retval = -EAGAIN;
459*72263869SRafael J. Wysocki 
460*72263869SRafael J. Wysocki 	if (retval != -EAGAIN && retval != -EBUSY)
4610307f4e8SRafael J. Wysocki 		dev->power.runtime_error = retval;
462*72263869SRafael J. Wysocki 
463*72263869SRafael J. Wysocki 	return retval;
4640307f4e8SRafael J. Wysocki }
4650307f4e8SRafael J. Wysocki 
4660307f4e8SRafael J. Wysocki /**
467140a6c94SAlan Stern  * rpm_idle - Notify device bus type if the device can be suspended.
4681bfee5bcSAlan Stern  * @dev: Device to notify the bus type about.
4691bfee5bcSAlan Stern  * @rpmflags: Flag bits.
4701bfee5bcSAlan Stern  *
47162052ab1SRafael J. Wysocki  * Check if the device's runtime PM status allows it to be suspended.  If
4721bfee5bcSAlan Stern  * another idle notification has been started earlier, return immediately.  If
4731bfee5bcSAlan Stern  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
474d66e6db2SUlf Hansson  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
475d66e6db2SUlf Hansson  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
4761bfee5bcSAlan Stern  *
4771bfee5bcSAlan Stern  * This function must be called under dev->power.lock with interrupts disabled.
4781bfee5bcSAlan Stern  */
479140a6c94SAlan Stern static int rpm_idle(struct device *dev, int rpmflags)
4801bfee5bcSAlan Stern {
48171c63122SRafael J. Wysocki 	int (*callback)(struct device *);
4821bfee5bcSAlan Stern 	int retval;
4831bfee5bcSAlan Stern 
484db8f5086SPeter Zijlstra 	trace_rpm_idle(dev, rpmflags);
4851bfee5bcSAlan Stern 	retval = rpm_check_suspend_allowed(dev);
4861bfee5bcSAlan Stern 	if (retval < 0)
4871bfee5bcSAlan Stern 		;	/* Conditions are wrong. */
4881bfee5bcSAlan Stern 
4891bfee5bcSAlan Stern 	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
4901bfee5bcSAlan Stern 	else if (dev->power.runtime_status != RPM_ACTIVE)
4911bfee5bcSAlan Stern 		retval = -EAGAIN;
4921bfee5bcSAlan Stern 
4931bfee5bcSAlan Stern 	/*
4941bfee5bcSAlan Stern 	 * Any pending request other than an idle notification takes
4951bfee5bcSAlan Stern 	 * precedence over us, except that the timer may be running.
4961bfee5bcSAlan Stern 	 */
4971bfee5bcSAlan Stern 	else if (dev->power.request_pending &&
4981bfee5bcSAlan Stern 	    dev->power.request > RPM_REQ_IDLE)
4991bfee5bcSAlan Stern 		retval = -EAGAIN;
5001bfee5bcSAlan Stern 
5011bfee5bcSAlan Stern 	/* Act as though RPM_NOWAIT is always set. */
5021bfee5bcSAlan Stern 	else if (dev->power.idle_notification)
5031bfee5bcSAlan Stern 		retval = -EINPROGRESS;
504dbfa4478SRafael J. Wysocki 
5055e928f77SRafael J. Wysocki 	if (retval)
5065e928f77SRafael J. Wysocki 		goto out;
5075e928f77SRafael J. Wysocki 
5081bfee5bcSAlan Stern 	/* Pending requests need to be canceled. */
5095e928f77SRafael J. Wysocki 	dev->power.request = RPM_REQ_NONE;
5101bfee5bcSAlan Stern 
5115a2bd1b1SUlf Hansson 	callback = RPM_GET_CALLBACK(dev, runtime_idle);
5125a2bd1b1SUlf Hansson 
5135a2bd1b1SUlf Hansson 	/* If no callback assume success. */
5145a2bd1b1SUlf Hansson 	if (!callback || dev->power.no_callbacks)
5157490e442SAlan Stern 		goto out;
5167490e442SAlan Stern 
5171bfee5bcSAlan Stern 	/* Carry out an asynchronous or a synchronous idle notification. */
5181bfee5bcSAlan Stern 	if (rpmflags & RPM_ASYNC) {
5191bfee5bcSAlan Stern 		dev->power.request = RPM_REQ_IDLE;
5201bfee5bcSAlan Stern 		if (!dev->power.request_pending) {
5211bfee5bcSAlan Stern 			dev->power.request_pending = true;
5221bfee5bcSAlan Stern 			queue_work(pm_wq, &dev->power.work);
5235e928f77SRafael J. Wysocki 		}
524db8f5086SPeter Zijlstra 		trace_rpm_return_int(dev, _THIS_IP_, 0);
52545f0a85cSRafael J. Wysocki 		return 0;
5265e928f77SRafael J. Wysocki 	}
5275e928f77SRafael J. Wysocki 
5285e928f77SRafael J. Wysocki 	dev->power.idle_notification = true;
5295e928f77SRafael J. Wysocki 
530bc80c2e4SRafael J. Wysocki 	if (dev->power.irq_safe)
531bc80c2e4SRafael J. Wysocki 		spin_unlock(&dev->power.lock);
532bc80c2e4SRafael J. Wysocki 	else
533bc80c2e4SRafael J. Wysocki 		spin_unlock_irq(&dev->power.lock);
534bc80c2e4SRafael J. Wysocki 
535bc80c2e4SRafael J. Wysocki 	retval = callback(dev);
536bc80c2e4SRafael J. Wysocki 
537bc80c2e4SRafael J. Wysocki 	if (dev->power.irq_safe)
538bc80c2e4SRafael J. Wysocki 		spin_lock(&dev->power.lock);
539bc80c2e4SRafael J. Wysocki 	else
540bc80c2e4SRafael J. Wysocki 		spin_lock_irq(&dev->power.lock);
5415e928f77SRafael J. Wysocki 
5425e928f77SRafael J. Wysocki 	dev->power.idle_notification = false;
5435e928f77SRafael J. Wysocki 	wake_up_all(&dev->power.wait_queue);
5445e928f77SRafael J. Wysocki 
5455e928f77SRafael J. Wysocki  out:
546db8f5086SPeter Zijlstra 	trace_rpm_return_int(dev, _THIS_IP_, retval);
547d66e6db2SUlf Hansson 	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
5485e928f77SRafael J. Wysocki }
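/*
 * Illustrative sketch (hypothetical foo_* names, not part of the original
 * file): the ->runtime_idle() callback invoked above can either let the
 * follow-up rpm_suspend(dev, RPM_AUTO) go ahead by returning 0, or veto it by
 * returning an error:
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		if (foo_device_busy(dev))	// hypothetical helper
 *			return -EBUSY;		// suspend is not attempted
 *
 *		return 0;			// rpm_suspend(dev, RPM_AUTO) runs
 *	}
 */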
5495e928f77SRafael J. Wysocki 
5505e928f77SRafael J. Wysocki /**
55162052ab1SRafael J. Wysocki  * rpm_suspend - Carry out runtime suspend of given device.
5525e928f77SRafael J. Wysocki  * @dev: Device to suspend.
5533f9af051SAlan Stern  * @rpmflags: Flag bits.
5545e928f77SRafael J. Wysocki  *
55547d8f0baSMing Lei  * Check if the device's runtime PM status allows it to be suspended.
55647d8f0baSMing Lei  * Cancel a pending idle notification, autosuspend or suspend. If
55747d8f0baSMing Lei  * another suspend has been started earlier, either return immediately
55847d8f0baSMing Lei  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
55947d8f0baSMing Lei  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
560857b36c7SMing Lei  * otherwise run the ->runtime_suspend() callback directly.  If the callback
561857b36c7SMing Lei  * succeeds and a deferred resume was requested while it was running, carry
562857b36c7SMing Lei  * out that resume; otherwise send an idle notification to the device's
563857b36c7SMing Lei  * parent (provided that neither ignore_children of parent->power nor
564857b36c7SMing Lei  * irq_safe of dev->power is set).
565886486b7SAlan Stern  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
566886486b7SAlan Stern  * flag is set and the next autosuspend-delay expiration time is in the
567886486b7SAlan Stern  * future, schedule another autosuspend attempt.
5685e928f77SRafael J. Wysocki  *
5695e928f77SRafael J. Wysocki  * This function must be called under dev->power.lock with interrupts disabled.
5705e928f77SRafael J. Wysocki  */
571140a6c94SAlan Stern static int rpm_suspend(struct device *dev, int rpmflags)
5725e928f77SRafael J. Wysocki 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
5735e928f77SRafael J. Wysocki {
57471c63122SRafael J. Wysocki 	int (*callback)(struct device *);
5755e928f77SRafael J. Wysocki 	struct device *parent = NULL;
5761bfee5bcSAlan Stern 	int retval;
5775e928f77SRafael J. Wysocki 
578db8f5086SPeter Zijlstra 	trace_rpm_suspend(dev, rpmflags);
5795e928f77SRafael J. Wysocki 
5805e928f77SRafael J. Wysocki  repeat:
5811bfee5bcSAlan Stern 	retval = rpm_check_suspend_allowed(dev);
5821bfee5bcSAlan Stern 	if (retval < 0)
5833618bbaaSAndy Shevchenko 		goto out;	/* Conditions are wrong. */
5841bfee5bcSAlan Stern 
5851bfee5bcSAlan Stern 	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
5863618bbaaSAndy Shevchenko 	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
5875e928f77SRafael J. Wysocki 		retval = -EAGAIN;
588dbfa4478SRafael J. Wysocki 
5891bfee5bcSAlan Stern 	if (retval)
5905e928f77SRafael J. Wysocki 		goto out;
5915e928f77SRafael J. Wysocki 
59215bcb91dSAlan Stern 	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
593dbfa4478SRafael J. Wysocki 	if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
5948234f673SVincent Guittot 		u64 expires = pm_runtime_autosuspend_expiration(dev);
59515bcb91dSAlan Stern 
59615bcb91dSAlan Stern 		if (expires != 0) {
59715bcb91dSAlan Stern 			/* Pending requests need to be canceled. */
59815bcb91dSAlan Stern 			dev->power.request = RPM_REQ_NONE;
59915bcb91dSAlan Stern 
60015bcb91dSAlan Stern 			/*
60115bcb91dSAlan Stern 			 * Optimization: If the timer is already running and is
60215bcb91dSAlan Stern 			 * set to expire at or before the autosuspend delay,
60315bcb91dSAlan Stern 			 * avoid the overhead of resetting it.  Just let it
60415bcb91dSAlan Stern 			 * expire; pm_suspend_timer_fn() will take care of the
60515bcb91dSAlan Stern 			 * rest.
60615bcb91dSAlan Stern 			 */
6078234f673SVincent Guittot 			if (!(dev->power.timer_expires &&
6088234f673SVincent Guittot 			    dev->power.timer_expires <= expires)) {
6098234f673SVincent Guittot 				/*
6108234f673SVincent Guittot 				 * We add a slack of 25% to gather wakeups
6118234f673SVincent Guittot 				 * without sacrificing the granularity.
6128234f673SVincent Guittot 				 */
613ca27e4cdSVincent Guittot 				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
6148234f673SVincent Guittot 						    (NSEC_PER_MSEC >> 2);
6158234f673SVincent Guittot 
61615bcb91dSAlan Stern 				dev->power.timer_expires = expires;
6178234f673SVincent Guittot 				hrtimer_start_range_ns(&dev->power.suspend_timer,
6188234f673SVincent Guittot 						       ns_to_ktime(expires),
6198234f673SVincent Guittot 						       slack,
6208234f673SVincent Guittot 						       HRTIMER_MODE_ABS);
62115bcb91dSAlan Stern 			}
62215bcb91dSAlan Stern 			dev->power.timer_autosuspends = 1;
62315bcb91dSAlan Stern 			goto out;
62415bcb91dSAlan Stern 		}
62515bcb91dSAlan Stern 	}
62615bcb91dSAlan Stern 
6275e928f77SRafael J. Wysocki 	/* Other scheduled or pending requests need to be canceled. */
6285e928f77SRafael J. Wysocki 	pm_runtime_cancel_pending(dev);
6295e928f77SRafael J. Wysocki 
6305e928f77SRafael J. Wysocki 	if (dev->power.runtime_status == RPM_SUSPENDING) {
6315e928f77SRafael J. Wysocki 		DEFINE_WAIT(wait);
6325e928f77SRafael J. Wysocki 
6331bfee5bcSAlan Stern 		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
6345e928f77SRafael J. Wysocki 			retval = -EINPROGRESS;
6355e928f77SRafael J. Wysocki 			goto out;
6365e928f77SRafael J. Wysocki 		}
6375e928f77SRafael J. Wysocki 
638ad3c36a5SRafael J. Wysocki 		if (dev->power.irq_safe) {
639ad3c36a5SRafael J. Wysocki 			spin_unlock(&dev->power.lock);
640ad3c36a5SRafael J. Wysocki 
641ad3c36a5SRafael J. Wysocki 			cpu_relax();
642ad3c36a5SRafael J. Wysocki 
643ad3c36a5SRafael J. Wysocki 			spin_lock(&dev->power.lock);
644ad3c36a5SRafael J. Wysocki 			goto repeat;
645ad3c36a5SRafael J. Wysocki 		}
646ad3c36a5SRafael J. Wysocki 
6475e928f77SRafael J. Wysocki 		/* Wait for the other suspend running in parallel with us. */
6485e928f77SRafael J. Wysocki 		for (;;) {
6495e928f77SRafael J. Wysocki 			prepare_to_wait(&dev->power.wait_queue, &wait,
6505e928f77SRafael J. Wysocki 					TASK_UNINTERRUPTIBLE);
6515e928f77SRafael J. Wysocki 			if (dev->power.runtime_status != RPM_SUSPENDING)
6525e928f77SRafael J. Wysocki 				break;
6535e928f77SRafael J. Wysocki 
6545e928f77SRafael J. Wysocki 			spin_unlock_irq(&dev->power.lock);
6555e928f77SRafael J. Wysocki 
6565e928f77SRafael J. Wysocki 			schedule();
6575e928f77SRafael J. Wysocki 
6585e928f77SRafael J. Wysocki 			spin_lock_irq(&dev->power.lock);
6595e928f77SRafael J. Wysocki 		}
6605e928f77SRafael J. Wysocki 		finish_wait(&dev->power.wait_queue, &wait);
6615e928f77SRafael J. Wysocki 		goto repeat;
6625e928f77SRafael J. Wysocki 	}
6635e928f77SRafael J. Wysocki 
6647490e442SAlan Stern 	if (dev->power.no_callbacks)
6657490e442SAlan Stern 		goto no_callback;	/* Assume success. */
6667490e442SAlan Stern 
6671bfee5bcSAlan Stern 	/* Carry out an asynchronous or a synchronous suspend. */
6681bfee5bcSAlan Stern 	if (rpmflags & RPM_ASYNC) {
66915bcb91dSAlan Stern 		dev->power.request = (rpmflags & RPM_AUTO) ?
67015bcb91dSAlan Stern 		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
6711bfee5bcSAlan Stern 		if (!dev->power.request_pending) {
6721bfee5bcSAlan Stern 			dev->power.request_pending = true;
6731bfee5bcSAlan Stern 			queue_work(pm_wq, &dev->power.work);
6741bfee5bcSAlan Stern 		}
6751bfee5bcSAlan Stern 		goto out;
6761bfee5bcSAlan Stern 	}
6771bfee5bcSAlan Stern 
6788d4b9d1bSArjan van de Ven 	__update_runtime_status(dev, RPM_SUSPENDING);
6795e928f77SRafael J. Wysocki 
680dbcd2d72SAndrzej Hajda 	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
68135cd133cSRafael J. Wysocki 
682bed57030STony Lindgren 	dev_pm_enable_wake_irq_check(dev, true);
68371c63122SRafael J. Wysocki 	retval = rpm_callback(callback, dev);
68400dc9ad1SRafael J. Wysocki 	if (retval)
68500dc9ad1SRafael J. Wysocki 		goto fail;
686886486b7SAlan Stern 
68725971410SChunfeng Yun 	dev_pm_enable_wake_irq_complete(dev);
68825971410SChunfeng Yun 
6897490e442SAlan Stern  no_callback:
6908d4b9d1bSArjan van de Ven 	__update_runtime_status(dev, RPM_SUSPENDED);
691240c7337SAlan Stern 	pm_runtime_deactivate_timer(dev);
6925e928f77SRafael J. Wysocki 
6935e928f77SRafael J. Wysocki 	if (dev->parent) {
6945e928f77SRafael J. Wysocki 		parent = dev->parent;
6955e928f77SRafael J. Wysocki 		atomic_add_unless(&parent->power.child_count, -1, 0);
6965e928f77SRafael J. Wysocki 	}
6975e928f77SRafael J. Wysocki 	wake_up_all(&dev->power.wait_queue);
6985e928f77SRafael J. Wysocki 
6995e928f77SRafael J. Wysocki 	if (dev->power.deferred_resume) {
70058a34de7SRafael J. Wysocki 		dev->power.deferred_resume = false;
701140a6c94SAlan Stern 		rpm_resume(dev, 0);
7025e928f77SRafael J. Wysocki 		retval = -EAGAIN;
7035e928f77SRafael J. Wysocki 		goto out;
7045e928f77SRafael J. Wysocki 	}
7055e928f77SRafael J. Wysocki 
7065244f5e2SRafael J. Wysocki 	if (dev->power.irq_safe)
7075244f5e2SRafael J. Wysocki 		goto out;
7085244f5e2SRafael J. Wysocki 
709c3810c88SAlan Stern 	/* Maybe the parent is now able to suspend. */
7105244f5e2SRafael J. Wysocki 	if (parent && !parent->power.ignore_children) {
711c3810c88SAlan Stern 		spin_unlock(&dev->power.lock);
7125e928f77SRafael J. Wysocki 
713c3810c88SAlan Stern 		spin_lock(&parent->power.lock);
714c3810c88SAlan Stern 		rpm_idle(parent, RPM_ASYNC);
715c3810c88SAlan Stern 		spin_unlock(&parent->power.lock);
7165e928f77SRafael J. Wysocki 
717c3810c88SAlan Stern 		spin_lock(&dev->power.lock);
7185e928f77SRafael J. Wysocki 	}
7195244f5e2SRafael J. Wysocki 	/* Maybe the suppliers are now able to suspend. */
7205244f5e2SRafael J. Wysocki 	if (dev->power.links_count > 0) {
7215244f5e2SRafael J. Wysocki 		spin_unlock_irq(&dev->power.lock);
7225244f5e2SRafael J. Wysocki 
7235244f5e2SRafael J. Wysocki 		rpm_suspend_suppliers(dev);
7245244f5e2SRafael J. Wysocki 
7255244f5e2SRafael J. Wysocki 		spin_lock_irq(&dev->power.lock);
7265244f5e2SRafael J. Wysocki 	}
7275e928f77SRafael J. Wysocki 
7285e928f77SRafael J. Wysocki  out:
729db8f5086SPeter Zijlstra 	trace_rpm_return_int(dev, _THIS_IP_, retval);
7305e928f77SRafael J. Wysocki 
7315e928f77SRafael J. Wysocki 	return retval;
73200dc9ad1SRafael J. Wysocki 
73300dc9ad1SRafael J. Wysocki  fail:
73425971410SChunfeng Yun 	dev_pm_disable_wake_irq_check(dev, true);
73500dc9ad1SRafael J. Wysocki 	__update_runtime_status(dev, RPM_ACTIVE);
73600dc9ad1SRafael J. Wysocki 	dev->power.deferred_resume = false;
737f2791d73SAlan Stern 	wake_up_all(&dev->power.wait_queue);
738f2791d73SAlan Stern 
73900dc9ad1SRafael J. Wysocki 	/*
740*72263869SRafael J. Wysocki 	 * On transient errors, if the callback routine failed an autosuspend,
741*72263869SRafael J. Wysocki 	 * and if the last_busy time has been updated so that there is a new
742*72263869SRafael J. Wysocki 	 * autosuspend expiration time, automatically reschedule another
743*72263869SRafael J. Wysocki 	 * autosuspend.
74400dc9ad1SRafael J. Wysocki 	 */
745*72263869SRafael J. Wysocki 	if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
74600dc9ad1SRafael J. Wysocki 	    pm_runtime_autosuspend_expiration(dev) != 0)
74700dc9ad1SRafael J. Wysocki 		goto repeat;
748*72263869SRafael J. Wysocki 
74900dc9ad1SRafael J. Wysocki 	pm_runtime_cancel_pending(dev);
750*72263869SRafael J. Wysocki 
75100dc9ad1SRafael J. Wysocki 	goto out;
7525e928f77SRafael J. Wysocki }
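/*
 * Illustrative note (not part of the original file): the RPM_AUTO path above
 * is what a driver reaches with the usual autosuspend idiom, for example at
 * the end of an I/O completion handler:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * which ends up in rpm_suspend() with RPM_ASYNC | RPM_AUTO set once the usage
 * count drops to zero, rescheduling the hrtimer if the autosuspend delay has
 * not expired yet.
 */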
7535e928f77SRafael J. Wysocki 
7545e928f77SRafael J. Wysocki /**
75562052ab1SRafael J. Wysocki  * rpm_resume - Carry out runtime resume of given device.
7565e928f77SRafael J. Wysocki  * @dev: Device to resume.
7573f9af051SAlan Stern  * @rpmflags: Flag bits.
7585e928f77SRafael J. Wysocki  *
75962052ab1SRafael J. Wysocki  * Check if the device's runtime PM status allows it to be resumed.  Cancel
7601bfee5bcSAlan Stern  * any scheduled or pending requests.  If another resume has been started
76125985edcSLucas De Marchi  * earlier, either return immediately or wait for it to finish, depending on the
7621bfee5bcSAlan Stern  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
7631bfee5bcSAlan Stern  * parallel with this function, either tell the other process to resume after
7641bfee5bcSAlan Stern  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
7651bfee5bcSAlan Stern  * flag is set then queue a resume request; otherwise run the
7661bfee5bcSAlan Stern  * ->runtime_resume() callback directly.  Queue an idle notification for the
7671bfee5bcSAlan Stern  * device if the resume succeeded.
7685e928f77SRafael J. Wysocki  *
7695e928f77SRafael J. Wysocki  * This function must be called under dev->power.lock with interrupts disabled.
7705e928f77SRafael J. Wysocki  */
771140a6c94SAlan Stern static int rpm_resume(struct device *dev, int rpmflags)
7725e928f77SRafael J. Wysocki 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
7735e928f77SRafael J. Wysocki {
77471c63122SRafael J. Wysocki 	int (*callback)(struct device *);
7755e928f77SRafael J. Wysocki 	struct device *parent = NULL;
7765e928f77SRafael J. Wysocki 	int retval = 0;
7775e928f77SRafael J. Wysocki 
778db8f5086SPeter Zijlstra 	trace_rpm_resume(dev, rpmflags);
7795e928f77SRafael J. Wysocki 
7805e928f77SRafael J. Wysocki  repeat:
781c24efa67SRafael J. Wysocki 	if (dev->power.runtime_error) {
7825e928f77SRafael J. Wysocki 		retval = -EINVAL;
783c24efa67SRafael J. Wysocki 	} else if (dev->power.disable_depth > 0) {
784c24efa67SRafael J. Wysocki 		if (dev->power.runtime_status == RPM_ACTIVE &&
785c24efa67SRafael J. Wysocki 		    dev->power.last_status == RPM_ACTIVE)
7866f3c77b0SKevin Hilman 			retval = 1;
787c24efa67SRafael J. Wysocki 		else
788632e270eSRafael J. Wysocki 			retval = -EACCES;
789c24efa67SRafael J. Wysocki 	}
7905e928f77SRafael J. Wysocki 	if (retval)
7915e928f77SRafael J. Wysocki 		goto out;
7925e928f77SRafael J. Wysocki 
79315bcb91dSAlan Stern 	/*
79415bcb91dSAlan Stern 	 * Other scheduled or pending requests need to be canceled.  Small
79515bcb91dSAlan Stern 	 * optimization: If an autosuspend timer is running, leave it running
79615bcb91dSAlan Stern 	 * rather than cancelling it now only to restart it again in the near
79715bcb91dSAlan Stern 	 * future.
79815bcb91dSAlan Stern 	 */
79915bcb91dSAlan Stern 	dev->power.request = RPM_REQ_NONE;
80015bcb91dSAlan Stern 	if (!dev->power.timer_autosuspends)
80115bcb91dSAlan Stern 		pm_runtime_deactivate_timer(dev);
8021bfee5bcSAlan Stern 
8031bfee5bcSAlan Stern 	if (dev->power.runtime_status == RPM_ACTIVE) {
8041bfee5bcSAlan Stern 		retval = 1;
8051bfee5bcSAlan Stern 		goto out;
8061bfee5bcSAlan Stern 	}
8071bfee5bcSAlan Stern 
808dbfa4478SRafael J. Wysocki 	if (dev->power.runtime_status == RPM_RESUMING ||
809dbfa4478SRafael J. Wysocki 	    dev->power.runtime_status == RPM_SUSPENDING) {
8105e928f77SRafael J. Wysocki 		DEFINE_WAIT(wait);
8115e928f77SRafael J. Wysocki 
8121bfee5bcSAlan Stern 		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
813e66332a4SRafael J. Wysocki 			if (dev->power.runtime_status == RPM_SUSPENDING) {
8145e928f77SRafael J. Wysocki 				dev->power.deferred_resume = true;
815e66332a4SRafael J. Wysocki 				if (rpmflags & RPM_NOWAIT)
8165e928f77SRafael J. Wysocki 					retval = -EINPROGRESS;
817e66332a4SRafael J. Wysocki 			} else {
818e66332a4SRafael J. Wysocki 				retval = -EINPROGRESS;
819e66332a4SRafael J. Wysocki 			}
8205e928f77SRafael J. Wysocki 			goto out;
8215e928f77SRafael J. Wysocki 		}
8225e928f77SRafael J. Wysocki 
823ad3c36a5SRafael J. Wysocki 		if (dev->power.irq_safe) {
824ad3c36a5SRafael J. Wysocki 			spin_unlock(&dev->power.lock);
825ad3c36a5SRafael J. Wysocki 
826ad3c36a5SRafael J. Wysocki 			cpu_relax();
827ad3c36a5SRafael J. Wysocki 
828ad3c36a5SRafael J. Wysocki 			spin_lock(&dev->power.lock);
829ad3c36a5SRafael J. Wysocki 			goto repeat;
830ad3c36a5SRafael J. Wysocki 		}
831ad3c36a5SRafael J. Wysocki 
8325e928f77SRafael J. Wysocki 		/* Wait for the operation carried out in parallel with us. */
8335e928f77SRafael J. Wysocki 		for (;;) {
8345e928f77SRafael J. Wysocki 			prepare_to_wait(&dev->power.wait_queue, &wait,
8355e928f77SRafael J. Wysocki 					TASK_UNINTERRUPTIBLE);
836dbfa4478SRafael J. Wysocki 			if (dev->power.runtime_status != RPM_RESUMING &&
837dbfa4478SRafael J. Wysocki 			    dev->power.runtime_status != RPM_SUSPENDING)
8385e928f77SRafael J. Wysocki 				break;
8395e928f77SRafael J. Wysocki 
8405e928f77SRafael J. Wysocki 			spin_unlock_irq(&dev->power.lock);
8415e928f77SRafael J. Wysocki 
8425e928f77SRafael J. Wysocki 			schedule();
8435e928f77SRafael J. Wysocki 
8445e928f77SRafael J. Wysocki 			spin_lock_irq(&dev->power.lock);
8455e928f77SRafael J. Wysocki 		}
8465e928f77SRafael J. Wysocki 		finish_wait(&dev->power.wait_queue, &wait);
8475e928f77SRafael J. Wysocki 		goto repeat;
8485e928f77SRafael J. Wysocki 	}
8495e928f77SRafael J. Wysocki 
8507490e442SAlan Stern 	/*
8517490e442SAlan Stern 	 * See if we can skip waking up the parent.  This is safe only if
8527490e442SAlan Stern 	 * power.no_callbacks is set, because otherwise we don't know whether
8537490e442SAlan Stern 	 * the resume will actually succeed.
8547490e442SAlan Stern 	 */
8557490e442SAlan Stern 	if (dev->power.no_callbacks && !parent && dev->parent) {
856d63be5f9SMing Lei 		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
857dbfa4478SRafael J. Wysocki 		if (dev->parent->power.disable_depth > 0 ||
858dbfa4478SRafael J. Wysocki 		    dev->parent->power.ignore_children ||
859dbfa4478SRafael J. Wysocki 		    dev->parent->power.runtime_status == RPM_ACTIVE) {
8607490e442SAlan Stern 			atomic_inc(&dev->parent->power.child_count);
8617490e442SAlan Stern 			spin_unlock(&dev->parent->power.lock);
8627f321c26SRafael J. Wysocki 			retval = 1;
8637490e442SAlan Stern 			goto no_callback;	/* Assume success. */
8647490e442SAlan Stern 		}
8657490e442SAlan Stern 		spin_unlock(&dev->parent->power.lock);
8667490e442SAlan Stern 	}
8677490e442SAlan Stern 
8681bfee5bcSAlan Stern 	/* Carry out an asynchronous or a synchronous resume. */
8691bfee5bcSAlan Stern 	if (rpmflags & RPM_ASYNC) {
8701bfee5bcSAlan Stern 		dev->power.request = RPM_REQ_RESUME;
8711bfee5bcSAlan Stern 		if (!dev->power.request_pending) {
8721bfee5bcSAlan Stern 			dev->power.request_pending = true;
8731bfee5bcSAlan Stern 			queue_work(pm_wq, &dev->power.work);
8741bfee5bcSAlan Stern 		}
8751bfee5bcSAlan Stern 		retval = 0;
8761bfee5bcSAlan Stern 		goto out;
8771bfee5bcSAlan Stern 	}
8781bfee5bcSAlan Stern 
8795e928f77SRafael J. Wysocki 	if (!parent && dev->parent) {
8805e928f77SRafael J. Wysocki 		/*
881c7b61de5SAlan Stern 		 * Increment the parent's usage counter and resume it if
882c7b61de5SAlan Stern 		 * necessary.  Not needed if dev is irq-safe; then the
883c7b61de5SAlan Stern 		 * parent is permanently resumed.
8845e928f77SRafael J. Wysocki 		 */
8855e928f77SRafael J. Wysocki 		parent = dev->parent;
886c7b61de5SAlan Stern 		if (dev->power.irq_safe)
887c7b61de5SAlan Stern 			goto skip_parent;
888dbfa4478SRafael J. Wysocki 
889862f89b3SAlan Stern 		spin_unlock(&dev->power.lock);
8905e928f77SRafael J. Wysocki 
8915e928f77SRafael J. Wysocki 		pm_runtime_get_noresume(parent);
8925e928f77SRafael J. Wysocki 
893862f89b3SAlan Stern 		spin_lock(&parent->power.lock);
8945e928f77SRafael J. Wysocki 		/*
895216ef0b6SUlf Hansson 		 * Resume the parent if it has runtime PM enabled and has not
896216ef0b6SUlf Hansson 		 * been set to ignore its children.
8975e928f77SRafael J. Wysocki 		 */
898dbfa4478SRafael J. Wysocki 		if (!parent->power.disable_depth &&
899dbfa4478SRafael J. Wysocki 		    !parent->power.ignore_children) {
900140a6c94SAlan Stern 			rpm_resume(parent, 0);
9015e928f77SRafael J. Wysocki 			if (parent->power.runtime_status != RPM_ACTIVE)
9025e928f77SRafael J. Wysocki 				retval = -EBUSY;
9035e928f77SRafael J. Wysocki 		}
904862f89b3SAlan Stern 		spin_unlock(&parent->power.lock);
9055e928f77SRafael J. Wysocki 
906862f89b3SAlan Stern 		spin_lock(&dev->power.lock);
9075e928f77SRafael J. Wysocki 		if (retval)
9085e928f77SRafael J. Wysocki 			goto out;
909dbfa4478SRafael J. Wysocki 
9105e928f77SRafael J. Wysocki 		goto repeat;
9115e928f77SRafael J. Wysocki 	}
912c7b61de5SAlan Stern  skip_parent:
9135e928f77SRafael J. Wysocki 
9147490e442SAlan Stern 	if (dev->power.no_callbacks)
9157490e442SAlan Stern 		goto no_callback;	/* Assume success. */
9167490e442SAlan Stern 
9178d4b9d1bSArjan van de Ven 	__update_runtime_status(dev, RPM_RESUMING);
9185e928f77SRafael J. Wysocki 
919dbcd2d72SAndrzej Hajda 	callback = RPM_GET_CALLBACK(dev, runtime_resume);
92035cd133cSRafael J. Wysocki 
92125971410SChunfeng Yun 	dev_pm_disable_wake_irq_check(dev, false);
92271c63122SRafael J. Wysocki 	retval = rpm_callback(callback, dev);
9235e928f77SRafael J. Wysocki 	if (retval) {
9248d4b9d1bSArjan van de Ven 		__update_runtime_status(dev, RPM_SUSPENDED);
9255e928f77SRafael J. Wysocki 		pm_runtime_cancel_pending(dev);
926bed57030STony Lindgren 		dev_pm_enable_wake_irq_check(dev, false);
9275e928f77SRafael J. Wysocki 	} else {
9287490e442SAlan Stern  no_callback:
9298d4b9d1bSArjan van de Ven 		__update_runtime_status(dev, RPM_ACTIVE);
93056f487c7STony Lindgren 		pm_runtime_mark_last_busy(dev);
9315e928f77SRafael J. Wysocki 		if (parent)
9325e928f77SRafael J. Wysocki 			atomic_inc(&parent->power.child_count);
9335e928f77SRafael J. Wysocki 	}
9345e928f77SRafael J. Wysocki 	wake_up_all(&dev->power.wait_queue);
9355e928f77SRafael J. Wysocki 
9367f321c26SRafael J. Wysocki 	if (retval >= 0)
937140a6c94SAlan Stern 		rpm_idle(dev, RPM_ASYNC);
9385e928f77SRafael J. Wysocki 
9395e928f77SRafael J. Wysocki  out:
940c7b61de5SAlan Stern 	if (parent && !dev->power.irq_safe) {
9415e928f77SRafael J. Wysocki 		spin_unlock_irq(&dev->power.lock);
9425e928f77SRafael J. Wysocki 
9435e928f77SRafael J. Wysocki 		pm_runtime_put(parent);
9445e928f77SRafael J. Wysocki 
9455e928f77SRafael J. Wysocki 		spin_lock_irq(&dev->power.lock);
9465e928f77SRafael J. Wysocki 	}
9475e928f77SRafael J. Wysocki 
948db8f5086SPeter Zijlstra 	trace_rpm_return_int(dev, _THIS_IP_, retval);
9495e928f77SRafael J. Wysocki 
9505e928f77SRafael J. Wysocki 	return retval;
9515e928f77SRafael J. Wysocki }
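/*
 * Illustrative note (not part of the original file): rpm_resume() is what
 * ultimately runs when a driver does the common "get and resume" sequence
 * before touching its hardware:
 *
 *	ret = pm_runtime_resume_and_get(dev);
 *	if (ret < 0)
 *		return ret;
 *
 *	... access the device ...
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */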
9525e928f77SRafael J. Wysocki 
9535e928f77SRafael J. Wysocki /**
95462052ab1SRafael J. Wysocki  * pm_runtime_work - Universal runtime PM work function.
9555e928f77SRafael J. Wysocki  * @work: Work structure used for scheduling the execution of this function.
9565e928f77SRafael J. Wysocki  *
9575e928f77SRafael J. Wysocki  * Use @work to get the device object the work is to be done for, determine what
95862052ab1SRafael J. Wysocki  * is to be done and execute the appropriate runtime PM function.
9595e928f77SRafael J. Wysocki  */
9605e928f77SRafael J. Wysocki static void pm_runtime_work(struct work_struct *work)
9615e928f77SRafael J. Wysocki {
9625e928f77SRafael J. Wysocki 	struct device *dev = container_of(work, struct device, power.work);
9635e928f77SRafael J. Wysocki 	enum rpm_request req;
9645e928f77SRafael J. Wysocki 
9655e928f77SRafael J. Wysocki 	spin_lock_irq(&dev->power.lock);
9665e928f77SRafael J. Wysocki 
9675e928f77SRafael J. Wysocki 	if (!dev->power.request_pending)
9685e928f77SRafael J. Wysocki 		goto out;
9695e928f77SRafael J. Wysocki 
9705e928f77SRafael J. Wysocki 	req = dev->power.request;
9715e928f77SRafael J. Wysocki 	dev->power.request = RPM_REQ_NONE;
9725e928f77SRafael J. Wysocki 	dev->power.request_pending = false;
9735e928f77SRafael J. Wysocki 
9745e928f77SRafael J. Wysocki 	switch (req) {
9755e928f77SRafael J. Wysocki 	case RPM_REQ_NONE:
9765e928f77SRafael J. Wysocki 		break;
9775e928f77SRafael J. Wysocki 	case RPM_REQ_IDLE:
978140a6c94SAlan Stern 		rpm_idle(dev, RPM_NOWAIT);
9795e928f77SRafael J. Wysocki 		break;
9805e928f77SRafael J. Wysocki 	case RPM_REQ_SUSPEND:
981140a6c94SAlan Stern 		rpm_suspend(dev, RPM_NOWAIT);
9825e928f77SRafael J. Wysocki 		break;
98315bcb91dSAlan Stern 	case RPM_REQ_AUTOSUSPEND:
98415bcb91dSAlan Stern 		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
98515bcb91dSAlan Stern 		break;
9865e928f77SRafael J. Wysocki 	case RPM_REQ_RESUME:
987140a6c94SAlan Stern 		rpm_resume(dev, RPM_NOWAIT);
9885e928f77SRafael J. Wysocki 		break;
9895e928f77SRafael J. Wysocki 	}
9905e928f77SRafael J. Wysocki 
9915e928f77SRafael J. Wysocki  out:
9925e928f77SRafael J. Wysocki 	spin_unlock_irq(&dev->power.lock);
9935e928f77SRafael J. Wysocki }
9945e928f77SRafael J. Wysocki 
9955e928f77SRafael J. Wysocki /**
9965e928f77SRafael J. Wysocki  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
99712c0632bSPierre-Louis Bossart  * @timer: hrtimer used by pm_schedule_suspend().
9985e928f77SRafael J. Wysocki  *
9991bfee5bcSAlan Stern  * Check if the time is right and queue a suspend request.
10005e928f77SRafael J. Wysocki  */
10018234f673SVincent Guittot static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
10025e928f77SRafael J. Wysocki {
10038234f673SVincent Guittot 	struct device *dev = container_of(timer, struct device, power.suspend_timer);
10045e928f77SRafael J. Wysocki 	unsigned long flags;
10058234f673SVincent Guittot 	u64 expires;
10065e928f77SRafael J. Wysocki 
10075e928f77SRafael J. Wysocki 	spin_lock_irqsave(&dev->power.lock, flags);
10085e928f77SRafael J. Wysocki 
10095e928f77SRafael J. Wysocki 	expires = dev->power.timer_expires;
10101f7b7081SLadislav Michl 	/*
10111f7b7081SLadislav Michl 	 * If 'expires' is after the current time, we've been called
10121f7b7081SLadislav Michl 	 * too early.
10131f7b7081SLadislav Michl 	 */
101415efb47dSVincent Guittot 	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
10155e928f77SRafael J. Wysocki 		dev->power.timer_expires = 0;
101615bcb91dSAlan Stern 		rpm_suspend(dev, dev->power.timer_autosuspends ?
101715bcb91dSAlan Stern 		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
10185e928f77SRafael J. Wysocki 	}
10195e928f77SRafael J. Wysocki 
10205e928f77SRafael J. Wysocki 	spin_unlock_irqrestore(&dev->power.lock, flags);
10218234f673SVincent Guittot 
10228234f673SVincent Guittot 	return HRTIMER_NORESTART;
10235e928f77SRafael J. Wysocki }
10245e928f77SRafael J. Wysocki 
10255e928f77SRafael J. Wysocki /**
10265e928f77SRafael J. Wysocki  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
10275e928f77SRafael J. Wysocki  * @dev: Device to suspend.
10285e928f77SRafael J. Wysocki  * @delay: Time to wait before submitting a suspend request, in milliseconds.
10295e928f77SRafael J. Wysocki  */
10305e928f77SRafael J. Wysocki int pm_schedule_suspend(struct device *dev, unsigned int delay)
10315e928f77SRafael J. Wysocki {
10325e928f77SRafael J. Wysocki 	unsigned long flags;
103315efb47dSVincent Guittot 	u64 expires;
10341bfee5bcSAlan Stern 	int retval;
10355e928f77SRafael J. Wysocki 
10365e928f77SRafael J. Wysocki 	spin_lock_irqsave(&dev->power.lock, flags);
10375e928f77SRafael J. Wysocki 
10385e928f77SRafael J. Wysocki 	if (!delay) {
1039140a6c94SAlan Stern 		retval = rpm_suspend(dev, RPM_ASYNC);
10405e928f77SRafael J. Wysocki 		goto out;
10415e928f77SRafael J. Wysocki 	}
10425e928f77SRafael J. Wysocki 
10431bfee5bcSAlan Stern 	retval = rpm_check_suspend_allowed(dev);
10445e928f77SRafael J. Wysocki 	if (retval)
10455e928f77SRafael J. Wysocki 		goto out;
10465e928f77SRafael J. Wysocki 
10471bfee5bcSAlan Stern 	/* Other scheduled or pending requests need to be canceled. */
10481bfee5bcSAlan Stern 	pm_runtime_cancel_pending(dev);
10491bfee5bcSAlan Stern 
105015efb47dSVincent Guittot 	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
105115efb47dSVincent Guittot 	dev->power.timer_expires = expires;
105215bcb91dSAlan Stern 	dev->power.timer_autosuspends = 0;
10538234f673SVincent Guittot 	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
10545e928f77SRafael J. Wysocki 
10555e928f77SRafael J. Wysocki  out:
10565e928f77SRafael J. Wysocki 	spin_unlock_irqrestore(&dev->power.lock, flags);
10575e928f77SRafael J. Wysocki 
10585e928f77SRafael J. Wysocki 	return retval;
10595e928f77SRafael J. Wysocki }
10605e928f77SRafael J. Wysocki EXPORT_SYMBOL_GPL(pm_schedule_suspend);
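
/*
 * Illustrative sketch (not part of this file): a driver that has just finished
 * I/O could ask for a suspend attempt 500 ms from now instead of suspending
 * synchronously.  foo_io_done() is a hypothetical helper; only
 * pm_schedule_suspend() itself is real API, and 500 ms is an arbitrary delay.
 */
static void foo_io_done(struct device *dev)
{
	int err = pm_schedule_suspend(dev, 500);

	if (err < 0)
		dev_dbg(dev, "delayed suspend not scheduled: %d\n", err);
}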
10615e928f77SRafael J. Wysocki 
106282586a72SRafael J. Wysocki static int rpm_drop_usage_count(struct device *dev)
106382586a72SRafael J. Wysocki {
106482586a72SRafael J. Wysocki 	int ret;
106582586a72SRafael J. Wysocki 
106682586a72SRafael J. Wysocki 	ret = atomic_sub_return(1, &dev->power.usage_count);
106782586a72SRafael J. Wysocki 	if (ret >= 0)
106882586a72SRafael J. Wysocki 		return ret;
106982586a72SRafael J. Wysocki 
107082586a72SRafael J. Wysocki 	/*
107182586a72SRafael J. Wysocki 	 * Because rpm_resume() does not check the usage counter, it will resume
107282586a72SRafael J. Wysocki 	 * the device even if the usage counter is 0 or negative, so it is
107382586a72SRafael J. Wysocki 	 * sufficient to increment the usage counter here to reverse the change
107482586a72SRafael J. Wysocki 	 * made above.
107582586a72SRafael J. Wysocki 	 */
107682586a72SRafael J. Wysocki 	atomic_inc(&dev->power.usage_count);
107782586a72SRafael J. Wysocki 	dev_warn(dev, "Runtime PM usage count underflow!\n");
107882586a72SRafael J. Wysocki 	return -EINVAL;
107982586a72SRafael J. Wysocki }
108082586a72SRafael J. Wysocki 
10815e928f77SRafael J. Wysocki /**
108262052ab1SRafael J. Wysocki  * __pm_runtime_idle - Entry point for runtime idle operations.
1083140a6c94SAlan Stern  * @dev: Device to send idle notification for.
1084140a6c94SAlan Stern  * @rpmflags: Flag bits.
1085140a6c94SAlan Stern  *
1086140a6c94SAlan Stern  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
108782586a72SRafael J. Wysocki  * return immediately if it is larger than zero (if it becomes negative, log a
108882586a72SRafael J. Wysocki  * warning, increment it, and return an error).  Then carry out an idle
1089140a6c94SAlan Stern  * notification, either synchronous or asynchronous.
1090140a6c94SAlan Stern  *
1091311aab73SColin Cross  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1092311aab73SColin Cross  * or if pm_runtime_irq_safe() has been called.
10935e928f77SRafael J. Wysocki  */
1094140a6c94SAlan Stern int __pm_runtime_idle(struct device *dev, int rpmflags)
1095140a6c94SAlan Stern {
1096140a6c94SAlan Stern 	unsigned long flags;
1097140a6c94SAlan Stern 	int retval;
1098140a6c94SAlan Stern 
1099140a6c94SAlan Stern 	if (rpmflags & RPM_GET_PUT) {
110082586a72SRafael J. Wysocki 		retval = rpm_drop_usage_count(dev);
110182586a72SRafael J. Wysocki 		if (retval < 0) {
110282586a72SRafael J. Wysocki 			return retval;
110382586a72SRafael J. Wysocki 		} else if (retval > 0) {
1104db8f5086SPeter Zijlstra 			trace_rpm_usage(dev, rpmflags);
1105140a6c94SAlan Stern 			return 0;
1106140a6c94SAlan Stern 		}
1107d2292906SMichał Mirosław 	}
1108140a6c94SAlan Stern 
1109a9306a63SRafael J. Wysocki 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1110a9306a63SRafael J. Wysocki 
1111140a6c94SAlan Stern 	spin_lock_irqsave(&dev->power.lock, flags);
1112140a6c94SAlan Stern 	retval = rpm_idle(dev, rpmflags);
1113140a6c94SAlan Stern 	spin_unlock_irqrestore(&dev->power.lock, flags);
1114140a6c94SAlan Stern 
1115140a6c94SAlan Stern 	return retval;
1116140a6c94SAlan Stern }
1117140a6c94SAlan Stern EXPORT_SYMBOL_GPL(__pm_runtime_idle);
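
/*
 * Illustrative sketch (not part of this file): drivers normally reach this
 * entry point through wrappers such as pm_runtime_put() or pm_runtime_idle()
 * from <linux/pm_runtime.h>.  foo_read_reg() is a hypothetical helper.
 */
static int foo_read_reg(struct device *dev, u32 *val)
{
	int err = pm_runtime_get_sync(dev);	/* resume the device if needed */

	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return err;
	}

	*val = 0;				/* hardware access would go here */

	return pm_runtime_put(dev);		/* async idle via __pm_runtime_idle() */
}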
1118140a6c94SAlan Stern 
1119140a6c94SAlan Stern /**
112062052ab1SRafael J. Wysocki  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1121140a6c94SAlan Stern  * @dev: Device to suspend.
1122140a6c94SAlan Stern  * @rpmflags: Flag bits.
1123140a6c94SAlan Stern  *
112415bcb91dSAlan Stern  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
112582586a72SRafael J. Wysocki  * return immediately if it is larger than zero (if it becomes negative, log a
112682586a72SRafael J. Wysocki  * warning, increment it, and return an error).  Then carry out a suspend,
112715bcb91dSAlan Stern  * either synchronous or asynchronous.
1128140a6c94SAlan Stern  *
1129311aab73SColin Cross  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1130311aab73SColin Cross  * or if pm_runtime_irq_safe() has been called.
1131140a6c94SAlan Stern  */
1132140a6c94SAlan Stern int __pm_runtime_suspend(struct device *dev, int rpmflags)
11335e928f77SRafael J. Wysocki {
11345e928f77SRafael J. Wysocki 	unsigned long flags;
11355e928f77SRafael J. Wysocki 	int retval;
11365e928f77SRafael J. Wysocki 
113715bcb91dSAlan Stern 	if (rpmflags & RPM_GET_PUT) {
113882586a72SRafael J. Wysocki 		retval = rpm_drop_usage_count(dev);
113982586a72SRafael J. Wysocki 		if (retval < 0) {
114082586a72SRafael J. Wysocki 			return retval;
114182586a72SRafael J. Wysocki 		} else if (retval > 0) {
1142db8f5086SPeter Zijlstra 			trace_rpm_usage(dev, rpmflags);
114315bcb91dSAlan Stern 			return 0;
114415bcb91dSAlan Stern 		}
1145d2292906SMichał Mirosław 	}
114615bcb91dSAlan Stern 
1147a9306a63SRafael J. Wysocki 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1148a9306a63SRafael J. Wysocki 
11495e928f77SRafael J. Wysocki 	spin_lock_irqsave(&dev->power.lock, flags);
1150140a6c94SAlan Stern 	retval = rpm_suspend(dev, rpmflags);
11515e928f77SRafael J. Wysocki 	spin_unlock_irqrestore(&dev->power.lock, flags);
11525e928f77SRafael J. Wysocki 
11535e928f77SRafael J. Wysocki 	return retval;
11545e928f77SRafael J. Wysocki }
1155140a6c94SAlan Stern EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
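
/*
 * Illustrative sketch (not part of this file): the usual autosuspend-style
 * "put" path pairs pm_runtime_mark_last_busy() with
 * pm_runtime_put_autosuspend(), the latter being one of the wrappers that
 * lands here with RPM_GET_PUT and RPM_AUTO set.  foo_xfer_done() is a
 * hypothetical helper.
 */
static void foo_xfer_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timeout */
	pm_runtime_put_autosuspend(dev);	/* drop usage count, suspend later */
}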
11565e928f77SRafael J. Wysocki 
11575e928f77SRafael J. Wysocki /**
115862052ab1SRafael J. Wysocki  * __pm_runtime_resume - Entry point for runtime resume operations.
1159140a6c94SAlan Stern  * @dev: Device to resume.
11603f9af051SAlan Stern  * @rpmflags: Flag bits.
11615e928f77SRafael J. Wysocki  *
1162140a6c94SAlan Stern  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1163140a6c94SAlan Stern  * carry out a resume, either synchronous or asynchronous.
1164140a6c94SAlan Stern  *
1165311aab73SColin Cross  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1166311aab73SColin Cross  * or if pm_runtime_irq_safe() has been called.
11675e928f77SRafael J. Wysocki  */
1168140a6c94SAlan Stern int __pm_runtime_resume(struct device *dev, int rpmflags)
11695e928f77SRafael J. Wysocki {
1170140a6c94SAlan Stern 	unsigned long flags;
11711d531c14SAlan Stern 	int retval;
11725e928f77SRafael J. Wysocki 
1173a9306a63SRafael J. Wysocki 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1174a9306a63SRafael J. Wysocki 			dev->power.runtime_status != RPM_ACTIVE);
1175311aab73SColin Cross 
1176140a6c94SAlan Stern 	if (rpmflags & RPM_GET_PUT)
11771d531c14SAlan Stern 		atomic_inc(&dev->power.usage_count);
1178140a6c94SAlan Stern 
1179140a6c94SAlan Stern 	spin_lock_irqsave(&dev->power.lock, flags);
1180140a6c94SAlan Stern 	retval = rpm_resume(dev, rpmflags);
1181140a6c94SAlan Stern 	spin_unlock_irqrestore(&dev->power.lock, flags);
11825e928f77SRafael J. Wysocki 
11835e928f77SRafael J. Wysocki 	return retval;
11845e928f77SRafael J. Wysocki }
1185140a6c94SAlan Stern EXPORT_SYMBOL_GPL(__pm_runtime_resume);
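
/*
 * Illustrative sketch (not part of this file): pm_runtime_resume_and_get()
 * wraps this function and drops the reference again on failure, which keeps
 * driver error paths simple.  foo_start() is a hypothetical helper.
 */
static int foo_start(struct device *dev)
{
	int err = pm_runtime_resume_and_get(dev);

	if (err)
		return err;		/* usage count already balanced on error */

	/* ... the device is RPM_ACTIVE here, do the actual work ... */

	pm_runtime_put(dev);
	return 0;
}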
11865e928f77SRafael J. Wysocki 
11875e928f77SRafael J. Wysocki /**
1188c0ef3df8SSakari Ailus  * pm_runtime_get_conditional - Conditionally bump up device usage counter.
1189a436b6a1SRafael J. Wysocki  * @dev: Device to handle.
11900abf803eSRafael J. Wysocki  * @ign_usage_count: Whether or not to look at the current usage counter value.
1191a436b6a1SRafael J. Wysocki  *
11920abf803eSRafael J. Wysocki  * Return -EINVAL if runtime PM is disabled for @dev.
1193a436b6a1SRafael J. Wysocki  *
11940abf803eSRafael J. Wysocki  * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
11950abf803eSRafael J. Wysocki  * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
11960abf803eSRafael J. Wysocki  * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
11970abf803eSRafael J. Wysocki  * without changing the usage counter.
1198c111566bSSakari Ailus  *
11990abf803eSRafael J. Wysocki  * If @ign_usage_count is %true, this function can be used to prevent suspending
12000abf803eSRafael J. Wysocki  * the device when its runtime PM status is %RPM_ACTIVE.
1201c111566bSSakari Ailus  *
12020abf803eSRafael J. Wysocki  * If @ign_usage_count is %false, this function can be used to prevent
12030abf803eSRafael J. Wysocki  * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
12040abf803eSRafael J. Wysocki  * runtime PM usage counter is not zero.
1205c111566bSSakari Ailus  *
120610aa694eSBhaskar Chowdhury  * The caller is responsible for decrementing the runtime PM usage counter of
12070abf803eSRafael J. Wysocki  * @dev after this function has returned a positive value for it.
1208a436b6a1SRafael J. Wysocki  */
1209c0ef3df8SSakari Ailus static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
1210a436b6a1SRafael J. Wysocki {
1211a436b6a1SRafael J. Wysocki 	unsigned long flags;
1212a436b6a1SRafael J. Wysocki 	int retval;
1213a436b6a1SRafael J. Wysocki 
1214a436b6a1SRafael J. Wysocki 	spin_lock_irqsave(&dev->power.lock, flags);
1215c111566bSSakari Ailus 	if (dev->power.disable_depth > 0) {
1216c111566bSSakari Ailus 		retval = -EINVAL;
1217c111566bSSakari Ailus 	} else if (dev->power.runtime_status != RPM_ACTIVE) {
1218c111566bSSakari Ailus 		retval = 0;
1219c111566bSSakari Ailus 	} else if (ign_usage_count) {
1220c111566bSSakari Ailus 		retval = 1;
1221c111566bSSakari Ailus 		atomic_inc(&dev->power.usage_count);
1222c111566bSSakari Ailus 	} else {
1223c111566bSSakari Ailus 		retval = atomic_inc_not_zero(&dev->power.usage_count);
1224c111566bSSakari Ailus 	}
1225db8f5086SPeter Zijlstra 	trace_rpm_usage(dev, 0);
1226a436b6a1SRafael J. Wysocki 	spin_unlock_irqrestore(&dev->power.lock, flags);
1227c111566bSSakari Ailus 
1228a436b6a1SRafael J. Wysocki 	return retval;
1229a436b6a1SRafael J. Wysocki }
1230c0ef3df8SSakari Ailus 
1231c0ef3df8SSakari Ailus /**
1232c0ef3df8SSakari Ailus  * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
1233c0ef3df8SSakari Ailus  *			      in active state
1234c0ef3df8SSakari Ailus  * @dev: Target device.
1235c0ef3df8SSakari Ailus  *
1236c0ef3df8SSakari Ailus  * Increment the runtime PM usage counter of @dev if its runtime PM status is
1237c0ef3df8SSakari Ailus  * %RPM_ACTIVE, in which case it returns 1. If the device is in a different
1238c0ef3df8SSakari Ailus  * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
1239c0ef3df8SSakari Ailus  * device, in which case also the usage_count will remain unmodified.
1240c0ef3df8SSakari Ailus  */
1241c0ef3df8SSakari Ailus int pm_runtime_get_if_active(struct device *dev)
1242c0ef3df8SSakari Ailus {
1243c0ef3df8SSakari Ailus 	return pm_runtime_get_conditional(dev, true);
1244c0ef3df8SSakari Ailus }
1245c111566bSSakari Ailus EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
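
/*
 * Illustrative sketch (not part of this file): opportunistic work that is only
 * worth doing while the device already happens to be powered up, for example
 * flushing cached hardware state.  foo_flush_if_powered() is a hypothetical
 * helper.
 */
static void foo_flush_if_powered(struct device *dev)
{
	if (pm_runtime_get_if_active(dev) <= 0)
		return;			/* suspended, or runtime PM disabled */

	/* ... touch the hardware ... */

	pm_runtime_put(dev);		/* balance the conditional get */
}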
1246a436b6a1SRafael J. Wysocki 
1247a436b6a1SRafael J. Wysocki /**
1248c0ef3df8SSakari Ailus  * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
1249c0ef3df8SSakari Ailus  * @dev: Target device.
1250c0ef3df8SSakari Ailus  *
1251c0ef3df8SSakari Ailus  * Increment the runtime PM usage counter of @dev if its runtime PM status is
1252c0ef3df8SSakari Ailus  * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
1253c0ef3df8SSakari Ailus  * it returns 1. If the device is in a different state or its usage_count is 0,
1254c0ef3df8SSakari Ailus  * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
1255c0ef3df8SSakari Ailus  * in which case also the usage_count will remain unmodified.
1256c0ef3df8SSakari Ailus  */
1257c0ef3df8SSakari Ailus int pm_runtime_get_if_in_use(struct device *dev)
1258c0ef3df8SSakari Ailus {
1259c0ef3df8SSakari Ailus 	return pm_runtime_get_conditional(dev, false);
1260c0ef3df8SSakari Ailus }
1261c0ef3df8SSakari Ailus EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
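
/*
 * Illustrative sketch (not part of this file): a statistics or tracing hook
 * that must neither power the device up nor race with an autosuspend already
 * under way.  foo_sample_counters() is a hypothetical helper.
 */
static void foo_sample_counters(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;			/* no active user; skip the sample */

	/* ... read counters while the device is guaranteed active ... */

	pm_runtime_put(dev);
}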
1262c0ef3df8SSakari Ailus 
1263c0ef3df8SSakari Ailus /**
126462052ab1SRafael J. Wysocki  * __pm_runtime_set_status - Set runtime PM status of a device.
12655e928f77SRafael J. Wysocki  * @dev: Device to handle.
126662052ab1SRafael J. Wysocki  * @status: New runtime PM status of the device.
12675e928f77SRafael J. Wysocki  *
126862052ab1SRafael J. Wysocki  * If runtime PM of the device is disabled or its power.runtime_error field is
12695e928f77SRafael J. Wysocki  * different from zero, the status may be changed either to RPM_ACTIVE, or to
12705e928f77SRafael J. Wysocki  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
12715e928f77SRafael J. Wysocki  * However, if the device has a parent and the parent is not active, and the
12725e928f77SRafael J. Wysocki  * parent's power.ignore_children flag is unset, the device's status cannot be
12735e928f77SRafael J. Wysocki  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
12745e928f77SRafael J. Wysocki  *
12755e928f77SRafael J. Wysocki  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
12765e928f77SRafael J. Wysocki  * and the device parent's counter of unsuspended children is modified to
12775e928f77SRafael J. Wysocki  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
12785e928f77SRafael J. Wysocki  * notification request for the parent is submitted.
12794080ab08SRafael J. Wysocki  *
12804080ab08SRafael J. Wysocki  * If @dev has any suppliers (as reflected by device links to them), and @status
12814080ab08SRafael J. Wysocki  * is RPM_ACTIVE, they will be activated upfront and if the activation of one
12824080ab08SRafael J. Wysocki  * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
12834080ab08SRafael J. Wysocki  * of the @status value) and the suppliers will be deactivated on exit.  The
12844080ab08SRafael J. Wysocki  * error returned by the failing supplier activation will be returned in that
12854080ab08SRafael J. Wysocki  * case.
12865e928f77SRafael J. Wysocki  */
12875e928f77SRafael J. Wysocki int __pm_runtime_set_status(struct device *dev, unsigned int status)
12885e928f77SRafael J. Wysocki {
12895e928f77SRafael J. Wysocki 	struct device *parent = dev->parent;
12905e928f77SRafael J. Wysocki 	bool notify_parent = false;
129113966517SUlf Hansson 	unsigned long flags;
12925e928f77SRafael J. Wysocki 	int error = 0;
12935e928f77SRafael J. Wysocki 
12945e928f77SRafael J. Wysocki 	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
12955e928f77SRafael J. Wysocki 		return -EINVAL;
12965e928f77SRafael J. Wysocki 
129713966517SUlf Hansson 	spin_lock_irqsave(&dev->power.lock, flags);
12985e928f77SRafael J. Wysocki 
1299c1567f81SRafael J. Wysocki 	/*
1300c1567f81SRafael J. Wysocki 	 * Prevent PM-runtime from being enabled for the device or return an
1301c1567f81SRafael J. Wysocki 	 * error if it is enabled already and working.
1302c1567f81SRafael J. Wysocki 	 */
1303c1567f81SRafael J. Wysocki 	if (dev->power.runtime_error || dev->power.disable_depth)
1304c1567f81SRafael J. Wysocki 		dev->power.disable_depth++;
1305c1567f81SRafael J. Wysocki 	else
13065e928f77SRafael J. Wysocki 		error = -EAGAIN;
1307c1567f81SRafael J. Wysocki 
130813966517SUlf Hansson 	spin_unlock_irqrestore(&dev->power.lock, flags);
1309c1567f81SRafael J. Wysocki 
1310c1567f81SRafael J. Wysocki 	if (error)
1311c1567f81SRafael J. Wysocki 		return error;
1312c1567f81SRafael J. Wysocki 
13134080ab08SRafael J. Wysocki 	/*
13144080ab08SRafael J. Wysocki 	 * If the new status is RPM_ACTIVE, the suppliers can be activated
13154080ab08SRafael J. Wysocki 	 * upfront regardless of the current status, because next time
13164080ab08SRafael J. Wysocki 	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
13174080ab08SRafael J. Wysocki 	 * involved will be dropped down to one anyway.
13184080ab08SRafael J. Wysocki 	 */
13194080ab08SRafael J. Wysocki 	if (status == RPM_ACTIVE) {
13204080ab08SRafael J. Wysocki 		int idx = device_links_read_lock();
13214080ab08SRafael J. Wysocki 
13224080ab08SRafael J. Wysocki 		error = rpm_get_suppliers(dev);
13234080ab08SRafael J. Wysocki 		if (error)
13244080ab08SRafael J. Wysocki 			status = RPM_SUSPENDED;
13254080ab08SRafael J. Wysocki 
13264080ab08SRafael J. Wysocki 		device_links_read_unlock(idx);
13275e928f77SRafael J. Wysocki 	}
13285e928f77SRafael J. Wysocki 
132913966517SUlf Hansson 	spin_lock_irqsave(&dev->power.lock, flags);
13305e928f77SRafael J. Wysocki 
1331f8817f61SRafael J. Wysocki 	if (dev->power.runtime_status == status || !parent)
13325e928f77SRafael J. Wysocki 		goto out_set;
13335e928f77SRafael J. Wysocki 
13345e928f77SRafael J. Wysocki 	if (status == RPM_SUSPENDED) {
13355e928f77SRafael J. Wysocki 		atomic_add_unless(&parent->power.child_count, -1, 0);
13365e928f77SRafael J. Wysocki 		notify_parent = !parent->power.ignore_children;
1337f8817f61SRafael J. Wysocki 	} else {
1338bab636b9SRafael J. Wysocki 		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
13395e928f77SRafael J. Wysocki 
13405e928f77SRafael J. Wysocki 		/*
13415e928f77SRafael J. Wysocki 		 * It is invalid to put an active child under a parent that is
134262052ab1SRafael J. Wysocki 		 * not active, has runtime PM enabled and the
13435e928f77SRafael J. Wysocki 		 * 'power.ignore_children' flag unset.
13445e928f77SRafael J. Wysocki 		 */
1345dbfa4478SRafael J. Wysocki 		if (!parent->power.disable_depth &&
1346dbfa4478SRafael J. Wysocki 		    !parent->power.ignore_children &&
1347dbfa4478SRafael J. Wysocki 		    parent->power.runtime_status != RPM_ACTIVE) {
134871723f95SLinus Walleij 			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
134971723f95SLinus Walleij 				dev_name(dev),
135071723f95SLinus Walleij 				dev_name(parent));
13515e928f77SRafael J. Wysocki 			error = -EBUSY;
135271723f95SLinus Walleij 		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
13535e928f77SRafael J. Wysocki 			atomic_inc(&parent->power.child_count);
135471723f95SLinus Walleij 		}
13555e928f77SRafael J. Wysocki 
1356862f89b3SAlan Stern 		spin_unlock(&parent->power.lock);
13575e928f77SRafael J. Wysocki 
13584080ab08SRafael J. Wysocki 		if (error) {
13594080ab08SRafael J. Wysocki 			status = RPM_SUSPENDED;
13605e928f77SRafael J. Wysocki 			goto out;
13615e928f77SRafael J. Wysocki 		}
13624080ab08SRafael J. Wysocki 	}
13635e928f77SRafael J. Wysocki 
13645e928f77SRafael J. Wysocki  out_set:
13658d4b9d1bSArjan van de Ven 	__update_runtime_status(dev, status);
13664080ab08SRafael J. Wysocki 	if (!error)
13675e928f77SRafael J. Wysocki 		dev->power.runtime_error = 0;
13684080ab08SRafael J. Wysocki 
13695e928f77SRafael J. Wysocki  out:
137013966517SUlf Hansson 	spin_unlock_irqrestore(&dev->power.lock, flags);
13715e928f77SRafael J. Wysocki 
13725e928f77SRafael J. Wysocki 	if (notify_parent)
13735e928f77SRafael J. Wysocki 		pm_request_idle(parent);
13745e928f77SRafael J. Wysocki 
13754080ab08SRafael J. Wysocki 	if (status == RPM_SUSPENDED) {
13764080ab08SRafael J. Wysocki 		int idx = device_links_read_lock();
13774080ab08SRafael J. Wysocki 
13784080ab08SRafael J. Wysocki 		rpm_put_suppliers(dev);
13794080ab08SRafael J. Wysocki 
13804080ab08SRafael J. Wysocki 		device_links_read_unlock(idx);
13814080ab08SRafael J. Wysocki 	}
13824080ab08SRafael J. Wysocki 
1383c1567f81SRafael J. Wysocki 	pm_runtime_enable(dev);
1384c1567f81SRafael J. Wysocki 
13855e928f77SRafael J. Wysocki 	return error;
13865e928f77SRafael J. Wysocki }
13875e928f77SRafael J. Wysocki EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
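
/*
 * Illustrative sketch (not part of this file): a probe path that leaves the
 * hardware powered up reports that with pm_runtime_set_active(), a wrapper
 * around this function, before enabling runtime PM.  foo_probe_tail() is a
 * hypothetical helper.
 */
static int foo_probe_tail(struct device *dev)
{
	int err = pm_runtime_set_active(dev);	/* status follows the HW state */

	if (err)
		return err;

	pm_runtime_enable(dev);
	return 0;
}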
13885e928f77SRafael J. Wysocki 
13895e928f77SRafael J. Wysocki /**
13905e928f77SRafael J. Wysocki  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
13915e928f77SRafael J. Wysocki  * @dev: Device to handle.
13925e928f77SRafael J. Wysocki  *
13935e928f77SRafael J. Wysocki  * Flush all pending requests for the device from pm_wq and wait for all
139462052ab1SRafael J. Wysocki  * runtime PM operations involving the device in progress to complete.
13955e928f77SRafael J. Wysocki  *
13965e928f77SRafael J. Wysocki  * Should be called under dev->power.lock with interrupts disabled.
13975e928f77SRafael J. Wysocki  */
13985e928f77SRafael J. Wysocki static void __pm_runtime_barrier(struct device *dev)
13995e928f77SRafael J. Wysocki {
14005e928f77SRafael J. Wysocki 	pm_runtime_deactivate_timer(dev);
14015e928f77SRafael J. Wysocki 
14025e928f77SRafael J. Wysocki 	if (dev->power.request_pending) {
14035e928f77SRafael J. Wysocki 		dev->power.request = RPM_REQ_NONE;
14045e928f77SRafael J. Wysocki 		spin_unlock_irq(&dev->power.lock);
14055e928f77SRafael J. Wysocki 
14065e928f77SRafael J. Wysocki 		cancel_work_sync(&dev->power.work);
14075e928f77SRafael J. Wysocki 
14085e928f77SRafael J. Wysocki 		spin_lock_irq(&dev->power.lock);
14095e928f77SRafael J. Wysocki 		dev->power.request_pending = false;
14105e928f77SRafael J. Wysocki 	}
14115e928f77SRafael J. Wysocki 
1412dbfa4478SRafael J. Wysocki 	if (dev->power.runtime_status == RPM_SUSPENDING ||
1413dbfa4478SRafael J. Wysocki 	    dev->power.runtime_status == RPM_RESUMING ||
1414dbfa4478SRafael J. Wysocki 	    dev->power.idle_notification) {
14155e928f77SRafael J. Wysocki 		DEFINE_WAIT(wait);
14165e928f77SRafael J. Wysocki 
14175e928f77SRafael J. Wysocki 		/* Suspend, wake-up or idle notification in progress. */
14185e928f77SRafael J. Wysocki 		for (;;) {
14195e928f77SRafael J. Wysocki 			prepare_to_wait(&dev->power.wait_queue, &wait,
14205e928f77SRafael J. Wysocki 					TASK_UNINTERRUPTIBLE);
14215e928f77SRafael J. Wysocki 			if (dev->power.runtime_status != RPM_SUSPENDING
14225e928f77SRafael J. Wysocki 			    && dev->power.runtime_status != RPM_RESUMING
14235e928f77SRafael J. Wysocki 			    && !dev->power.idle_notification)
14245e928f77SRafael J. Wysocki 				break;
14255e928f77SRafael J. Wysocki 			spin_unlock_irq(&dev->power.lock);
14265e928f77SRafael J. Wysocki 
14275e928f77SRafael J. Wysocki 			schedule();
14285e928f77SRafael J. Wysocki 
14295e928f77SRafael J. Wysocki 			spin_lock_irq(&dev->power.lock);
14305e928f77SRafael J. Wysocki 		}
14315e928f77SRafael J. Wysocki 		finish_wait(&dev->power.wait_queue, &wait);
14325e928f77SRafael J. Wysocki 	}
14335e928f77SRafael J. Wysocki }
14345e928f77SRafael J. Wysocki 
14355e928f77SRafael J. Wysocki /**
14365e928f77SRafael J. Wysocki  * pm_runtime_barrier - Flush pending requests and wait for completions.
14375e928f77SRafael J. Wysocki  * @dev: Device to handle.
14385e928f77SRafael J. Wysocki  *
14395e928f77SRafael J. Wysocki  * Prevent the device from being suspended by incrementing its usage counter and
14405e928f77SRafael J. Wysocki  * if there's a pending resume request for the device, wake the device up.
14415e928f77SRafael J. Wysocki  * Next, make sure that all pending requests for the device have been flushed
144262052ab1SRafael J. Wysocki  * from pm_wq and wait for all runtime PM operations involving the device in
14435e928f77SRafael J. Wysocki  * progress to complete.
14445e928f77SRafael J. Wysocki  *
14455e928f77SRafael J. Wysocki  * Return value:
14465e928f77SRafael J. Wysocki  * 1, if there was a resume request pending and the device had to be woken up,
14475e928f77SRafael J. Wysocki  * 0, otherwise
14485e928f77SRafael J. Wysocki  */
14495e928f77SRafael J. Wysocki int pm_runtime_barrier(struct device *dev)
14505e928f77SRafael J. Wysocki {
14515e928f77SRafael J. Wysocki 	int retval = 0;
14525e928f77SRafael J. Wysocki 
14535e928f77SRafael J. Wysocki 	pm_runtime_get_noresume(dev);
14545e928f77SRafael J. Wysocki 	spin_lock_irq(&dev->power.lock);
14555e928f77SRafael J. Wysocki 
14565e928f77SRafael J. Wysocki 	if (dev->power.request_pending
14575e928f77SRafael J. Wysocki 	    && dev->power.request == RPM_REQ_RESUME) {
1458140a6c94SAlan Stern 		rpm_resume(dev, 0);
14595e928f77SRafael J. Wysocki 		retval = 1;
14605e928f77SRafael J. Wysocki 	}
14615e928f77SRafael J. Wysocki 
14625e928f77SRafael J. Wysocki 	__pm_runtime_barrier(dev);
14635e928f77SRafael J. Wysocki 
14645e928f77SRafael J. Wysocki 	spin_unlock_irq(&dev->power.lock);
14655e928f77SRafael J. Wysocki 	pm_runtime_put_noidle(dev);
14665e928f77SRafael J. Wysocki 
14675e928f77SRafael J. Wysocki 	return retval;
14685e928f77SRafael J. Wysocki }
14695e928f77SRafael J. Wysocki EXPORT_SYMBOL_GPL(pm_runtime_barrier);
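
/*
 * Illustrative sketch (not part of this file): before tearing down resources
 * that the runtime PM callbacks depend on, a caller might pin the device
 * active and flush any asynchronous requests still queued.  foo_teardown() is
 * a hypothetical helper.
 */
static void foo_teardown(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* keep the device active from here on */
	pm_runtime_barrier(dev);	/* no callbacks running or queued now */

	/* ... release resources used by the callbacks ... */

	pm_runtime_put_sync(dev);
}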
14705e928f77SRafael J. Wysocki 
14715e928f77SRafael J. Wysocki bool pm_runtime_block_if_disabled(struct device *dev)
14725e928f77SRafael J. Wysocki {
14735e928f77SRafael J. Wysocki 	bool ret;
14745e928f77SRafael J. Wysocki 
14755e928f77SRafael J. Wysocki 	spin_lock_irq(&dev->power.lock);
14765e928f77SRafael J. Wysocki 
14775e928f77SRafael J. Wysocki 	ret = !pm_runtime_enabled(dev);
14785e928f77SRafael J. Wysocki 	if (ret && dev->power.last_status == RPM_INVALID)
14795e928f77SRafael J. Wysocki 		dev->power.last_status = RPM_BLOCKED;
14805e928f77SRafael J. Wysocki 
14815e928f77SRafael J. Wysocki 	spin_unlock_irq(&dev->power.lock);
148262052ab1SRafael J. Wysocki 
14835e928f77SRafael J. Wysocki 	return ret;
14845e928f77SRafael J. Wysocki }
1485dbfa4478SRafael J. Wysocki 
1486dbfa4478SRafael J. Wysocki void pm_runtime_unblock(struct device *dev)
14875e928f77SRafael J. Wysocki {
14885e928f77SRafael J. Wysocki 	spin_lock_irq(&dev->power.lock);
14895e928f77SRafael J. Wysocki 
14905e928f77SRafael J. Wysocki 	if (dev->power.last_status == RPM_BLOCKED)
14915e928f77SRafael J. Wysocki 		dev->power.last_status = RPM_INVALID;
14925e928f77SRafael J. Wysocki 
1493140a6c94SAlan Stern 	spin_unlock_irq(&dev->power.lock);
14945e928f77SRafael J. Wysocki }
14955e928f77SRafael J. Wysocki 
14965e928f77SRafael J. Wysocki void __pm_runtime_disable(struct device *dev, bool check_resume)
14975e928f77SRafael J. Wysocki {
1498fed7e88cSVincent Guittot 	spin_lock_irq(&dev->power.lock);
1499fed7e88cSVincent Guittot 
1500fed7e88cSVincent Guittot 	if (dev->power.disable_depth > 0) {
1501c24efa67SRafael J. Wysocki 		dev->power.disable_depth++;
15025e928f77SRafael J. Wysocki 		goto out;
1503c24efa67SRafael J. Wysocki 	}
1504c24efa67SRafael J. Wysocki 
15055e928f77SRafael J. Wysocki 	/*
15065e928f77SRafael J. Wysocki 	 * Wake up the device if there's a resume request pending, because that
15075e928f77SRafael J. Wysocki 	 * means there probably is some I/O to process and disabling runtime PM
15085e928f77SRafael J. Wysocki 	 * shouldn't prevent the device from processing the I/O.
15095e928f77SRafael J. Wysocki 	 */
15105e928f77SRafael J. Wysocki 	if (check_resume && dev->power.request_pending &&
15115e928f77SRafael J. Wysocki 	    dev->power.request == RPM_REQ_RESUME) {
151262052ab1SRafael J. Wysocki 		/*
15135e928f77SRafael J. Wysocki 		 * Prevent suspends and idle notifications from being carried
15145e928f77SRafael J. Wysocki 		 * out after we have woken up the device.
15155e928f77SRafael J. Wysocki 		 */
15165e928f77SRafael J. Wysocki 		pm_runtime_get_noresume(dev);
15175e928f77SRafael J. Wysocki 
15185e928f77SRafael J. Wysocki 		rpm_resume(dev, 0);
15195e928f77SRafael J. Wysocki 
15205e928f77SRafael J. Wysocki 		pm_runtime_put_noidle(dev);
1521c24efa67SRafael J. Wysocki 	}
15225e928f77SRafael J. Wysocki 
1523c24efa67SRafael J. Wysocki 	/* Update time accounting before disabling PM-runtime. */
152458456488SVincent Guittot 	update_pm_runtime_accounting(dev);
15255e928f77SRafael J. Wysocki 
1526c24efa67SRafael J. Wysocki 	if (!dev->power.disable_depth++) {
1527c24efa67SRafael J. Wysocki 		__pm_runtime_barrier(dev);
1528f8817f61SRafael J. Wysocki 		dev->power.last_status = dev->power.runtime_status;
1529c24efa67SRafael J. Wysocki 	}
1530c24efa67SRafael J. Wysocki 
1531c24efa67SRafael J. Wysocki  out:
1532c24efa67SRafael J. Wysocki 	spin_unlock_irq(&dev->power.lock);
1533c24efa67SRafael J. Wysocki }
1534c24efa67SRafael J. Wysocki EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1535c24efa67SRafael J. Wysocki 
1536c24efa67SRafael J. Wysocki /**
1537c24efa67SRafael J. Wysocki  * pm_runtime_enable - Enable runtime PM of a device.
15385e928f77SRafael J. Wysocki  * @dev: Device to handle.
15395e928f77SRafael J. Wysocki  */
15405e928f77SRafael J. Wysocki void pm_runtime_enable(struct device *dev)
15415e928f77SRafael J. Wysocki {
1542b3636a3aSDmitry Baryshkov 	unsigned long flags;
1543b3636a3aSDmitry Baryshkov 
1544b4060db9SDouglas Anderson 	spin_lock_irqsave(&dev->power.lock, flags);
1545b3636a3aSDmitry Baryshkov 
1546b3636a3aSDmitry Baryshkov 	if (!dev->power.disable_depth) {
1547b3636a3aSDmitry Baryshkov 		dev_warn(dev, "Unbalanced %s!\n", __func__);
1548b3636a3aSDmitry Baryshkov 		goto out;
1549b3636a3aSDmitry Baryshkov 	}
1550b4060db9SDouglas Anderson 
1551b4060db9SDouglas Anderson 	if (--dev->power.disable_depth > 0)
1552b4060db9SDouglas Anderson 		goto out;
1553b4060db9SDouglas Anderson 
1554b3636a3aSDmitry Baryshkov 	if (dev->power.last_status == RPM_BLOCKED) {
1555b3636a3aSDmitry Baryshkov 		dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
1556b3636a3aSDmitry Baryshkov 		dump_stack();
1557b3636a3aSDmitry Baryshkov 	}
1558b3636a3aSDmitry Baryshkov 	dev->power.last_status = RPM_INVALID;
1559b3636a3aSDmitry Baryshkov 	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1560b3636a3aSDmitry Baryshkov 
1561b3636a3aSDmitry Baryshkov 	if (dev->power.runtime_status == RPM_SUSPENDED &&
1562b3636a3aSDmitry Baryshkov 	    !dev->power.ignore_children &&
1563b3636a3aSDmitry Baryshkov 	    atomic_read(&dev->power.child_count) > 0)
15645e928f77SRafael J. Wysocki 		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
156562052ab1SRafael J. Wysocki 
156653823639SRafael J. Wysocki out:
156753823639SRafael J. Wysocki 	spin_unlock_irqrestore(&dev->power.lock, flags);
156853823639SRafael J. Wysocki }
156953823639SRafael J. Wysocki EXPORT_SYMBOL_GPL(pm_runtime_enable);
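
/*
 * Illustrative sketch (not part of this file): pm_runtime_enable() and
 * pm_runtime_disable() must stay balanced; a typical bind/unbind pair looks
 * roughly like this.  The foo_* helpers are hypothetical.
 */
static int foo_bind(struct device *dev)
{
	pm_runtime_enable(dev);		/* undoes the initial disable_depth of 1 */
	return 0;
}

static void foo_unbind(struct device *dev)
{
	pm_runtime_disable(dev);	/* balances the enable in foo_bind() */
}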
157053823639SRafael J. Wysocki 
157153823639SRafael J. Wysocki static void pm_runtime_disable_action(void *data)
157253823639SRafael J. Wysocki {
157353823639SRafael J. Wysocki 	pm_runtime_dont_use_autosuspend(data);
157453823639SRafael J. Wysocki 	pm_runtime_disable(data);
157553823639SRafael J. Wysocki }
157653823639SRafael J. Wysocki 
157753823639SRafael J. Wysocki /**
157853823639SRafael J. Wysocki  * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
157953823639SRafael J. Wysocki  *
1580140a6c94SAlan Stern  * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
158153823639SRafael J. Wysocki  * you at driver exit time if needed.
158253823639SRafael J. Wysocki  *
158353823639SRafael J. Wysocki  * @dev: Device to handle.
158453823639SRafael J. Wysocki  */
158553823639SRafael J. Wysocki int devm_pm_runtime_enable(struct device *dev)
158653823639SRafael J. Wysocki {
158753823639SRafael J. Wysocki 	pm_runtime_enable(dev);
158862052ab1SRafael J. Wysocki 
158953823639SRafael J. Wysocki 	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
159053823639SRafael J. Wysocki }
159153823639SRafael J. Wysocki EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
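
/*
 * Illustrative sketch (not part of this file): with the devres variant the
 * matching disable (and pm_runtime_dont_use_autosuspend(), if needed) runs
 * automatically when the driver is unbound.  foo_devres_bind() is a
 * hypothetical helper.
 */
static int foo_devres_bind(struct device *dev)
{
	return devm_pm_runtime_enable(dev);	/* no explicit cleanup required */
}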
159253823639SRafael J. Wysocki 
159353823639SRafael J. Wysocki /**
159453823639SRafael J. Wysocki  * pm_runtime_forbid - Block runtime PM of a device.
159582586a72SRafael J. Wysocki  * @dev: Device to handle.
159682586a72SRafael J. Wysocki  *
159753823639SRafael J. Wysocki  * Increase the device's usage count and clear its power.runtime_auto flag,
159853823639SRafael J. Wysocki  * so that it cannot be suspended at run time until pm_runtime_allow() is called
159953823639SRafael J. Wysocki  * for it.
160053823639SRafael J. Wysocki  */
160153823639SRafael J. Wysocki void pm_runtime_forbid(struct device *dev)
160282586a72SRafael J. Wysocki {
160382586a72SRafael J. Wysocki 	spin_lock_irq(&dev->power.lock);
1604fe7450b0SRafael J. Wysocki 	if (!dev->power.runtime_auto)
160582586a72SRafael J. Wysocki 		goto out;
1606db8f5086SPeter Zijlstra 
160753823639SRafael J. Wysocki 	dev->power.runtime_auto = false;
160853823639SRafael J. Wysocki 	atomic_inc(&dev->power.usage_count);
160953823639SRafael J. Wysocki 	rpm_resume(dev, 0);
161053823639SRafael J. Wysocki 
161153823639SRafael J. Wysocki  out:
161253823639SRafael J. Wysocki 	spin_unlock_irq(&dev->power.lock);
161353823639SRafael J. Wysocki }
161462052ab1SRafael J. Wysocki EXPORT_SYMBOL_GPL(pm_runtime_forbid);
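
/*
 * Illustrative sketch (not part of this file): a driver or bus that wants a
 * device to stay powered until userspace writes "auto" to its
 * /sys/devices/.../power/control file can forbid runtime suspend up front;
 * pm_runtime_allow() is the reverse operation.  foo_default_to_on() is a
 * hypothetical helper.
 */
static void foo_default_to_on(struct device *dev)
{
	pm_runtime_forbid(dev);		/* usage count +1, runtime_auto cleared */
}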
16157490e442SAlan Stern 
16167490e442SAlan Stern /**
16177490e442SAlan Stern  * pm_runtime_allow - Unblock runtime PM of a device.
161862052ab1SRafael J. Wysocki  * @dev: Device to handle.
161962052ab1SRafael J. Wysocki  *
16207490e442SAlan Stern  * Decrease the device's usage count and set its power.runtime_auto flag.
16217490e442SAlan Stern  */
16227490e442SAlan Stern void pm_runtime_allow(struct device *dev)
16237490e442SAlan Stern {
16247490e442SAlan Stern 	int ret;
16257490e442SAlan Stern 
16267490e442SAlan Stern 	spin_lock_irq(&dev->power.lock);
16277490e442SAlan Stern 	if (dev->power.runtime_auto)
16287490e442SAlan Stern 		goto out;
16297490e442SAlan Stern 
16307490e442SAlan Stern 	dev->power.runtime_auto = true;
16317490e442SAlan Stern 	ret = rpm_drop_usage_count(dev);
1632c7b61de5SAlan Stern 	if (ret == 0)
1633c7b61de5SAlan Stern 		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1634c7b61de5SAlan Stern 	else if (ret > 0)
1635c7b61de5SAlan Stern 		trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
1636c7b61de5SAlan Stern 
1637c7b61de5SAlan Stern  out:
1638c7b61de5SAlan Stern 	spin_unlock_irq(&dev->power.lock);
1639c7b61de5SAlan Stern }
1640c7b61de5SAlan Stern EXPORT_SYMBOL_GPL(pm_runtime_allow);
1641c7b61de5SAlan Stern 
1642c7b61de5SAlan Stern /**
1643c7b61de5SAlan Stern  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1644c7b61de5SAlan Stern  * @dev: Device to handle.
1645c7b61de5SAlan Stern  *
1646dbfa4478SRafael J. Wysocki  * Set the power.no_callbacks flag, which tells the PM core that this
1647c7b61de5SAlan Stern  * device is power-managed through its parent and has no runtime PM
1648c7b61de5SAlan Stern  * callbacks of its own.  The runtime sysfs attributes will be removed.
1649c7b61de5SAlan Stern  */
1650c7b61de5SAlan Stern void pm_runtime_no_callbacks(struct device *dev)
1651c7b61de5SAlan Stern {
1652c7b61de5SAlan Stern 	spin_lock_irq(&dev->power.lock);
1653c7b61de5SAlan Stern 	dev->power.no_callbacks = 1;
165415bcb91dSAlan Stern 	spin_unlock_irq(&dev->power.lock);
165515bcb91dSAlan Stern 	if (device_is_registered(dev))
165615bcb91dSAlan Stern 		rpm_sysfs_remove(dev);
165715bcb91dSAlan Stern }
165815bcb91dSAlan Stern EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
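
/*
 * Illustrative sketch (not part of this file): a purely logical child device
 * that is powered through its parent can opt out of having callbacks of its
 * own.  foo_init_child() is a hypothetical helper.
 */
static void foo_init_child(struct device *child)
{
	pm_runtime_no_callbacks(child);	/* power state follows the parent */
	pm_runtime_enable(child);
}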
165915bcb91dSAlan Stern 
166015bcb91dSAlan Stern /**
166115bcb91dSAlan Stern  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
166215bcb91dSAlan Stern  * @dev: Device to handle
166315bcb91dSAlan Stern  *
166415bcb91dSAlan Stern  * Set the power.irq_safe flag, which tells the PM core that the
166515bcb91dSAlan Stern  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
166615bcb91dSAlan Stern  * always be invoked with the spinlock held and interrupts disabled.  It also
166715bcb91dSAlan Stern  * causes the parent's usage counter to be permanently incremented, preventing
166815bcb91dSAlan Stern  * the parent from runtime suspending -- otherwise an irq-safe child might have
166915bcb91dSAlan Stern  * to wait for a non-irq-safe parent.
167015bcb91dSAlan Stern  */
167115bcb91dSAlan Stern void pm_runtime_irq_safe(struct device *dev)
167215bcb91dSAlan Stern {
167315bcb91dSAlan Stern 	if (dev->parent)
167415bcb91dSAlan Stern 		pm_runtime_get_sync(dev->parent);
1675d2292906SMichał Mirosław 
1676db8f5086SPeter Zijlstra 	spin_lock_irq(&dev->power.lock);
167715bcb91dSAlan Stern 	dev->power.irq_safe = 1;
167815bcb91dSAlan Stern 	spin_unlock_irq(&dev->power.lock);
167915bcb91dSAlan Stern }
168015bcb91dSAlan Stern EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
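
/*
 * Illustrative sketch (not part of this file): a driver whose runtime PM
 * callbacks only poke a few registers and never sleep can mark the device
 * IRQ-safe so that synchronous get/put calls become legal in atomic context.
 * foo_mark_irq_safe() is a hypothetical helper.
 */
static void foo_mark_irq_safe(struct device *dev)
{
	pm_runtime_irq_safe(dev);	/* note: the parent is now pinned active */
}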
168115bcb91dSAlan Stern 
168215bcb91dSAlan Stern /**
168315bcb91dSAlan Stern  * update_autosuspend - Handle a change to a device's autosuspend settings.
168415bcb91dSAlan Stern  * @dev: Device to handle.
168515bcb91dSAlan Stern  * @old_delay: The former autosuspend_delay value.
168615bcb91dSAlan Stern  * @old_use: The former use_autosuspend value.
168715bcb91dSAlan Stern  *
168815bcb91dSAlan Stern  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
168915bcb91dSAlan Stern  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
169015bcb91dSAlan Stern  *
169115bcb91dSAlan Stern  * This function must be called under dev->power.lock with interrupts disabled.
169215bcb91dSAlan Stern  */
169315bcb91dSAlan Stern static void update_autosuspend(struct device *dev, int old_delay, int old_use)
169415bcb91dSAlan Stern {
169515bcb91dSAlan Stern 	int delay = dev->power.autosuspend_delay;
169615bcb91dSAlan Stern 
169715bcb91dSAlan Stern 	/* Should runtime suspend be prevented now? */
169862052ab1SRafael J. Wysocki 	if (dev->power.use_autosuspend && delay < 0) {
169962052ab1SRafael J. Wysocki 
170015bcb91dSAlan Stern 		/* If it used to be allowed then prevent it. */
170115bcb91dSAlan Stern 		if (!old_use || old_delay >= 0) {
170215bcb91dSAlan Stern 			atomic_inc(&dev->power.usage_count);
170315bcb91dSAlan Stern 			rpm_resume(dev, 0);
170415bcb91dSAlan Stern 		} else {
170515bcb91dSAlan Stern 			trace_rpm_usage(dev, 0);
170615bcb91dSAlan Stern 		}
170715bcb91dSAlan Stern 	}
170815bcb91dSAlan Stern 
170915bcb91dSAlan Stern 	/* Runtime suspend should be allowed now. */
171015bcb91dSAlan Stern 	else {
171115bcb91dSAlan Stern 
171215bcb91dSAlan Stern 		/* If it used to be prevented then allow it. */
171315bcb91dSAlan Stern 		if (old_use && old_delay < 0)
171415bcb91dSAlan Stern 			atomic_dec(&dev->power.usage_count);
171515bcb91dSAlan Stern 
171615bcb91dSAlan Stern 		/* Maybe we can autosuspend now. */
171715bcb91dSAlan Stern 		rpm_idle(dev, RPM_AUTO);
171815bcb91dSAlan Stern 	}
171962052ab1SRafael J. Wysocki }
172015bcb91dSAlan Stern 
172115bcb91dSAlan Stern /**
172215bcb91dSAlan Stern  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
172315bcb91dSAlan Stern  * @dev: Device to handle.
172415bcb91dSAlan Stern  * @delay: Value of the new delay in milliseconds.
172515bcb91dSAlan Stern  *
172615bcb91dSAlan Stern  * Set the device's power.autosuspend_delay value.  If it changes to negative
172715bcb91dSAlan Stern  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
172815bcb91dSAlan Stern  * changes the other way, allow runtime suspends.
172915bcb91dSAlan Stern  */
173015bcb91dSAlan Stern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
173115bcb91dSAlan Stern {
173215bcb91dSAlan Stern 	int old_delay, old_use;
173315bcb91dSAlan Stern 
173415bcb91dSAlan Stern 	spin_lock_irq(&dev->power.lock);
173515bcb91dSAlan Stern 	old_delay = dev->power.autosuspend_delay;
173662052ab1SRafael J. Wysocki 	old_use = dev->power.use_autosuspend;
17375e928f77SRafael J. Wysocki 	dev->power.autosuspend_delay = delay;
17385e928f77SRafael J. Wysocki 	update_autosuspend(dev, old_delay, old_use);
17395e928f77SRafael J. Wysocki 	spin_unlock_irq(&dev->power.lock);
17405e928f77SRafael J. Wysocki }
17415e928f77SRafael J. Wysocki EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1742c24efa67SRafael J. Wysocki 
17435e928f77SRafael J. Wysocki /**
17445e928f77SRafael J. Wysocki  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
17455e928f77SRafael J. Wysocki  * @dev: Device to handle.
17465e928f77SRafael J. Wysocki  * @use: New value for use_autosuspend.
17475e928f77SRafael J. Wysocki  *
17485e928f77SRafael J. Wysocki  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
17495e928f77SRafael J. Wysocki  * suspends as needed.
17505e928f77SRafael J. Wysocki  */
17515e928f77SRafael J. Wysocki void __pm_runtime_use_autosuspend(struct device *dev, bool use)
175253823639SRafael J. Wysocki {
17535e928f77SRafael J. Wysocki 	int old_delay, old_use;
17545e928f77SRafael J. Wysocki 
17555e928f77SRafael J. Wysocki 	spin_lock_irq(&dev->power.lock);
17565e928f77SRafael J. Wysocki 	old_delay = dev->power.autosuspend_delay;
1757c745253eSTony Lindgren 	old_use = dev->power.use_autosuspend;
17585e928f77SRafael J. Wysocki 	dev->power.use_autosuspend = use;
17595e928f77SRafael J. Wysocki 	update_autosuspend(dev, old_delay, old_use);
17605e928f77SRafael J. Wysocki 	spin_unlock_irq(&dev->power.lock);
17618234f673SVincent Guittot }
17628234f673SVincent Guittot EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
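
/*
 * Illustrative sketch (not part of this file): the usual probe-time
 * autosuspend setup built on the pm_runtime_use_autosuspend() wrapper of this
 * function.  foo_setup_autosuspend() is a hypothetical helper and 2000 ms is
 * an arbitrary example delay.
 */
static void foo_setup_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}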
17635e928f77SRafael J. Wysocki 
17645e928f77SRafael J. Wysocki /**
17655e928f77SRafael J. Wysocki  * pm_runtime_init - Initialize runtime PM fields in given device object.
17665e928f77SRafael J. Wysocki  * @dev: Device object to initialize.
17675e928f77SRafael J. Wysocki  */
17685de85b9dSUlf Hansson void pm_runtime_init(struct device *dev)
17695de85b9dSUlf Hansson {
17705de85b9dSUlf Hansson 	dev->power.runtime_status = RPM_SUSPENDED;
17715de85b9dSUlf Hansson 	dev->power.last_status = RPM_INVALID;
17725de85b9dSUlf Hansson 	dev->power.idle_notification = false;
17735de85b9dSUlf Hansson 
17745de85b9dSUlf Hansson 	dev->power.disable_depth = 1;
17755de85b9dSUlf Hansson 	atomic_set(&dev->power.usage_count, 0);
17765de85b9dSUlf Hansson 
17775de85b9dSUlf Hansson 	dev->power.runtime_error = 0;
17785de85b9dSUlf Hansson 
17795de85b9dSUlf Hansson 	atomic_set(&dev->power.child_count, 0);
17805de85b9dSUlf Hansson 	pm_suspend_ignore_children(dev, false);
17815de85b9dSUlf Hansson 	dev->power.runtime_auto = true;
17825de85b9dSUlf Hansson 
17835de85b9dSUlf Hansson 	dev->power.request_pending = false;
17845de85b9dSUlf Hansson 	dev->power.request = RPM_REQ_NONE;
17855de85b9dSUlf Hansson 	dev->power.deferred_resume = false;
17865de85b9dSUlf Hansson 	dev->power.needs_force_resume = 0;
17875e928f77SRafael J. Wysocki 	INIT_WORK(&dev->power.work, pm_runtime_work);
17885e928f77SRafael J. Wysocki 
17895e928f77SRafael J. Wysocki 	dev->power.timer_expires = 0;
17905e928f77SRafael J. Wysocki 	hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
17915e928f77SRafael J. Wysocki 		      HRTIMER_MODE_ABS);
17925e928f77SRafael J. Wysocki 
17935de85b9dSUlf Hansson 	init_waitqueue_head(&dev->power.wait_queue);
17945e928f77SRafael J. Wysocki }
179537f20416SUlf Hansson 
179637f20416SUlf Hansson /**
1797b06c0b2fSRafael J. Wysocki  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
179821d5c57bSRafael J. Wysocki  * @dev: Device object to re-initialize.
179921d5c57bSRafael J. Wysocki  */
1800b06c0b2fSRafael J. Wysocki void pm_runtime_reinit(struct device *dev)
180121d5c57bSRafael J. Wysocki {
1802b06c0b2fSRafael J. Wysocki 	if (!pm_runtime_enabled(dev)) {
180321d5c57bSRafael J. Wysocki 		if (dev->power.runtime_status == RPM_ACTIVE)
180421d5c57bSRafael J. Wysocki 			pm_runtime_set_suspended(dev);
180521d5c57bSRafael J. Wysocki 		if (dev->power.irq_safe) {
180621d5c57bSRafael J. Wysocki 			spin_lock_irq(&dev->power.lock);
1807c2fa1e1bSJoel Fernandes (Google) 			dev->power.irq_safe = 0;
1808c2fa1e1bSJoel Fernandes (Google) 			spin_unlock_irq(&dev->power.lock);
18094c06c4e6SRafael J. Wysocki 			if (dev->parent)
181036003d4cSRafael J. Wysocki 				pm_runtime_put(dev->parent);
1811b06c0b2fSRafael J. Wysocki 		}
18124c06c4e6SRafael J. Wysocki 	}
1813b06c0b2fSRafael J. Wysocki }
1814b06c0b2fSRafael J. Wysocki 
1815b06c0b2fSRafael J. Wysocki /**
1816b06c0b2fSRafael J. Wysocki  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1817b06c0b2fSRafael J. Wysocki  * @dev: Device object being removed from device hierarchy.
1818b06c0b2fSRafael J. Wysocki  */
1819b06c0b2fSRafael J. Wysocki void pm_runtime_remove(struct device *dev)
1820b06c0b2fSRafael J. Wysocki {
1821b06c0b2fSRafael J. Wysocki 	__pm_runtime_disable(dev, false);
1822b06c0b2fSRafael J. Wysocki 	pm_runtime_reinit(dev);
1823b06c0b2fSRafael J. Wysocki }
1824b06c0b2fSRafael J. Wysocki 
1825b06c0b2fSRafael J. Wysocki /**
1826b06c0b2fSRafael J. Wysocki  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1827b06c0b2fSRafael J. Wysocki  * @dev: Consumer device.
1828c2fa1e1bSJoel Fernandes (Google)  */
1829c2fa1e1bSJoel Fernandes (Google) void pm_runtime_get_suppliers(struct device *dev)
183036003d4cSRafael J. Wysocki {
183136003d4cSRafael J. Wysocki 	struct device_link *link;
1832b06c0b2fSRafael J. Wysocki 	int idx;
183336003d4cSRafael J. Wysocki 
183421d5c57bSRafael J. Wysocki 	idx = device_links_read_lock();
183521d5c57bSRafael J. Wysocki 
183621d5c57bSRafael J. Wysocki 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
183721d5c57bSRafael J. Wysocki 				device_links_read_lock_held())
1838baa8809fSRafael J. Wysocki 		if (link->flags & DL_FLAG_PM_RUNTIME) {
1839baa8809fSRafael J. Wysocki 			link->supplier_preactivated = true;
1840baa8809fSRafael J. Wysocki 			pm_runtime_get_sync(link->supplier);
1841baa8809fSRafael J. Wysocki 		}
1842baa8809fSRafael J. Wysocki 
1843baa8809fSRafael J. Wysocki 	device_links_read_unlock(idx);
1844baa8809fSRafael J. Wysocki }
1845e0e398e2SRafael J. Wysocki 
1846baa8809fSRafael J. Wysocki /**
1847baa8809fSRafael J. Wysocki  * pm_runtime_put_suppliers - Drop references to supplier devices.
1848baa8809fSRafael J. Wysocki  * @dev: Consumer device.
1849baa8809fSRafael J. Wysocki  */
1850baa8809fSRafael J. Wysocki void pm_runtime_put_suppliers(struct device *dev)
1851baa8809fSRafael J. Wysocki {
1852baa8809fSRafael J. Wysocki 	struct device_link *link;
1853e0e398e2SRafael J. Wysocki 	int idx;
1854e0e398e2SRafael J. Wysocki 
1855e0e398e2SRafael J. Wysocki 	idx = device_links_read_lock();
1856e0e398e2SRafael J. Wysocki 
1857e0e398e2SRafael J. Wysocki 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1858e0e398e2SRafael J. Wysocki 				device_links_read_lock_held())
1859e0e398e2SRafael J. Wysocki 		if (link->supplier_preactivated) {
1860e0e398e2SRafael J. Wysocki 			link->supplier_preactivated = false;
1861e0e398e2SRafael J. Wysocki 			pm_runtime_put(link->supplier);
1862e0e398e2SRafael J. Wysocki 		}
1863e0e398e2SRafael J. Wysocki 
1864e0e398e2SRafael J. Wysocki 	device_links_read_unlock(idx);
1865e0e398e2SRafael J. Wysocki }
1866e0e398e2SRafael J. Wysocki 
186707358194SRafael J. Wysocki void pm_runtime_new_link(struct device *dev)
186807358194SRafael J. Wysocki {
1869e0e398e2SRafael J. Wysocki 	spin_lock_irq(&dev->power.lock);
1870e0e398e2SRafael J. Wysocki 	dev->power.links_count++;
18714918e1f8SRafael J. Wysocki 	spin_unlock_irq(&dev->power.lock);
18724918e1f8SRafael J. Wysocki }
18734918e1f8SRafael J. Wysocki 
18741f5c6855SRafael J. Wysocki static void pm_runtime_drop_link_count(struct device *dev)
18751f5c6855SRafael J. Wysocki {
18764918e1f8SRafael J. Wysocki 	spin_lock_irq(&dev->power.lock);
18774918e1f8SRafael J. Wysocki 	WARN_ON(dev->power.links_count == 0);
187821d5c57bSRafael J. Wysocki 	dev->power.links_count--;
187937f20416SUlf Hansson 	spin_unlock_irq(&dev->power.lock);
188037f20416SUlf Hansson }
188137f20416SUlf Hansson 
188237f20416SUlf Hansson /**
18834918e1f8SRafael J. Wysocki  * pm_runtime_drop_link - Prepare for device link removal.
18844918e1f8SRafael J. Wysocki  * @link: Device link going away.
18854918e1f8SRafael J. Wysocki  *
18864918e1f8SRafael J. Wysocki  * Drop the link count of the consumer end of @link and decrement the supplier
18874918e1f8SRafael J. Wysocki  * device's runtime PM usage counter as many times as needed to drop all of the
18884918e1f8SRafael J. Wysocki  * PM runtime references to it from the consumer.
188937f20416SUlf Hansson  */
189037f20416SUlf Hansson void pm_runtime_drop_link(struct device_link *link)
18914918e1f8SRafael J. Wysocki {
18924918e1f8SRafael J. Wysocki 	if (!(link->flags & DL_FLAG_PM_RUNTIME))
18934918e1f8SRafael J. Wysocki 		return;
1894450316dcSRichard Fitzgerald 
1895450316dcSRichard Fitzgerald 	pm_runtime_drop_link_count(link->consumer);
1896450316dcSRichard Fitzgerald 	pm_runtime_release_supplier(link);
1897450316dcSRichard Fitzgerald 	pm_request_idle(link->supplier);
189837f20416SUlf Hansson }
189937f20416SUlf Hansson 
190037f20416SUlf Hansson bool pm_runtime_need_not_resume(struct device *dev)
190137f20416SUlf Hansson {
1902617fcb67SUlf Hansson 	return atomic_read(&dev->power.usage_count) <= 1 &&
190337f20416SUlf Hansson 		(atomic_read(&dev->power.child_count) == 0 ||
190437f20416SUlf Hansson 		 dev->power.ignore_children);
190537f20416SUlf Hansson }
190637f20416SUlf Hansson 
190737f20416SUlf Hansson /**
1908dbcd2d72SAndrzej Hajda  * pm_runtime_force_suspend - Force a device into suspend state if needed.
190937f20416SUlf Hansson  * @dev: Device to suspend.
1910c46a0d5aSUlf Hansson  *
1911617fcb67SUlf Hansson  * Disable runtime PM so we safely can check the device's runtime PM status and
191237f20416SUlf Hansson  * if it is active, invoke its ->runtime_suspend callback to suspend it and
191337f20416SUlf Hansson  * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
191437f20416SUlf Hansson  * usage and children counters don't indicate that the device was in use before
1915c46a0d5aSUlf Hansson  * the system-wide transition under way, decrement its parent's children counter
1916c46a0d5aSUlf Hansson  * (if there is a parent).  Keep runtime PM disabled to preserve the state
19171d9174fbSUlf Hansson  * unless we encounter errors.
19184918e1f8SRafael J. Wysocki  *
19194918e1f8SRafael J. Wysocki  * Typically this function may be invoked from a system suspend callback to make
19204918e1f8SRafael J. Wysocki  * sure the device is put into low power state and it should only be used during
19214918e1f8SRafael J. Wysocki  * system-wide PM transitions to sleep states.  It assumes that the analogous
19221d9174fbSUlf Hansson  * pm_runtime_force_resume() will be used to resume the device.
1923c745253eSTony Lindgren  *
192437f20416SUlf Hansson  * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
1925c745253eSTony Lindgren  * state where this function has called the ->runtime_suspend callback but the
19264918e1f8SRafael J. Wysocki  * PM core marks the driver as runtime active.
1927c745253eSTony Lindgren  */
1928c745253eSTony Lindgren int pm_runtime_force_suspend(struct device *dev)
19294918e1f8SRafael J. Wysocki {
193037f20416SUlf Hansson 	int (*callback)(struct device *);
19314918e1f8SRafael J. Wysocki 	int ret;
193237f20416SUlf Hansson 
1933c46a0d5aSUlf Hansson 	pm_runtime_disable(dev);
193437f20416SUlf Hansson 	if (pm_runtime_status_suspended(dev))
193537f20416SUlf Hansson 		return 0;
193637f20416SUlf Hansson 
193737f20416SUlf Hansson 	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
193837f20416SUlf Hansson 
193937f20416SUlf Hansson 	dev_pm_enable_wake_irq_check(dev, true);
19401d9174fbSUlf Hansson 	ret = callback ? callback(dev) : 0;
194137f20416SUlf Hansson 	if (ret)
194237f20416SUlf Hansson 		goto err;
194337f20416SUlf Hansson 
194437f20416SUlf Hansson 	dev_pm_enable_wake_irq_complete(dev);
19454918e1f8SRafael J. Wysocki 
19464918e1f8SRafael J. Wysocki 	/*
19474918e1f8SRafael J. Wysocki 	 * If the device can stay in suspend after the system-wide transition
194837f20416SUlf Hansson 	 * to the working state that will follow, drop the children counter of
19491d9174fbSUlf Hansson 	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
195037f20416SUlf Hansson 	 * function will be called again for it in the meantime.
195137f20416SUlf Hansson 	 */
195237f20416SUlf Hansson 	if (pm_runtime_need_not_resume(dev)) {
195337f20416SUlf Hansson 		pm_runtime_set_suspended(dev);
195437f20416SUlf Hansson 	} else {
195537f20416SUlf Hansson 		__update_runtime_status(dev, RPM_SUSPENDED);
1956d2677d57SRafael J. Wysocki 		dev->power.needs_force_resume = 1;
19579f5b5274SUlf Hansson 	}
19589f5b5274SUlf Hansson 
19591d9174fbSUlf Hansson 	return 0;
19604918e1f8SRafael J. Wysocki 
19614918e1f8SRafael J. Wysocki err:
19621d9174fbSUlf Hansson 	dev_pm_disable_wake_irq_check(dev, true);
19634918e1f8SRafael J. Wysocki 	pm_runtime_enable(dev);
19641d9174fbSUlf Hansson 	return ret;
19654918e1f8SRafael J. Wysocki }
196637f20416SUlf Hansson EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1967c46a0d5aSUlf Hansson 
1968617fcb67SUlf Hansson /**
19690ae3aeefSUlf Hansson  * pm_runtime_force_resume - Force a device into resume state if needed.
19700ae3aeefSUlf Hansson  * @dev: Device to resume.
1971c46a0d5aSUlf Hansson  *
19720ae3aeefSUlf Hansson  * Prior invoking this function we expect the user to have brought the device
19730ae3aeefSUlf Hansson  * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
19740ae3aeefSUlf Hansson  * those actions and bring the device into full power, if it is expected to be
197537f20416SUlf Hansson  * used on system resume.  In the other case, we defer the resume to be managed
197637f20416SUlf Hansson  * via runtime PM.
1977c745253eSTony Lindgren  *
197837f20416SUlf Hansson  * Typically this function may be invoked from a system resume callback.
197937f20416SUlf Hansson  */
198037f20416SUlf Hansson int pm_runtime_force_resume(struct device *dev)
198137f20416SUlf Hansson {
1982 	int (*callback)(struct device *);
1983 	int ret = 0;
1984 
1985 	if (!dev->power.needs_force_resume)
1986 		goto out;
1987 
1988 	/*
1989 	 * The value of the parent's children counter is correct already, so
1990 	 * just update the status of the device.
1991 	 */
1992 	__update_runtime_status(dev, RPM_ACTIVE);
1993 
1994 	callback = RPM_GET_CALLBACK(dev, runtime_resume);
1995 
1996 	dev_pm_disable_wake_irq_check(dev, false);
1997 	ret = callback ? callback(dev) : 0;
1998 	if (ret) {
1999 		pm_runtime_set_suspended(dev);
2000 		dev_pm_enable_wake_irq_check(dev, false);
2001 		goto out;
2002 	}
2003 
2004 	pm_runtime_mark_last_busy(dev);
2005 out:
2006 	dev->power.needs_force_resume = 0;
2007 	pm_runtime_enable(dev);
2008 	return ret;
2009 }
2010 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
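
/*
 * Illustrative sketch (not part of this file): these two helpers are commonly
 * wired up as a driver's system sleep callbacks next to its runtime PM
 * callbacks.  The foo_* names are hypothetical.
 */
static int foo_runtime_suspend(struct device *dev)
{
	return 0;	/* hardware power-down would go here */
}

static int foo_runtime_resume(struct device *dev)
{
	return 0;	/* hardware power-up would go here */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};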
2011