/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * pm_runtime.h - Device run-time power management helper functions.
 *
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
 */

#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm.h>

#include <linux/jiffies.h>

/* Runtime PM flag argument bits */
#define RPM_ASYNC		0x01	/* Request is asynchronous */
#define RPM_NOWAIT		0x02	/* Don't wait for concurrent
					   state change */
#define RPM_GET_PUT		0x04	/* Increment/decrement the
					   usage_count */
#define RPM_AUTO		0x08	/* Use autosuspend_delay */
/*
 * Use this for defining a set of PM operations to be used in all situations
 * (system suspend, hibernation or runtime PM).
 *
 * Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS()
 * macro, which uses the provided callbacks for both runtime PM and system
 * sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend()
 * and pm_runtime_force_resume() for its system sleep callbacks.
 *
 * If the underlying dev_pm_ops struct symbol has to be exported, use
 * EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead.
 */
#define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	_DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \
			   pm_runtime_force_resume, suspend_fn, \
			   resume_fn, idle_fn)

#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	_EXPORT_DEV_PM_OPS(name, pm_runtime_force_suspend, pm_runtime_force_resume, \
			   suspend_fn, resume_fn, idle_fn, "", "")
#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	_EXPORT_DEV_PM_OPS(name, pm_runtime_force_suspend, pm_runtime_force_resume, \
			   suspend_fn, resume_fn, idle_fn, "_gpl", "")
#define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
	_EXPORT_DEV_PM_OPS(name, pm_runtime_force_suspend, pm_runtime_force_resume, \
			   suspend_fn, resume_fn, idle_fn, "", #ns)
#define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
	_EXPORT_DEV_PM_OPS(name, pm_runtime_force_suspend, pm_runtime_force_resume, \
			   suspend_fn, resume_fn, idle_fn, "_gpl", #ns)
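
/*
 * Illustrative sketch (not taken from any particular driver; the "foo" names
 * are hypothetical): a driver that only needs runtime PM callbacks can use
 * DEFINE_RUNTIME_DEV_PM_OPS() and have system sleep handled through
 * pm_runtime_force_suspend()/pm_runtime_force_resume():
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		// Put the hardware into its low-power state here.
 *		return 0;
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		// Bring the hardware back to its functional state here.
 *		return 0;
 *	}
 *
 *	static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
 *					 foo_runtime_resume, NULL);
 *
 * The driver would then point its .pm field at &foo_pm_ops (typically via
 * pm_ptr(), defined in linux/pm.h, so the ops can be dropped when CONFIG_PM
 * is unset).
 */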

#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;

static inline bool queue_pm_work(struct work_struct *work)
{
	return queue_work(pm_wq, work);
}

extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);

extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
						 s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
extern void pm_runtime_release_supplier(struct device_link *link);

extern int devm_pm_runtime_enable(struct device *dev);

/**
 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
 * @dev: Target device.
 *
 * Increment the runtime PM usage counter of @dev if its runtime PM status is
 * %RPM_ACTIVE and its runtime PM usage counter is greater than 0.
 */
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return pm_runtime_get_if_active(dev, false);
}
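
/*
 * Illustrative sketch (hypothetical "foo" helper): a fast path that only
 * wants to touch the hardware when it is already powered up and in use
 * might do something like:
 *
 *	if (pm_runtime_get_if_in_use(dev) > 0) {
 *		// The device is RPM_ACTIVE and its usage counter was bumped,
 *		// so the hardware can be accessed here.
 *		foo_hw_poke(dev);
 *		pm_runtime_put(dev);
 *	}
 *
 * A return value of 0 means the condition was not met (the counter was left
 * alone), and a negative value means runtime PM is disabled for the device.
 */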

/**
 * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
 * @dev: Target device.
 * @enable: Whether or not to ignore possible dependencies on children.
 *
 * The dependencies of @dev on its children will not be taken into account by
 * the runtime PM framework going forward if @enable is %true, or they will
 * be taken into account otherwise.
 */
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
	dev->power.ignore_children = enable;
}

/**
 * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device.
 * @dev: Target device.
 */
static inline void pm_runtime_get_noresume(struct device *dev)
{
	atomic_inc(&dev->power.usage_count);
}

/**
 * pm_runtime_put_noidle - Drop runtime PM usage counter of a device.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev unless it is 0 already.
 */
static inline void pm_runtime_put_noidle(struct device *dev)
{
	atomic_add_unless(&dev->power.usage_count, -1, 0);
}
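
/*
 * Illustrative sketch: these two helpers are typically used as a pair to
 * hold the usage counter up across a section of code, so that a runtime
 * suspend cannot be started in the meantime, without resuming the device
 * first and without queuing an "idle check" afterwards (the "foo" call is
 * hypothetical):
 *
 *	pm_runtime_get_noresume(dev);
 *	foo_do_bookkeeping(dev);
 *	pm_runtime_put_noidle(dev);
 */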

/**
 * pm_runtime_suspended - Check whether or not a device is runtime-suspended.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev and its runtime PM status is
 * %RPM_SUSPENDED, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED
		&& !dev->power.disable_depth;
}

/**
 * pm_runtime_active - Check whether or not a device is runtime-active.
 * @dev: Target device.
 *
 * Return %true if runtime PM is disabled for @dev or its runtime PM status is
 * %RPM_ACTIVE, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_active(struct device *dev)
{
	return dev->power.runtime_status == RPM_ACTIVE
		|| dev->power.disable_depth;
}

/**
 * pm_runtime_status_suspended - Check if runtime PM status is "suspended".
 * @dev: Target device.
 *
 * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false
 * otherwise, regardless of whether or not runtime PM has been enabled for @dev.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which the
 * runtime PM status of @dev cannot change.
 */
static inline bool pm_runtime_status_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED;
}

/**
 * pm_runtime_enabled - Check if runtime PM is enabled.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev.
 */
static inline bool pm_runtime_enabled(struct device *dev)
{
	return !dev->power.disable_depth;
}

/**
 * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present.
 * @dev: Target device.
 *
 * Return %true if @dev is a special device without runtime PM callbacks or
 * %false otherwise.
 */
static inline bool pm_runtime_has_no_callbacks(struct device *dev)
{
	return dev->power.no_callbacks;
}

/**
 * pm_runtime_mark_last_busy - Update the last access time of a device.
 * @dev: Target device.
 *
 * Update the last access time of @dev used by the runtime PM autosuspend
 * mechanism to the current time as returned by ktime_get_mono_fast_ns().
 */
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
	WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}

/**
 * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context.
 * @dev: Target device.
 *
 * Return %true if @dev has been marked as an "IRQ-safe" device (with respect
 * to runtime PM), in which case its runtime PM callbacks can be expected to
 * work correctly when invoked from interrupt handlers.
 */
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
	return dev->power.irq_safe;
}

extern u64 pm_runtime_suspended_time(struct device *dev);

#else /* !CONFIG_PM */

static inline bool queue_pm_work(struct work_struct *work) { return false; }

static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int pm_runtime_force_resume(struct device *dev) { return 0; }

static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return -EINVAL;
}
static inline int pm_runtime_get_if_active(struct device *dev,
					   bool ign_usage_count)
{
	return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
					    unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}

static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }

static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }

static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }

static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
						bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
						int delay) {}
static inline u64 pm_runtime_autosuspend_expiration(
				struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
						bool enable) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device_link *link) {}
static inline void pm_runtime_release_supplier(struct device_link *link) {}

#endif /* !CONFIG_PM */

/**
 * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Invoke the "idle check" callback of @dev and, depending on its return value,
 * set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend has been enabled for it).
 */
static inline int pm_runtime_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, 0);
}

/**
 * pm_runtime_suspend - Suspend a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, 0);
}

/**
 * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend is enabled for it) without engaging its "idle check" callback.
 */
static inline int pm_runtime_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_AUTO);
}

/**
 * pm_runtime_resume - Resume a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, 0);
}

/**
 * pm_request_idle - Queue up "idle check" execution for a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
 * asynchronously.
 */
static inline int pm_request_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_ASYNC);
}

/**
 * pm_request_resume - Queue up runtime-resume of a device.
 * @dev: Target device.
 */
static inline int pm_request_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_ASYNC);
}

/**
 * pm_request_autosuspend - Queue up autosuspend of a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_autosuspend() for
 * @dev asynchronously.
 */
static inline int pm_request_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}

/**
 * pm_runtime_get - Bump up usage counter and queue up resume of a device.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and queue up a work item to
 * carry out runtime-resume of it.
 */
static inline int pm_runtime_get(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_get_sync - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of
 * it synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_resume() and the runtime PM usage counter of @dev remains
 * incremented in all cases, even if it returns an error code.
 * Consider using pm_runtime_resume_and_get() instead of it, especially
 * if its return value is checked by the caller, as this is likely to result
 * in cleaner code.
 */
static inline int pm_runtime_get_sync(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Resume @dev synchronously and if that is successful, increment its runtime
 * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
 * incremented or a negative error code otherwise.
 */
static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret;

	ret = __pm_runtime_resume(dev, RPM_GET_PUT);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}
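
/*
 * Illustrative sketch (hypothetical "foo" I/O path): error handling is
 * simpler with pm_runtime_resume_and_get() than with pm_runtime_get_sync(),
 * because the usage counter does not have to be dropped on failure:
 *
 *	ret = pm_runtime_resume_and_get(dev);
 *	if (ret < 0)
 *		return ret;
 *
 *	foo_do_io(dev);
 *
 *	pm_runtime_put(dev);
 */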

/**
 * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_idle().
 */
static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
 */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev,
	    RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}
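
/*
 * Illustrative sketch: this is commonly paired with
 * pm_runtime_mark_last_busy(), so that the autosuspend delay is measured
 * from the last hardware access (the "foo" call is hypothetical):
 *
 *	foo_do_io(dev);
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */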

/**
 * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, invoke the "idle check" callback of @dev and, depending on its
 * return value, set up autosuspend of @dev or suspend it (depending on whether
 * or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_idle() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, carry out runtime-suspend of @dev synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
 * on whether or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}

/**
 * pm_runtime_set_active - Set runtime PM status to "active".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies
 * of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_active(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_ACTIVE);
}
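
/*
 * Illustrative sketch (hypothetical probe path): a driver whose hardware is
 * already powered up at probe time would typically record that fact before
 * enabling runtime PM:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * If the hardware starts out powered down, the "suspended" status (which is
 * the initial status of devices) is left in place and only
 * pm_runtime_enable() is called.
 */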

/**
 * pm_runtime_set_suspended - Set runtime PM status to "suspended".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
 * dependencies of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_suspended(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}

/**
 * pm_runtime_disable - Disable runtime PM for a device.
 * @dev: Target device.
 *
 * Prevent the runtime PM framework from working with @dev (by incrementing its
 * "blocking" counter).
 *
 * For each invocation of this function for @dev there must be a matching
 * pm_runtime_enable() call in order for runtime PM to be enabled for it.
 */
static inline void pm_runtime_disable(struct device *dev)
{
	__pm_runtime_disable(dev, true);
}

/**
 * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device.
 * @dev: Target device.
 *
 * Allow the runtime PM autosuspend mechanism to be used for @dev whenever
 * requested (or "autosuspend" will be handled as direct runtime-suspend for
 * it).
 *
 * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
 * at driver exit time unless your driver initially enabled pm_runtime
 * with devm_pm_runtime_enable() (which handles it for you).
 */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, true);
}
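
/*
 * Illustrative sketch (hypothetical driver; the 2000 ms delay is an arbitrary
 * example value): autosuspend is usually configured once at probe time and
 * undone at removal:
 *
 *	// probe
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 *	// remove
 *	pm_runtime_disable(dev);
 *	pm_runtime_dont_use_autosuspend(dev);
 */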

/**
 * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used.
 * @dev: Target device.
 *
 * Prevent the runtime PM autosuspend mechanism from being used for @dev which
 * means that "autosuspend" will be handled as direct runtime-suspend for it
 * going forward.
 */
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, false);
}

#endif