/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * pm_runtime.h - Device run-time power management helper functions.
 *
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
 */

#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm.h>

#include <linux/jiffies.h>

/* Runtime PM flag argument bits */
#define RPM_ASYNC		0x01	/* Request is asynchronous */
#define RPM_NOWAIT		0x02	/* Don't wait for concurrent
					   state change */
#define RPM_GET_PUT		0x04	/* Increment/decrement the
					   usage_count */
#define RPM_AUTO		0x08	/* Use autosuspend_delay */

/*
 * Use this for defining a set of PM operations to be used in all situations
 * (system suspend, hibernation or runtime PM).
 *
 * Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS()
 * macro, which uses the provided callbacks for both runtime PM and system
 * sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend()
 * and pm_runtime_force_resume() for its system sleep callbacks.
 *
 * If the underlying dev_pm_ops struct symbol has to be exported, use
 * EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead.
 */
#define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	_DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \
			   pm_runtime_force_resume, suspend_fn, \
			   resume_fn, idle_fn)

#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	EXPORT_DEV_PM_OPS(name) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	EXPORT_GPL_DEV_PM_OPS(name) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
#define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
	EXPORT_NS_DEV_PM_OPS(name, ns) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
#define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
	EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
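
/*
 * Illustrative sketch (not taken from any particular driver): a hypothetical
 * "foo" platform driver could hook its runtime PM callbacks into both runtime
 * PM and system sleep with DEFINE_RUNTIME_DEV_PM_OPS().  The identifiers
 * foo_runtime_suspend(), foo_runtime_resume(), foo_pm_ops and foo_driver are
 * made up for this example.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		return 0;	// put the hardware into a low-power state
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		return 0;	// bring the hardware back to full power
 *	}
 *
 *	static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
 *					 foo_runtime_resume, NULL);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.pm	= pm_ptr(&foo_pm_ops),
 *		},
 *	};
 *
 * EXPORT_RUNTIME_DEV_PM_OPS() and its GPL/namespace variants take the same
 * arguments and are used when the resulting struct symbol must be exported.
 */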

#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;

static inline bool queue_pm_work(struct work_struct *work)
{
	return queue_work(pm_wq, work);
}

extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);

extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
extern void pm_runtime_release_supplier(struct device_link *link);

extern int devm_pm_runtime_enable(struct device *dev);

/**
 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
 * @dev: Target device.
 *
 * Increment the runtime PM usage counter of @dev if its runtime PM status is
 * %RPM_ACTIVE and its runtime PM usage counter is greater than 0.
 */
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return pm_runtime_get_if_active(dev, false);
}
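
/*
 * Minimal usage sketch (assumed, not from a specific driver): in a hot path
 * that should only touch the hardware when it is already powered up and in
 * use, a driver might do something like the following; foo_update_stats() is
 * a hypothetical helper.
 *
 *	if (pm_runtime_get_if_in_use(dev) > 0) {
 *		foo_update_stats(dev);		// device is RPM_ACTIVE here
 *		pm_runtime_put(dev);
 *	}
 *
 * A return value of 0 or a negative error code means the usage counter was
 * not incremented, so no matching "put" may be issued in that case.
 */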

/**
 * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
 * @dev: Target device.
 * @enable: Whether or not to ignore possible dependencies on children.
 *
 * The dependencies of @dev on its children will not be taken into account by
 * the runtime PM framework going forward if @enable is %true, or they will
 * be taken into account otherwise.
 */
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
	dev->power.ignore_children = enable;
}

/**
 * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device.
 * @dev: Target device.
 */
static inline void pm_runtime_get_noresume(struct device *dev)
{
	atomic_inc(&dev->power.usage_count);
}

/**
 * pm_runtime_put_noidle - Drop runtime PM usage counter of a device.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev unless it is 0 already.
 */
static inline void pm_runtime_put_noidle(struct device *dev)
{
	atomic_add_unless(&dev->power.usage_count, -1, 0);
}
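
/*
 * Sketch of the usual pairing (an assumption about typical usage, not a rule
 * enforced by the API): pm_runtime_get_noresume() and pm_runtime_put_noidle()
 * only adjust the usage counter, so they are handy for temporarily blocking
 * runtime suspend without triggering a resume or an "idle check":
 *
 *	pm_runtime_get_noresume(dev);	// usage count > 0, no state change
 *	...				// section where runtime suspend is unwanted
 *	pm_runtime_put_noidle(dev);	// drop the count, no idle check queued
 */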

/**
 * pm_runtime_suspended - Check whether or not a device is runtime-suspended.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev and its runtime PM status is
 * %RPM_SUSPENDED, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED
		&& !dev->power.disable_depth;
}

/**
 * pm_runtime_active - Check whether or not a device is runtime-active.
 * @dev: Target device.
 *
 * Return %true if runtime PM is disabled for @dev or its runtime PM status is
 * %RPM_ACTIVE, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_active(struct device *dev)
{
	return dev->power.runtime_status == RPM_ACTIVE
		|| dev->power.disable_depth;
}

/**
 * pm_runtime_status_suspended - Check if runtime PM status is "suspended".
 * @dev: Target device.
 *
 * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false
 * otherwise, regardless of whether or not runtime PM has been enabled for @dev.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which the
 * runtime PM status of @dev cannot change.
 */
static inline bool pm_runtime_status_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED;
}
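
/*
 * Hedged example of a common pattern: a system sleep callback of a
 * hypothetical driver may skip hardware access when the device is already
 * runtime-suspended (foo_suspend() and foo_save_context() are names made up
 * for this sketch):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (pm_runtime_status_suspended(dev))
 *			return 0;	// already powered down, nothing to save
 *
 *		foo_save_context(dev);
 *		return 0;
 *	}
 */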

/**
 * pm_runtime_enabled - Check if runtime PM is enabled.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev.
 */
static inline bool pm_runtime_enabled(struct device *dev)
{
	return !dev->power.disable_depth;
}

/**
 * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present.
 * @dev: Target device.
 *
 * Return %true if @dev is a special device without runtime PM callbacks or
 * %false otherwise.
 */
static inline bool pm_runtime_has_no_callbacks(struct device *dev)
{
	return dev->power.no_callbacks;
}

/**
 * pm_runtime_mark_last_busy - Update the last access time of a device.
 * @dev: Target device.
 *
 * Update the last access time of @dev used by the runtime PM autosuspend
 * mechanism to the current time as returned by ktime_get_mono_fast_ns().
 */
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
	WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}

/**
 * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context.
 * @dev: Target device.
 *
 * Return %true if @dev has been marked as an "IRQ-safe" device (with respect
 * to runtime PM), in which case its runtime PM callbacks can be expected to
 * work correctly when invoked from interrupt handlers.
 */
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
	return dev->power.irq_safe;
}

extern u64 pm_runtime_suspended_time(struct device *dev);

#else /* !CONFIG_PM */

static inline bool queue_pm_work(struct work_struct *work) { return false; }

static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int pm_runtime_force_resume(struct device *dev) { return 0; }

static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return -EINVAL;
}
static inline int pm_runtime_get_if_active(struct device *dev,
					   bool ign_usage_count)
{
	return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
					  unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}

static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }

static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }

static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }

static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
						bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
						    int delay) {}
static inline u64 pm_runtime_autosuspend_expiration(
				struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
						bool enable) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device_link *link) {}
static inline void pm_runtime_release_supplier(struct device_link *link) {}

#endif /* !CONFIG_PM */

/**
 * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Invoke the "idle check" callback of @dev and, depending on its return value,
 * set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend has been enabled for it).
 */
static inline int pm_runtime_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, 0);
}

/**
 * pm_runtime_suspend - Suspend a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, 0);
}

/**
 * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend is enabled for it) without engaging its "idle check" callback.
 */
static inline int pm_runtime_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_AUTO);
}

/**
 * pm_runtime_resume - Resume a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, 0);
}

/**
 * pm_request_idle - Queue up "idle check" execution for a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
 * asynchronously.
 */
static inline int pm_request_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_ASYNC);
}

/**
 * pm_request_resume - Queue up runtime-resume of a device.
 * @dev: Target device.
 */
static inline int pm_request_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_ASYNC);
}

/**
 * pm_request_autosuspend - Queue up autosuspend of a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_autosuspend() for
 * @dev asynchronously.
 */
static inline int pm_request_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}

/**
 * pm_runtime_get - Bump up usage counter and queue up resume of a device.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and queue up a work item to
 * carry out runtime-resume of it.
 */
static inline int pm_runtime_get(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_get_sync - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of
 * it synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_resume() and the runtime PM usage counter of @dev remains
 * incremented in all cases, even if it returns an error code.
 * Consider using pm_runtime_resume_and_get() instead of it, especially
 * if its return value is checked by the caller, as this is likely to result
 * in cleaner code.
 */
static inline int pm_runtime_get_sync(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Resume @dev synchronously and if that is successful, increment its runtime
 * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
 * incremented or a negative error code otherwise.
 */
static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret;

	ret = __pm_runtime_resume(dev, RPM_GET_PUT);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}
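
/*
 * Minimal sketch of the recommended get/put pattern (foo_transfer() and
 * foo_do_io() are hypothetical): resume the device before touching it, and
 * only drop the usage counter when it was actually taken.
 *
 *	static int foo_transfer(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_resume_and_get(dev);
 *		if (ret < 0)
 *			return ret;	// counter not held, no put needed
 *
 *		ret = foo_do_io(dev);
 *
 *		pm_runtime_put(dev);
 *		return ret;
 *	}
 */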

/**
 * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_idle().
 */
static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
 */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev,
	    RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}
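
/*
 * Sketch of the usual autosuspend idiom (assuming autosuspend has been set up
 * as described near pm_runtime_use_autosuspend() below): after finishing a
 * burst of activity, refresh the last-busy timestamp and drop the usage
 * counter so the device is only suspended once the autosuspend delay expires.
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */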

/**
 * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, invoke the "idle check" callback of @dev and, depending on its
 * return value, set up autosuspend of @dev or suspend it (depending on whether
 * or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_idle() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, carry out runtime-suspend of @dev synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
 * on whether or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}

/**
 * pm_runtime_set_active - Set runtime PM status to "active".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that
 * dependencies of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_active(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_ACTIVE);
}

/**
 * pm_runtime_set_suspended - Set runtime PM status to "suspended".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
 * dependencies of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_suspended(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}
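
/*
 * Sketch of a typical probe-time sequence (an illustration, not a requirement
 * of the API): tell the runtime PM core what state the hardware is actually
 * in before enabling runtime PM for the device.
 *
 *	// hardware has been powered up by the probe path
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * If the hardware starts out powered down, pm_runtime_set_suspended() (or
 * nothing at all, since "suspended" is the initial status) would be used
 * instead.  devm_pm_runtime_enable() can replace the explicit
 * pm_runtime_enable() / pm_runtime_disable() pairing when managed cleanup is
 * preferred.
 */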

/**
 * pm_runtime_disable - Disable runtime PM for a device.
 * @dev: Target device.
 *
 * Prevent the runtime PM framework from working with @dev (by incrementing its
 * "blocking" counter).
 *
 * For each invocation of this function for @dev there must be a matching
 * pm_runtime_enable() call in order for runtime PM to be enabled for it.
 */
static inline void pm_runtime_disable(struct device *dev)
{
	__pm_runtime_disable(dev, true);
}

/**
 * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device.
 * @dev: Target device.
 *
 * Allow the runtime PM autosuspend mechanism to be used for @dev whenever
 * requested (or "autosuspend" will be handled as direct runtime-suspend for
 * it).
 *
 * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
 * at driver exit time unless your driver initially enabled pm_runtime
 * with devm_pm_runtime_enable() (which handles it for you).
 */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, true);
}
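
/*
 * Sketch of autosuspend setup during probe and the matching teardown on the
 * removal path (the 2000 ms delay is an arbitrary example value):
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * and later, when the driver is unbound:
 *
 *	pm_runtime_disable(dev);
 *	pm_runtime_dont_use_autosuspend(dev);
 */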

/**
 * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used.
 * @dev: Target device.
 *
 * Prevent the runtime PM autosuspend mechanism from being used for @dev which
 * means that "autosuspend" will be handled as direct runtime-suspend for it
 * going forward.
 */
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, false);
}

#endif