/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SUSPEND_H
#define _LINUX_SUSPEND_H

#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <linux/freezer.h>
#include <linux/android_kabi.h>
#include <asm/errno.h>

#ifdef CONFIG_VT
extern void pm_set_vt_switch(int);
#else
static inline void pm_set_vt_switch(int do_switch)
{
}
#endif

#ifdef CONFIG_VT_CONSOLE_SLEEP
extern void pm_prepare_console(void);
extern void pm_restore_console(void);
#else
static inline void pm_prepare_console(void)
{
}

static inline void pm_restore_console(void)
{
}
#endif

typedef int __bitwise suspend_state_t;

#define PM_SUSPEND_ON		((__force suspend_state_t) 0)
#define PM_SUSPEND_TO_IDLE	((__force suspend_state_t) 1)
#define PM_SUSPEND_STANDBY	((__force suspend_state_t) 2)
#define PM_SUSPEND_MEM		((__force suspend_state_t) 3)
#define PM_SUSPEND_MIN		PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX		((__force suspend_state_t) 4)

enum suspend_stat_step {
	SUSPEND_FREEZE = 1,
	SUSPEND_PREPARE,
	SUSPEND_SUSPEND,
	SUSPEND_SUSPEND_LATE,
	SUSPEND_SUSPEND_NOIRQ,
	SUSPEND_RESUME_NOIRQ,
	SUSPEND_RESUME_EARLY,
	SUSPEND_RESUME
};

struct suspend_stats {
	int success;
	int fail;
	int failed_freeze;
	int failed_prepare;
	int failed_suspend;
	int failed_suspend_late;
	int failed_suspend_noirq;
	int failed_resume;
	int failed_resume_early;
	int failed_resume_noirq;
#define REC_FAILED_NUM	2
	int last_failed_dev;
	char failed_devs[REC_FAILED_NUM][40];
	int last_failed_errno;
	int errno[REC_FAILED_NUM];
	int last_failed_step;
	enum suspend_stat_step failed_steps[REC_FAILED_NUM];
};

extern struct suspend_stats suspend_stats;

static inline void dpm_save_failed_dev(const char *name)
{
	strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
		name,
		sizeof(suspend_stats.failed_devs[0]));
	suspend_stats.last_failed_dev++;
	suspend_stats.last_failed_dev %= REC_FAILED_NUM;
}

static inline void dpm_save_failed_errno(int err)
{
	suspend_stats.errno[suspend_stats.last_failed_errno] = err;
	suspend_stats.last_failed_errno++;
	suspend_stats.last_failed_errno %= REC_FAILED_NUM;
}

static inline void dpm_save_failed_step(enum suspend_stat_step step)
{
	suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
	suspend_stats.last_failed_step++;
	suspend_stats.last_failed_step %= REC_FAILED_NUM;
}
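
/*
 * Illustrative sketch (not part of this header): the dpm_save_failed_*()
 * helpers above are meant to be used together when a device fails at a given
 * step of a system-wide transition, roughly the way the PM core records such
 * failures in drivers/base/power/main.c.  Here 'dev' and 'error' stand for
 * the failing device and its error code:
 *
 *	if (error) {
 *		suspend_stats.failed_suspend++;
 *		dpm_save_failed_step(SUSPEND_SUSPEND);
 *		dpm_save_failed_dev(dev_name(dev));
 *		dpm_save_failed_errno(error);
 *	}
 */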
/**
 * struct platform_suspend_ops - Callbacks for managing platform dependent
 *	system sleep states.
 *
 * @valid: Callback to determine if given system sleep state is supported by
 *	the platform.
 *	Valid (ie. supported) states are advertised in /sys/power/state.  Note
 *	that it still may be impossible to enter given system sleep state if the
 *	conditions aren't right.
 *	There is the %suspend_valid_only_mem function available that can be
 *	assigned to this if the platform only supports mem sleep.
 *
 * @begin: Initialise a transition to given system sleep state.
 *	@begin() is executed right prior to suspending devices.  The information
 *	conveyed to the platform code by @begin() should be disregarded by it as
 *	soon as @end() is executed.  If @begin() fails (ie. returns nonzero),
 *	@prepare(), @enter() and @finish() will not be called by the PM core.
 *	This callback is optional.  However, if it is implemented, the argument
 *	passed to @enter() is redundant and should be ignored.
 *
 * @prepare: Prepare the platform for entering the system sleep state indicated
 *	by @begin().
 *	@prepare() is called right after devices have been suspended (ie. the
 *	appropriate .suspend() method has been executed for each device) and
 *	before device drivers' late suspend callbacks are executed.  It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@prepare_late(), @enter(),
 *	and @wake() will not be called in that case).
 *
 * @prepare_late: Finish preparing the platform for entering the system sleep
 *	state indicated by @begin().
 *	@prepare_late is called before disabling nonboot CPUs and after
 *	device drivers' late suspend callbacks have been executed.  It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@enter() will not be
 *	executed).
 *
 * @enter: Enter the system sleep state indicated by @begin() or represented by
 *	the argument if @begin() is not implemented.
 *	This callback is mandatory.  It returns 0 on success or a negative
 *	error code otherwise, in which case the system cannot enter the desired
 *	sleep state.
 *
 * @wake: Called when the system has just left a sleep state, right after
 *	the nonboot CPUs have been enabled and before device drivers' early
 *	resume callbacks are executed.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare_late().  If implemented, it is always called
 *	after @prepare_late() and @enter(), even if one of them fails.
 *
 * @finish: Finish wake-up of the platform.
 *	@finish is called right prior to calling device drivers' regular resume
 *	callbacks.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare().  If implemented, it is always called after
 *	@enter() and @wake(), even if any of them fails.  It is executed after
 *	a failing @prepare.
 *
 * @suspend_again: Returns whether the system should suspend again (true) or
 *	not (false).  If the platform wants to poll sensors or execute some
 *	code while suspended, without waking up userspace and most devices,
 *	the @suspend_again callback is the place to do it, assuming that a
 *	periodic wakeup or alarm wakeup has already been set up.  This allows
 *	code to run while the system remains suspended from the point of view
 *	of userland and devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state or
 *	the transition to the sleep state has been aborted.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @begin().  Accordingly, platforms implementing @begin()
 *	should also provide a @end() which cleans up transitions aborted before
 *	@enter().
 *
 * @recover: Recover the platform from a suspend failure.
 *	Called by the PM core if the suspending of devices fails.
 *	This callback is optional and should only be implemented by platforms
 *	which require special recovery actions in that situation.
 */
struct platform_suspend_ops {
	int (*valid)(suspend_state_t state);
	int (*begin)(suspend_state_t state);
	int (*prepare)(void);
	int (*prepare_late)(void);
	int (*enter)(suspend_state_t state);
	void (*wake)(void);
	void (*finish)(void);
	bool (*suspend_again)(void);
	void (*end)(void);
	void (*recover)(void);

	ANDROID_KABI_RESERVE(1);
};
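
/*
 * Illustrative sketch (not part of this header): a hypothetical platform
 * driver could describe its sleep support with an ops table like the one
 * below and register it with suspend_set_ops(), declared further down in
 * this file.  Only .enter is mandatory; the foo_* names are placeholders.
 *
 *	static int foo_suspend_enter(suspend_state_t state)
 *	{
 *		return foo_hw_enter_sleep(state);
 *	}
 *
 *	static const struct platform_suspend_ops foo_suspend_ops = {
 *		.valid = suspend_valid_only_mem,
 *		.enter = foo_suspend_enter,
 *	};
 *
 *	static int __init foo_suspend_init(void)
 *	{
 *		suspend_set_ops(&foo_suspend_ops);
 *		return 0;
 *	}
 */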
struct platform_s2idle_ops {
	int (*begin)(void);
	int (*prepare)(void);
	int (*prepare_late)(void);
	bool (*wake)(void);
	void (*restore_early)(void);
	void (*restore)(void);
	void (*end)(void);

	ANDROID_KABI_RESERVE(1);
};
#ifdef CONFIG_SUSPEND
extern suspend_state_t mem_sleep_current;
extern suspend_state_t mem_sleep_default;

/**
 * suspend_set_ops - set platform dependent suspend operations
 * @ops: The new suspend operations to set.
 */
extern void suspend_set_ops(const struct platform_suspend_ops *ops);
extern int suspend_valid_only_mem(suspend_state_t state);

extern unsigned int pm_suspend_global_flags;

#define PM_SUSPEND_FLAG_FW_SUSPEND	BIT(0)
#define PM_SUSPEND_FLAG_FW_RESUME	BIT(1)
#define PM_SUSPEND_FLAG_NO_PLATFORM	BIT(2)

static inline void pm_suspend_clear_flags(void)
{
	pm_suspend_global_flags = 0;
}

static inline void pm_set_suspend_via_firmware(void)
{
	pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_SUSPEND;
}

static inline void pm_set_resume_via_firmware(void)
{
	pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME;
}

static inline void pm_set_suspend_no_platform(void)
{
	pm_suspend_global_flags |= PM_SUSPEND_FLAG_NO_PLATFORM;
}

/**
 * pm_suspend_via_firmware - Check if platform firmware will suspend the system.
 *
 * To be called during system-wide power management transitions to sleep states
 * or during the subsequent system-wide transitions back to the working state.
 *
 * Return 'true' if the platform firmware is going to be invoked at the end of
 * the system-wide power management transition (to a sleep state) in progress in
 * order to complete it, or if the platform firmware has been invoked in order
 * to complete the last (or preceding) transition of the system to a sleep
 * state.
 *
 * This matters if the caller needs or wants to carry out some special actions
 * depending on whether or not control will be passed to the platform firmware
 * subsequently (for example, the device may need to be reset before letting the
 * platform firmware manipulate it, which is not necessary when the platform
 * firmware is not going to be invoked) or when such special actions may have
 * been carried out during the preceding transition of the system to a sleep
 * state (as they may need to be taken into account).
 */
static inline bool pm_suspend_via_firmware(void)
{
	return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND);
}

/**
 * pm_resume_via_firmware - Check if platform firmware has woken up the system.
 *
 * To be called during system-wide power management transitions from sleep
 * states.
 *
 * Return 'true' if the platform firmware has passed control to the kernel at
 * the beginning of the system-wide power management transition in progress, so
 * the event that woke up the system from sleep has been handled by the platform
 * firmware.
 */
static inline bool pm_resume_via_firmware(void)
{
	return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME);
}
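
/*
 * Illustrative sketch (not part of this header): a driver may consult these
 * helpers in its suspend/resume callbacks to decide whether extra work is
 * needed around a firmware hand-off.  foo_quiesce_for_firmware() and
 * foo_redo_firmware_handoff() are made-up placeholders:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (pm_suspend_via_firmware())
 *			foo_quiesce_for_firmware(dev);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (pm_resume_via_firmware())
 *			foo_redo_firmware_handoff(dev);
 *		return 0;
 *	}
 */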
/**
 * pm_suspend_no_platform - Check if platform may change device power states.
 *
 * To be called during system-wide power management transitions to sleep states
 * or during the subsequent system-wide transitions back to the working state.
 *
 * Return 'true' if the power states of devices remain under full control of the
 * kernel throughout the system-wide suspend and resume cycle in progress (that
 * is, if a device is put into a certain power state during suspend, it can be
 * expected to remain in that state during resume).
 */
static inline bool pm_suspend_no_platform(void)
{
	return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_NO_PLATFORM);
}
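
/*
 * Illustrative sketch (not part of this header): a driver's resume path can
 * skip re-initialisation when the kernel alone controlled device power states
 * across the whole cycle.  foo_reinit_hw() is a made-up placeholder:
 *
 *	static int foo_resume_early(struct device *dev)
 *	{
 *		if (pm_suspend_no_platform())
 *			return 0;
 *		return foo_reinit_hw(dev);
 *	}
 */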
/* Suspend-to-idle state machine. */
enum s2idle_states {
	S2IDLE_STATE_NONE,	/* Not suspended/suspending. */
	S2IDLE_STATE_ENTER,	/* Enter suspend-to-idle. */
	S2IDLE_STATE_WAKE,	/* Wake up from suspend-to-idle. */
};

extern enum s2idle_states __read_mostly s2idle_state;

static inline bool idle_should_enter_s2idle(void)
{
	return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
}

extern bool pm_suspend_default_s2idle(void);
extern void __init pm_states_init(void);
extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
extern void s2idle_wake(void);

/**
 * arch_suspend_disable_irqs - disable IRQs for suspend
 *
 * Disables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_disable_irqs(void);

/**
 * arch_suspend_enable_irqs - enable IRQs after suspend
 *
 * Enables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_enable_irqs(void);

extern int pm_suspend(suspend_state_t state);
extern bool sync_on_suspend_enabled;
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem	NULL

static inline void pm_suspend_clear_flags(void) {}
static inline void pm_set_suspend_via_firmware(void) {}
static inline void pm_set_resume_via_firmware(void) {}
static inline bool pm_suspend_via_firmware(void) { return false; }
static inline bool pm_resume_via_firmware(void) { return false; }
static inline bool pm_suspend_no_platform(void) { return false; }
static inline bool pm_suspend_default_s2idle(void) { return false; }

static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
static inline bool sync_on_suspend_enabled(void) { return true; }
static inline bool idle_should_enter_s2idle(void) { return false; }
static inline void __init pm_states_init(void) {}
static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
static inline void s2idle_wake(void) {}
#endif /* !CONFIG_SUSPEND */

/* struct pbe is used for creating lists of pages that should be restored
 * atomically during the resume from disk, because the page frames they have
 * occupied before the suspend are in use.
 */
struct pbe {
	void *address;		/* address of the copy */
	void *orig_address;	/* original address of a page */
	struct pbe *next;
};
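
/*
 * Illustrative sketch (not part of this header): during resume from disk the
 * restore code walks such a list and copies every saved page back into the
 * frame it originally occupied, conceptually along these lines (the real work
 * happens in kernel/power/snapshot.c and in architecture code):
 *
 *	struct pbe *p;
 *
 *	for (p = restore_pblist; p; p = p->next)
 *		memcpy(p->orig_address, p->address, PAGE_SIZE);
 */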
/* mm/page_alloc.c */
extern void mark_free_pages(struct zone *zone);

/**
 * struct platform_hibernation_ops - hibernation platform support
 *
 * The methods in this structure allow a platform to carry out special
 * operations required by it during a hibernation transition.
 *
 * All the methods below, except for @recover(), must be implemented.
 *
 * @begin: Tell the platform driver that we're starting hibernation.
 *	Called right after shrinking memory and before freezing devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state.
 *
 * @pre_snapshot: Prepare the platform for creating the hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @finish: Restore the previous state of the platform after the hibernation
 *	image has been created *or* put the platform into the normal operation
 *	mode after the hibernation (the same method is executed in both cases).
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @prepare: Prepare the platform for entering the low power state.
 *	Called right after the hibernation image has been saved and before
 *	devices are prepared for entering the low power state.
 *
 * @enter: Put the system into the low power state after the hibernation image
 *	has been saved to disk.
 *	Called after the nonboot CPUs have been disabled and all of the low
 *	level devices have been shut down (runs with IRQs off).
 *
 * @leave: Perform the first stage of the cleanup after the system sleep state
 *	indicated by @set_target() has been left.
 *	Called right after the control has been passed from the boot kernel to
 *	the image kernel, before the nonboot CPUs are enabled and before devices
 *	are resumed.  Executed with interrupts disabled.
 *
 * @pre_restore: Prepare system for the restoration from a hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @restore_cleanup: Clean up after a failing image restoration.
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @recover: Recover the platform from a failure to suspend devices.
 *	Called by the PM core if the suspending of devices during hibernation
 *	fails.  This callback is optional and should only be implemented by
 *	platforms which require special recovery actions in that situation.
 */
struct platform_hibernation_ops {
	int (*begin)(pm_message_t stage);
	void (*end)(void);
	int (*pre_snapshot)(void);
	void (*finish)(void);
	int (*prepare)(void);
	int (*enter)(void);
	void (*leave)(void);
	int (*pre_restore)(void);
	void (*restore_cleanup)(void);
	void (*recover)(void);

	ANDROID_KABI_RESERVE(1);
};
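
/*
 * Illustrative sketch (not part of this header): a platform providing
 * hibernation support fills in all of the mandatory callbacks and registers
 * the table with hibernation_set_ops(), declared below under
 * CONFIG_HIBERNATION.  The foo_hib_*() functions are placeholders with the
 * signatures given in the structure above:
 *
 *	static const struct platform_hibernation_ops foo_hibernation_ops = {
 *		.begin = foo_hib_begin,
 *		.end = foo_hib_end,
 *		.pre_snapshot = foo_hib_pre_snapshot,
 *		.finish = foo_hib_finish,
 *		.prepare = foo_hib_prepare,
 *		.enter = foo_hib_enter,
 *		.leave = foo_hib_leave,
 *		.pre_restore = foo_hib_pre_restore,
 *		.restore_cleanup = foo_hib_restore_cleanup,
 *	};
 *
 *	hibernation_set_ops(&foo_hibernation_ops);
 */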
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void register_nosave_region(unsigned long b, unsigned long e);
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);
extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
extern bool hibernation_available(void);
asmlinkage int swsusp_save(void);
extern struct pbe *restore_pblist;
int pfn_is_nosave(unsigned long pfn);

int hibernate_quiet_exec(int (*func)(void *data), void *data);
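
/*
 * Illustrative sketch (not part of this header, semantics per
 * kernel/power/hibernate.c): hibernate_quiet_exec() runs a callback while
 * user space is frozen and devices are quiesced, as if the system were about
 * to hibernate, and then brings everything back.  foo_do_quiesced_work() is
 * a placeholder:
 *
 *	static int foo_do_quiesced_work(void *data)
 *	{
 *		return 0;
 *	}
 *
 *	int ret = hibernate_quiet_exec(foo_do_quiesced_work, NULL);
 */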
#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}

static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
static inline bool system_entering_hibernation(void) { return false; }
static inline bool hibernation_available(void) { return false; }

static inline int hibernate_quiet_exec(int (*func)(void *data), void *data)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_HIBERNATION */

#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
int is_hibernate_resume_dev(dev_t dev);
#else
static inline int is_hibernate_resume_dev(dev_t dev) { return 0; }
#endif

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

extern struct mutex system_transition_mutex;

#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);

/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
extern void ksys_sync_helper(void);

#define pm_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_pm_notifier(&fn##_nb);			\
}
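
/*
 * Illustrative sketch (not part of this header): a subsystem can watch the
 * events above with a PM notifier, for instance to pause background work on
 * PM_SUSPEND_PREPARE and restart it on PM_POST_SUSPEND.  The foo_* names are
 * placeholders:
 *
 *	static int foo_pm_notify(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case PM_SUSPEND_PREPARE:
 *		case PM_HIBERNATION_PREPARE:
 *			foo_pause_work();
 *			break;
 *		case PM_POST_SUSPEND:
 *		case PM_POST_HIBERNATION:
 *			foo_restart_work();
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_pm_nb = {
 *		.notifier_call = foo_pm_notify,
 *	};
 *
 *	static int __init foo_notifier_init(void)
 *	{
 *		return register_pm_notifier(&foo_pm_nb);
 *	}
 */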
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
extern suspend_state_t pm_suspend_target_state;

extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_system_cancel_wakeup(void);
extern void pm_wakeup_clear(unsigned int irq_number);
extern void pm_system_irq_wakeup(unsigned int irq_number);
extern unsigned int pm_wakeup_irq(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);
extern void pm_get_active_wakeup_sources(char *pending_sources, size_t max);

extern void lock_system_sleep(void);
extern void unlock_system_sleep(void);
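
/*
 * Illustrative sketch (not part of this header): pm_get_wakeup_count() and
 * pm_save_wakeup_count() implement the wakeup-count handshake exposed through
 * /sys/power/wakeup_count.  In kernel terms the pattern is roughly the one
 * below, with the suspend proceeding only if no wakeup events were registered
 * in between (error handling omitted):
 *
 *	unsigned int count;
 *
 *	if (pm_get_wakeup_count(&count, true) && pm_save_wakeup_count(count))
 *		pm_suspend(PM_SUSPEND_MEM);
 */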
#else /* !CONFIG_PM_SLEEP */

static inline int register_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline void ksys_sync_helper(void) {}

#define pm_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}

static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
extern bool pm_debug_messages_on;
extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
#else
#define pm_print_times_enabled	(false)
#define pm_debug_messages_on	(false)

#include <linux/printk.h>

#define __pm_pr_dbg(defer, fmt, ...) \
	no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif

#define pm_pr_dbg(fmt, ...) \
	__pm_pr_dbg(false, fmt, ##__VA_ARGS__)

#define pm_deferred_pr_dbg(fmt, ...) \
	__pm_pr_dbg(true, fmt, ##__VA_ARGS__)
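
/*
 * Illustrative sketch (not part of this header): both wrappers take a
 * printk-style format string, e.g.:
 *
 *	pm_pr_dbg("Preparing system for sleep (state %d)\n", state);
 *	pm_deferred_pr_dbg("wakeup IRQ %u\n", irq);
 *
 * 'state' and 'irq' above are placeholders for caller-provided values.
 */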
#ifdef CONFIG_PM_AUTOSLEEP
/* kernel/power/autosleep.c */
void queue_up_suspend_work(void);
#else /* !CONFIG_PM_AUTOSLEEP */
static inline void queue_up_suspend_work(void) {}
#endif /* !CONFIG_PM_AUTOSLEEP */

#endif /* _LINUX_SUSPEND_H */