wakeup_reason.c

/*
 * kernel/power/wakeup_reason.c
 *
 * Logs the reasons which caused the kernel to resume from
 * the suspend mode.
 *
 * Copyright (C) 2020 Google, Inc.
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/wakeup_reason.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/slab.h>

/*
 * struct wakeup_irq_node - stores data and relationships for IRQs logged as
 * either base or nested wakeup reasons during suspend/resume flow.
 * @siblings - for membership on leaf or parent IRQ lists
 * @irq      - the IRQ number
 * @irq_name - the name associated with the IRQ, or a default if none
 */
struct wakeup_irq_node {
	struct list_head siblings;
	int irq;
	const char *irq_name;
};
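
/*
 * Wakeup-reason state for the current suspend/resume cycle.  RESUME_ABORT
 * and RESUME_ABNORMAL are sticky: once either is set, later IRQ logging is
 * ignored until clear_wakeup_reasons() resets the state on the next
 * suspend attempt.
 */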
enum wakeup_reason_flag {
	RESUME_NONE = 0,
	RESUME_IRQ,
	RESUME_ABORT,
	RESUME_ABNORMAL,
};

static DEFINE_SPINLOCK(wakeup_reason_lock);

static LIST_HEAD(leaf_irqs);   /* kept in ascending IRQ sorted order */
static LIST_HEAD(parent_irqs); /* unordered */

static struct kmem_cache *wakeup_irq_nodes_cache;

static const char *default_irq_name = "(unnamed)";

static struct kobject *kobj;

static bool capture_reasons;
static int wakeup_reason;
static char non_irq_wake_reason[MAX_SUSPEND_ABORT_LEN];

static ktime_t last_monotime; /* monotonic time before last suspend */
static ktime_t curr_monotime; /* monotonic time after last suspend */
static ktime_t last_stime; /* monotonic boottime offset before last suspend */
static ktime_t curr_stime; /* monotonic boottime offset after last suspend */

static void init_node(struct wakeup_irq_node *p, int irq)
{
	struct irq_desc *desc;

	INIT_LIST_HEAD(&p->siblings);

	p->irq = irq;
	desc = irq_to_desc(irq);
	if (desc && desc->action && desc->action->name)
		p->irq_name = desc->action->name;
	else
		p->irq_name = default_irq_name;
}

static struct wakeup_irq_node *create_node(int irq)
{
	struct wakeup_irq_node *result;

	result = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
	if (unlikely(!result))
		pr_warn("Failed to log wakeup IRQ %d\n", irq);
	else
		init_node(result, irq);

	return result;
}

static void delete_list(struct list_head *head)
{
	struct wakeup_irq_node *n;

	while (!list_empty(head)) {
		n = list_first_entry(head, struct wakeup_irq_node, siblings);
		list_del(&n->siblings);
		kmem_cache_free(wakeup_irq_nodes_cache, n);
	}
}
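
/*
 * Insert a node for @irq into the list at @head, keeping the list sorted
 * in ascending IRQ order.  Returns true if @irq was inserted or already
 * present, false if node allocation failed.
 */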
static bool add_sibling_node_sorted(struct list_head *head, int irq)
{
	struct wakeup_irq_node *n = NULL;
	struct list_head *predecessor = head;

	if (unlikely(WARN_ON(!head)))
		return false;

	if (!list_empty(head))
		list_for_each_entry(n, head, siblings) {
			if (n->irq < irq)
				predecessor = &n->siblings;
			else if (n->irq == irq)
				return true;
			else
				break;
		}

	n = create_node(irq);
	if (n) {
		list_add(&n->siblings, predecessor);
		return true;
	}

	return false;
}

static struct wakeup_irq_node *find_node_in_list(struct list_head *head,
						 int irq)
{
	struct wakeup_irq_node *n;

	if (unlikely(WARN_ON(!head)))
		return NULL;

	list_for_each_entry(n, head, siblings)
		if (n->irq == irq)
			return n;

	return NULL;
}
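
/*
 * Record @irq as a leaf (most specific) wakeup reason.  IRQs already known
 * as demultiplexing parents are skipped, and nothing is logged once an
 * abort or abnormal wake has been recorded or capture has been disabled
 * for this cycle.
 */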
void log_irq_wakeup_reason(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);
	if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (!capture_reasons) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (find_node_in_list(&parent_irqs, irq) == NULL)
		add_sibling_node_sorted(&leaf_irqs, irq);

	wakeup_reason = RESUME_IRQ;
	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}
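
/*
 * Record the child @irq behind a demultiplexing @parent_irq as the real
 * wakeup reason.  If the parent was previously logged as a leaf, it is
 * moved to the parent list so that only the specific child appears in the
 * reported reasons.
 */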
void log_threaded_irq_wakeup_reason(int irq, int parent_irq)
{
	struct wakeup_irq_node *parent;
	unsigned long flags;

	/*
	 * Intentionally unsynchronized.  Calls that come in after we have
	 * resumed should have a fast exit path since there's no work to be
	 * done, and any coherence issue that could cause a wrong value here
	 * is both highly improbable - given the set/clear timing - and very
	 * low impact (parent IRQ gets logged instead of the specific child).
	 */
	if (!capture_reasons)
		return;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (!capture_reasons || (find_node_in_list(&leaf_irqs, irq) != NULL)) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	parent = find_node_in_list(&parent_irqs, parent_irq);
	if (parent != NULL)
		add_sibling_node_sorted(&leaf_irqs, irq);
	else {
		parent = find_node_in_list(&leaf_irqs, parent_irq);
		if (parent != NULL) {
			list_del_init(&parent->siblings);
			list_add_tail(&parent->siblings, &parent_irqs);
			add_sibling_node_sorted(&leaf_irqs, irq);
		}
	}

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}
EXPORT_SYMBOL_GPL(log_threaded_irq_wakeup_reason);
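
/*
 * Common helper for the two non-IRQ reason paths below.  Only the first
 * abort or abnormal-wake reason per cycle is kept; any IRQs logged later
 * in the same cycle are ignored.
 */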
static void __log_abort_or_abnormal_wake(bool abort, const char *fmt,
					 va_list args)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	/* Suspend abort or abnormal wake reason has already been logged. */
	if (wakeup_reason != RESUME_NONE) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (abort)
		wakeup_reason = RESUME_ABORT;
	else
		wakeup_reason = RESUME_ABNORMAL;

	vsnprintf(non_irq_wake_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}
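
/*
 * Illustrative (hypothetical) caller: a driver that cannot enter suspend
 * might record the cause before failing its ->suspend() callback, e.g.:
 *
 *	log_suspend_abort_reason("%s: transfer pending on port %d",
 *				 __func__, port);
 */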
void log_suspend_abort_reason(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__log_abort_or_abnormal_wake(true, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(log_suspend_abort_reason);

void log_abnormal_wakeup_reason(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__log_abort_or_abnormal_wake(false, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(log_abnormal_wakeup_reason);
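
/*
 * Drop all logged reasons and re-arm capture.  Called from the PM notifier
 * below on PM_SUSPEND_PREPARE, ahead of each suspend attempt.
 */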
void clear_wakeup_reasons(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	delete_list(&leaf_irqs);
	delete_list(&parent_irqs);
	wakeup_reason = RESUME_NONE;
	capture_reasons = true;

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}
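
/*
 * Print the captured reasons to the kernel log on resume and disable
 * further capture until the next suspend attempt re-arms it.
 */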
static void print_wakeup_sources(void)
{
	struct wakeup_irq_node *n;
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	capture_reasons = false;

	if (wakeup_reason == RESUME_ABORT) {
		pr_info("Abort: %s\n", non_irq_wake_reason);
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
		list_for_each_entry(n, &leaf_irqs, siblings)
			pr_info("Resume caused by IRQ %d, %s\n", n->irq,
				n->irq_name);
	else if (wakeup_reason == RESUME_ABNORMAL)
		pr_info("Resume caused by %s\n", non_irq_wake_reason);
	else
		pr_info("Resume cause unknown\n");

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}
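
/*
 * Shown in sysfs as "last_resume_reason" (see wakeup_reason_init() for the
 * kobject this hangs off).  Output is one "<irq> <name>" line per leaf
 * wakeup IRQ, "Abort: <reason>" after an aborted suspend, or
 * "-1 <reason>" after an abnormal wake; e.g. "170 gpio_keys"
 * (illustrative values).
 */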
static ssize_t last_resume_reason_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	ssize_t buf_offset = 0;
	struct wakeup_irq_node *n;
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	if (wakeup_reason == RESUME_ABORT) {
		buf_offset = scnprintf(buf, PAGE_SIZE, "Abort: %s",
				       non_irq_wake_reason);
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return buf_offset;
	}

	if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
		list_for_each_entry(n, &leaf_irqs, siblings)
			buf_offset += scnprintf(buf + buf_offset,
						PAGE_SIZE - buf_offset,
						"%d %s\n", n->irq, n->irq_name);
	else if (wakeup_reason == RESUME_ABNORMAL)
		buf_offset = scnprintf(buf, PAGE_SIZE, "-1 %s",
				       non_irq_wake_reason);

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);

	return buf_offset;
}
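
/*
 * Shown in sysfs as "last_suspend_time".  Prints two space-separated
 * second.nanosecond values: time spent in the suspend/resume machinery,
 * then time actually slept; e.g. "0.123456789 2.000000000" (illustrative
 * values).
 */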
static ssize_t last_suspend_time_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	struct timespec64 sleep_time;
	struct timespec64 total_time;
	struct timespec64 suspend_resume_time;

	/*
	 * total_time is calculated from the monotonic boottime clock
	 * because, unlike CLOCK_MONOTONIC, it includes the time spent in
	 * the suspend state.
	 */
	total_time = ktime_to_timespec64(ktime_sub(curr_stime, last_stime));

	/*
	 * suspend_resume_time is the CLOCK_MONOTONIC interval between
	 * entering suspend and completing resume; since CLOCK_MONOTONIC
	 * stops during suspend, it excludes the time actually slept.
	 */
	suspend_resume_time =
		ktime_to_timespec64(ktime_sub(curr_monotime, last_monotime));

	/* sleep_time = total_time - suspend_resume_time */
	sleep_time = timespec64_sub(total_time, suspend_resume_time);

	/* Export suspend_resume_time and sleep_time as a pair. */
	return sprintf(buf, "%llu.%09lu %llu.%09lu\n",
		       (unsigned long long)suspend_resume_time.tv_sec,
		       suspend_resume_time.tv_nsec,
		       (unsigned long long)sleep_time.tv_sec,
		       sleep_time.tv_nsec);
}

static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);

static struct attribute *attrs[] = {
	&resume_reason.attr,
	&suspend_time.attr,
	NULL,
};
static struct attribute_group attr_group = {
	.attrs = attrs,
};
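
/*
 * Both attributes above appear under /sys/kernel/wakeup_reasons/ once
 * wakeup_reason_init() below registers the group on the "wakeup_reasons"
 * kobject.
 */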

/*
 * PM notifier: on suspend entry, snapshot the clocks and clear all the
 * previous wakeup reasons; on resume, snapshot again and print the
 * reasons captured for this cycle.
 */
static int wakeup_reason_pm_event(struct notifier_block *notifier,
				  unsigned long pm_event, void *unused)
{
	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		/* monotonic time since boot */
		last_monotime = ktime_get();
		/* monotonic time since boot including the time spent in suspend */
		last_stime = ktime_get_boottime();
		clear_wakeup_reasons();
		break;
	case PM_POST_SUSPEND:
		/* monotonic time since boot */
		curr_monotime = ktime_get();
		/* monotonic time since boot including the time spent in suspend */
		curr_stime = ktime_get_boottime();
		print_wakeup_sources();
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block wakeup_reason_pm_notifier_block = {
	.notifier_call = wakeup_reason_pm_event,
};

static int __init wakeup_reason_init(void)
{
	if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
		pr_warn("[%s] failed to register PM notifier\n", __func__);
		goto fail;
	}

	kobj = kobject_create_and_add("wakeup_reasons", kernel_kobj);
	if (!kobj) {
		pr_warn("[%s] failed to create a sysfs kobject\n", __func__);
		goto fail_unregister_pm_notifier;
	}

	if (sysfs_create_group(kobj, &attr_group)) {
		pr_warn("[%s] failed to create a sysfs group\n", __func__);
		goto fail_kobject_put;
	}

	wakeup_irq_nodes_cache =
		kmem_cache_create("wakeup_irq_node_cache",
				  sizeof(struct wakeup_irq_node), 0, 0, NULL);
	if (!wakeup_irq_nodes_cache)
		goto fail_remove_group;

	return 0;

fail_remove_group:
	sysfs_remove_group(kobj, &attr_group);
fail_kobject_put:
	kobject_put(kobj);
fail_unregister_pm_notifier:
	unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
fail:
	return 1;
}

late_initcall(wakeup_reason_init);