test_lockup.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Test module to generate lockups
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/file.h>
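
/*
 * The module parameters below select how long to "lock up", which locks to
 * take, which contexts (irq, softirq, preemption, RCU) to disable, and what
 * to do between iterations; see the MODULE_PARM_DESC() strings for details.
 */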
static unsigned int time_secs;
module_param(time_secs, uint, 0600);
MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0");

static unsigned int time_nsecs;
module_param(time_nsecs, uint, 0600);
MODULE_PARM_DESC(time_nsecs, "nanoseconds part of lockup time, default 0");

static unsigned int cooldown_secs;
module_param(cooldown_secs, uint, 0600);
MODULE_PARM_DESC(cooldown_secs, "cooldown time between iterations in seconds, default 0");

static unsigned int cooldown_nsecs;
module_param(cooldown_nsecs, uint, 0600);
MODULE_PARM_DESC(cooldown_nsecs, "nanoseconds part of cooldown, default 0");

static unsigned int iterations = 1;
module_param(iterations, uint, 0600);
MODULE_PARM_DESC(iterations, "lockup iterations, default 1");

static bool all_cpus;
module_param(all_cpus, bool, 0400);
MODULE_PARM_DESC(all_cpus, "trigger lockup at all cpus at once");

static int wait_state;
static char *state = "R";
module_param(state, charp, 0400);
MODULE_PARM_DESC(state, "wait in 'R' running (default), 'D' uninterruptible, 'K' killable, 'S' interruptible state");

static bool use_hrtimer;
module_param(use_hrtimer, bool, 0400);
MODULE_PARM_DESC(use_hrtimer, "use high-resolution timer for sleeping");

static bool iowait;
module_param(iowait, bool, 0400);
MODULE_PARM_DESC(iowait, "account sleep time as iowait");

static bool lock_read;
module_param(lock_read, bool, 0400);
MODULE_PARM_DESC(lock_read, "lock read-write locks for read");

static bool lock_single;
module_param(lock_single, bool, 0400);
MODULE_PARM_DESC(lock_single, "acquire locks only at one cpu");

static bool reacquire_locks;
module_param(reacquire_locks, bool, 0400);
MODULE_PARM_DESC(reacquire_locks, "release and reacquire locks/irq/preempt between iterations");

static bool touch_softlockup;
module_param(touch_softlockup, bool, 0600);
MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations");

static bool touch_hardlockup;
module_param(touch_hardlockup, bool, 0600);
MODULE_PARM_DESC(touch_hardlockup, "touch hard-lockup watchdog between iterations");

static bool call_cond_resched;
module_param(call_cond_resched, bool, 0600);
MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");

static bool measure_lock_wait;
module_param(measure_lock_wait, bool, 0400);
MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");

static unsigned long lock_wait_threshold = ULONG_MAX;
module_param(lock_wait_threshold, ulong, 0400);
MODULE_PARM_DESC(lock_wait_threshold, "print lock wait time longer than this in nanoseconds, default off");

static bool test_disable_irq;
module_param_named(disable_irq, test_disable_irq, bool, 0400);
MODULE_PARM_DESC(disable_irq, "disable interrupts: generate hard-lockups");

static bool disable_softirq;
module_param(disable_softirq, bool, 0400);
MODULE_PARM_DESC(disable_softirq, "disable bottom-half irq handlers");

static bool disable_preempt;
module_param(disable_preempt, bool, 0400);
MODULE_PARM_DESC(disable_preempt, "disable preemption: generate soft-lockups");

static bool lock_rcu;
module_param(lock_rcu, bool, 0400);
MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls");

static bool lock_mmap_sem;
module_param(lock_mmap_sem, bool, 0400);
MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_lock: block procfs interfaces");

static unsigned long lock_rwsem_ptr;
module_param_unsafe(lock_rwsem_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_rwsem_ptr, "lock rw_semaphore at address");

static unsigned long lock_mutex_ptr;
module_param_unsafe(lock_mutex_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_mutex_ptr, "lock mutex at address");

static unsigned long lock_spinlock_ptr;
module_param_unsafe(lock_spinlock_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_spinlock_ptr, "lock spinlock at address");

static unsigned long lock_rwlock_ptr;
module_param_unsafe(lock_rwlock_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_rwlock_ptr, "lock rwlock at address");

static unsigned int alloc_pages_nr;
module_param_unsafe(alloc_pages_nr, uint, 0600);
MODULE_PARM_DESC(alloc_pages_nr, "allocate and free pages under locks");

static unsigned int alloc_pages_order;
module_param(alloc_pages_order, uint, 0400);
MODULE_PARM_DESC(alloc_pages_order, "page order to allocate");

static gfp_t alloc_pages_gfp = GFP_KERNEL;
module_param_unsafe(alloc_pages_gfp, uint, 0400);
MODULE_PARM_DESC(alloc_pages_gfp, "allocate pages with this gfp_mask, default GFP_KERNEL");

static bool alloc_pages_atomic;
module_param(alloc_pages_atomic, bool, 0400);
MODULE_PARM_DESC(alloc_pages_atomic, "allocate pages with GFP_ATOMIC");

static bool reallocate_pages;
module_param(reallocate_pages, bool, 0400);
MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations");
static struct file *test_file;
static struct inode *test_inode;
static char test_file_path[256];
module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400);
MODULE_PARM_DESC(file_path, "file path to test");

static bool test_lock_inode;
module_param_named(lock_inode, test_lock_inode, bool, 0400);
MODULE_PARM_DESC(lock_inode, "lock file -> inode -> i_rwsem");

static bool test_lock_mapping;
module_param_named(lock_mapping, test_lock_mapping, bool, 0400);
MODULE_PARM_DESC(lock_mapping, "lock file -> mapping -> i_mmap_rwsem");

static bool test_lock_sb_umount;
module_param_named(lock_sb_umount, test_lock_sb_umount, bool, 0400);
MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");

static atomic_t alloc_pages_failed = ATOMIC_INIT(0);

static atomic64_t max_lock_wait = ATOMIC64_INIT(0);

static struct task_struct *main_task;
static int master_cpu;
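
/*
 * Acquire everything requested via module parameters: the optional
 * mutex/rw_semaphore/mmap_lock first, then disable irq/softirq/preemption
 * and take rcu_read_lock as asked, and finally the optional spinlock/rwlock.
 * With measure_lock_wait the time spent here is tracked in max_lock_wait.
 */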
static void test_lock(bool master, bool verbose)
{
	u64 wait_start;

	if (measure_lock_wait)
		wait_start = local_clock();

	if (lock_mutex_ptr && master) {
		if (verbose)
			pr_notice("lock mutex %ps\n", (void *)lock_mutex_ptr);
		mutex_lock((struct mutex *)lock_mutex_ptr);
	}

	if (lock_rwsem_ptr && master) {
		if (verbose)
			pr_notice("lock rw_semaphore %ps\n",
				  (void *)lock_rwsem_ptr);
		if (lock_read)
			down_read((struct rw_semaphore *)lock_rwsem_ptr);
		else
			down_write((struct rw_semaphore *)lock_rwsem_ptr);
	}

	if (lock_mmap_sem && master) {
		if (verbose)
			pr_notice("lock mmap_lock pid=%d\n", main_task->pid);
		if (lock_read)
			mmap_read_lock(main_task->mm);
		else
			mmap_write_lock(main_task->mm);
	}

	if (test_disable_irq)
		local_irq_disable();

	if (disable_softirq)
		local_bh_disable();

	if (disable_preempt)
		preempt_disable();

	if (lock_rcu)
		rcu_read_lock();

	if (lock_spinlock_ptr && master) {
		if (verbose)
			pr_notice("lock spinlock %ps\n",
				  (void *)lock_spinlock_ptr);
		spin_lock((spinlock_t *)lock_spinlock_ptr);
	}

	if (lock_rwlock_ptr && master) {
		if (verbose)
			pr_notice("lock rwlock %ps\n",
				  (void *)lock_rwlock_ptr);
		if (lock_read)
			read_lock((rwlock_t *)lock_rwlock_ptr);
		else
			write_lock((rwlock_t *)lock_rwlock_ptr);
	}

	if (measure_lock_wait) {
		s64 cur_wait = local_clock() - wait_start;
		s64 max_wait = atomic64_read(&max_lock_wait);

		do {
			if (cur_wait < max_wait)
				break;
			max_wait = atomic64_cmpxchg(&max_lock_wait,
						    max_wait, cur_wait);
		} while (max_wait != cur_wait);

		if (cur_wait > lock_wait_threshold)
			pr_notice_ratelimited("lock wait %lld ns\n", cur_wait);
	}
}
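
/* Drop everything test_lock() took, in roughly the reverse order. */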
static void test_unlock(bool master, bool verbose)
{
	if (lock_rwlock_ptr && master) {
		if (lock_read)
			read_unlock((rwlock_t *)lock_rwlock_ptr);
		else
			write_unlock((rwlock_t *)lock_rwlock_ptr);
		if (verbose)
			pr_notice("unlock rwlock %ps\n",
				  (void *)lock_rwlock_ptr);
	}

	if (lock_spinlock_ptr && master) {
		spin_unlock((spinlock_t *)lock_spinlock_ptr);
		if (verbose)
			pr_notice("unlock spinlock %ps\n",
				  (void *)lock_spinlock_ptr);
	}

	if (lock_rcu)
		rcu_read_unlock();

	if (disable_preempt)
		preempt_enable();

	if (disable_softirq)
		local_bh_enable();

	if (test_disable_irq)
		local_irq_enable();

	if (lock_mmap_sem && master) {
		if (lock_read)
			mmap_read_unlock(main_task->mm);
		else
			mmap_write_unlock(main_task->mm);
		if (verbose)
			pr_notice("unlock mmap_lock pid=%d\n", main_task->pid);
	}

	if (lock_rwsem_ptr && master) {
		if (lock_read)
			up_read((struct rw_semaphore *)lock_rwsem_ptr);
		else
			up_write((struct rw_semaphore *)lock_rwsem_ptr);
		if (verbose)
			pr_notice("unlock rw_semaphore %ps\n",
				  (void *)lock_rwsem_ptr);
	}

	if (lock_mutex_ptr && master) {
		mutex_unlock((struct mutex *)lock_mutex_ptr);
		if (verbose)
			pr_notice("unlock mutex %ps\n",
				  (void *)lock_mutex_ptr);
	}
}
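
/*
 * Optionally stress the page allocator while the locks are held:
 * test_alloc_pages() grabs alloc_pages_nr pages of alloc_pages_order and
 * strings them on a local list, test_free_pages() gives them back.
 */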
static void test_alloc_pages(struct list_head *pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < alloc_pages_nr; i++) {
		page = alloc_pages(alloc_pages_gfp, alloc_pages_order);
		if (!page) {
			atomic_inc(&alloc_pages_failed);
			break;
		}
		list_add(&page->lru, pages);
	}
}

static void test_free_pages(struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru)
		__free_pages(page, alloc_pages_order);
	INIT_LIST_HEAD(pages);
}
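
/*
 * Spend one "lockup" interval: busy-wait with mdelay()/ndelay() when the
 * requested state is TASK_RUNNING, otherwise really sleep in the chosen
 * state via schedule_timeout() or an hrtimer.
 */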
static void test_wait(unsigned int secs, unsigned int nsecs)
{
	if (wait_state == TASK_RUNNING) {
		if (secs)
			mdelay(secs * MSEC_PER_SEC);
		if (nsecs)
			ndelay(nsecs);
		return;
	}

	__set_current_state(wait_state);
	if (use_hrtimer) {
		ktime_t time;

		time = ns_to_ktime((u64)secs * NSEC_PER_SEC + nsecs);
		schedule_hrtimeout(&time, HRTIMER_MODE_REL);
	} else {
		schedule_timeout(secs * HZ + nsecs_to_jiffies(nsecs));
	}
}
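
/*
 * One complete lockup: take the locks, hold them for time_secs/time_nsecs
 * per iteration, optionally poking the watchdogs, reacquiring the locks or
 * reallocating pages between iterations, then release everything and report
 * how long the whole run took.
 */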
static void test_lockup(bool master)
{
	u64 lockup_start = local_clock();
	unsigned int iter = 0;
	LIST_HEAD(pages);

	pr_notice("Start on CPU%d\n", raw_smp_processor_id());

	test_lock(master, true);

	test_alloc_pages(&pages);

	while (iter++ < iterations && !signal_pending(main_task)) {

		if (iowait)
			current->in_iowait = 1;

		test_wait(time_secs, time_nsecs);

		if (iowait)
			current->in_iowait = 0;

		if (reallocate_pages)
			test_free_pages(&pages);

		if (reacquire_locks)
			test_unlock(master, false);

		if (touch_softlockup)
			touch_softlockup_watchdog();

		if (touch_hardlockup)
			touch_nmi_watchdog();

		if (call_cond_resched)
			cond_resched();

		test_wait(cooldown_secs, cooldown_nsecs);

		if (reacquire_locks)
			test_lock(master, false);

		if (reallocate_pages)
			test_alloc_pages(&pages);
	}

	pr_notice("Finish on CPU%d in %lld ns\n", raw_smp_processor_id(),
		  local_clock() - lockup_start);

	test_free_pages(&pages);

	test_unlock(master, true);
}
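
/*
 * With all_cpus=1 one work item is queued on every online CPU.  When
 * lock_single is set only the work running on master_cpu acts as "master"
 * and takes the locks; otherwise every CPU does.
 */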
static DEFINE_PER_CPU(struct work_struct, test_works);

static void test_work_fn(struct work_struct *work)
{
	test_lockup(!lock_single ||
		    work == per_cpu_ptr(&test_works, master_cpu));
}
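
/*
 * The *_ptr parameters take raw kernel addresses from the user; reject
 * anything that looks like a user-space pointer or is not readable at all.
 */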
static bool test_kernel_ptr(unsigned long addr, int size)
{
	void *ptr = (void *)addr;
	char buf;

	if (!addr)
		return false;

	/* should be at least readable kernel address */
	if (!IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) &&
	    (access_ok((void __user *)ptr, 1) ||
	     access_ok((void __user *)ptr + size - 1, 1))) {
		pr_err("user space ptr invalid in kernel: %#lx\n", addr);
		return true;
	}

	if (get_kernel_nofault(buf, ptr) ||
	    get_kernel_nofault(buf, ptr + size - 1)) {
		pr_err("invalid kernel ptr: %#lx\n", addr);
		return true;
	}

	return false;
}
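
/*
 * When CONFIG_DEBUG_SPINLOCK is enabled, cross-check the debug magic of the
 * lock behind a user-supplied address, so a mistyped address is caught
 * before it is used as a lock.
 */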
static bool __maybe_unused test_magic(unsigned long addr, int offset,
				      unsigned int expected)
{
	void *ptr = (void *)addr + offset;
	unsigned int magic = 0;

	if (!addr)
		return false;

	if (get_kernel_nofault(magic, ptr) || magic != expected) {
		pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n",
		       addr, offset, magic, expected);
		return true;
	}

	return false;
}
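
/*
 * Parse and sanity-check the parameters, resolve file-based locks, then run
 * the lockup either on the current CPU or, with all_cpus=1, from a
 * high-priority work item on every online CPU.
 */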
static int __init test_lockup_init(void)
{
	u64 test_start = local_clock();

	main_task = current;

	switch (state[0]) {
	case 'S':
		wait_state = TASK_INTERRUPTIBLE;
		break;
	case 'D':
		wait_state = TASK_UNINTERRUPTIBLE;
		break;
	case 'K':
		wait_state = TASK_KILLABLE;
		break;
	case 'R':
		wait_state = TASK_RUNNING;
		break;
	default:
		pr_err("unknown state=%s\n", state);
		return -EINVAL;
	}

	if (alloc_pages_atomic)
		alloc_pages_gfp = GFP_ATOMIC;

	if (test_kernel_ptr(lock_spinlock_ptr, sizeof(spinlock_t)) ||
	    test_kernel_ptr(lock_rwlock_ptr, sizeof(rwlock_t)) ||
	    test_kernel_ptr(lock_mutex_ptr, sizeof(struct mutex)) ||
	    test_kernel_ptr(lock_rwsem_ptr, sizeof(struct rw_semaphore)))
		return -EINVAL;

#ifdef CONFIG_DEBUG_SPINLOCK
	if (test_magic(lock_spinlock_ptr,
		       offsetof(spinlock_t, rlock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_rwlock_ptr,
		       offsetof(rwlock_t, magic),
		       RWLOCK_MAGIC) ||
	    test_magic(lock_mutex_ptr,
		       offsetof(struct mutex, wait_lock.rlock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_rwsem_ptr,
		       offsetof(struct rw_semaphore, wait_lock.magic),
		       SPINLOCK_MAGIC))
		return -EINVAL;
#endif

	if ((wait_state != TASK_RUNNING ||
	     (call_cond_resched && !reacquire_locks) ||
	     (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
	    (test_disable_irq || disable_softirq || disable_preempt ||
	     lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) {
		pr_err("refuse to sleep in atomic context\n");
		return -EINVAL;
	}

	if (lock_mmap_sem && !main_task->mm) {
		pr_err("no mm to lock mmap_lock\n");
		return -EINVAL;
	}

	if (test_file_path[0]) {
		test_file = filp_open(test_file_path, O_RDONLY, 0);
		if (IS_ERR(test_file)) {
			pr_err("failed to open %s: %ld\n", test_file_path, PTR_ERR(test_file));
			return PTR_ERR(test_file);
		}
		test_inode = file_inode(test_file);
	} else if (test_lock_inode ||
		   test_lock_mapping ||
		   test_lock_sb_umount) {
		pr_err("no file to lock\n");
		return -EINVAL;
	}

	if (test_lock_inode && test_inode)
		lock_rwsem_ptr = (unsigned long)&test_inode->i_rwsem;

	if (test_lock_mapping && test_file && test_file->f_mapping)
		lock_rwsem_ptr = (unsigned long)&test_file->f_mapping->i_mmap_rwsem;

	if (test_lock_sb_umount && test_inode)
		lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount;

	pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n",
		  main_task->pid, time_secs, time_nsecs,
		  cooldown_secs, cooldown_nsecs, iterations, state,
		  all_cpus ? "all_cpus " : "",
		  iowait ? "iowait " : "",
		  test_disable_irq ? "disable_irq " : "",
		  disable_softirq ? "disable_softirq " : "",
		  disable_preempt ? "disable_preempt " : "",
		  lock_rcu ? "lock_rcu " : "",
		  lock_read ? "lock_read " : "",
		  touch_softlockup ? "touch_softlockup " : "",
		  touch_hardlockup ? "touch_hardlockup " : "",
		  call_cond_resched ? "call_cond_resched " : "",
		  reacquire_locks ? "reacquire_locks " : "");

	if (alloc_pages_nr)
		pr_notice("ALLOCATE PAGES nr=%u order=%u gfp=%pGg %s\n",
			  alloc_pages_nr, alloc_pages_order, &alloc_pages_gfp,
			  reallocate_pages ? "reallocate_pages " : "");

	if (all_cpus) {
		unsigned int cpu;

		cpus_read_lock();

		preempt_disable();
		master_cpu = smp_processor_id();
		for_each_online_cpu(cpu) {
			INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn);
			queue_work_on(cpu, system_highpri_wq,
				      per_cpu_ptr(&test_works, cpu));
		}
		preempt_enable();

		for_each_online_cpu(cpu)
			flush_work(per_cpu_ptr(&test_works, cpu));

		cpus_read_unlock();
	} else {
		test_lockup(true);
	}

	if (measure_lock_wait)
		pr_notice("Maximum lock wait: %lld ns\n",
			  atomic64_read(&max_lock_wait));

	if (alloc_pages_nr)
		pr_notice("Page allocation failed %u times\n",
			  atomic_read(&alloc_pages_failed));

	pr_notice("FINISH in %llu ns\n", local_clock() - test_start);

	if (test_file)
		fput(test_file);

	if (signal_pending(main_task))
		return -EINTR;

	return -EAGAIN;
}
module_init(test_lockup_init);

MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
MODULE_AUTHOR("Konstantin Khlebnikov <khlebnikov@yandex-team.ru>");
MODULE_DESCRIPTION("Test module to generate lockups");
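
/*
 * Example usage (illustrative only; the exact parameters depend on what you
 * want to trigger and on the debug options in your kernel config):
 *
 *   # ~10 s RCU stall / soft-lockup: spin with preemption disabled inside
 *   # rcu_read_lock() on every online CPU
 *   modprobe test_lockup time_secs=10 all_cpus=1 lock_rcu=1 disable_preempt=1
 *
 *   # hold this task's mmap_lock for write for 5 seconds, blocking procfs
 *   # interfaces that need it
 *   modprobe test_lockup time_secs=5 lock_mmap_sem=1
 *
 * test_lockup_init() always returns -EAGAIN (or -EINTR when interrupted), so
 * modprobe reports an error and the module is never left loaded.
 */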