/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue at a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it is freeing at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj	obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
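
/*
 * Usage note (editor's sketch): per the early_param() hooks above, object
 * debugging can be toggled from the kernel command line at boot, e.g.
 *
 *	debug_objects		- force-enable, overriding the Kconfig default
 *	no_debug_objects	- force-disable
 *
 * The compiled-in default comes from CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT.
 */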
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full or not; if not, fill
	 * the pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
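
/*
 * Worked example (illustrative, assuming 4 KiB pages): objects at
 * 0xffff888012345010 and 0xffff888012345ff0 both yield the chunk number
 * 0xffff888012345 after the >> ODEBUG_CHUNK_SHIFT, so they hash into the
 * same one of the 2^ODEBUG_HASH_BITS (16384) buckets, while an object on
 * the next page typically lands in a different one. This is what lets
 * __debug_check_no_obj_freed() walk only the buckets covering a freed
 * memory range.
 */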

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	bool check_stack = false;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		check_stack = true;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (check_stack)
		debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
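
/*
 * Typical call pattern (editor's sketch, not part of this file): a user of
 * this API wires the hooks into its own object lifetime functions, e.g.
 *
 *	static const struct debug_obj_descr mything_debug_descr = {
 *		.name = "mything",
 *	};
 *
 *	void mything_init(struct mything *t)
 *	{
 *		debug_object_init(t, &mything_debug_descr);
 *		...
 *	}
 *
 *	void mything_start(struct mything *t)
 *	{
 *		debug_object_activate(t, &mything_debug_descr);
 *		...
 *	}
 *
 * "mything" and both functions are hypothetical names for illustration;
 * the tracker then flags activate-before-init, double-activate, freeing
 * an active object, and similar lifetime bugs.
 */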

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * We are here when a static object is activated. We let the type
	 * specific code confirm whether this is true or not. If true, we
	 * just make sure that the static object is tracked in the object
	 * tracker. If not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					 ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
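
/*
 * Usage sketch (illustrative, not part of this file): a caller can encode
 * a private sub-state machine in obj->astate while the object stays ACTIVE.
 * With two hypothetical sub-states
 *
 *	#define MYSTATE_IDLE	0
 *	#define MYSTATE_QUEUED	1
 *
 * a producer would assert the IDLE -> QUEUED transition via
 *
 *	debug_object_active_state(obj, &mything_debug_descr,
 *				  MYSTATE_IDLE, MYSTATE_QUEUED);
 *
 * and the consumer the reverse. Any mismatch of @expect against the
 * current astate triggers the "active_state" warning above. All names
 * here are made up for illustration.
 */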

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	const struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
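
/*
 * Reading the stats file (per the debugfs setup below, typically
 * /sys/kernel/debug/debug_objects/stats) yields one counter per line, e.g.:
 *
 *	max_chain     :2
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :1024
 *	pool_used     :0
 *	...
 *
 * The numbers shown are illustrative only; note that pool_free and
 * pool_used are adjusted here by the percpu free counts, as explained
 * next to their definitions at the top of the file.
 */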

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};
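
/*
 * For comparison (editor's sketch): a production descriptor looks the same
 * minus the __initconst/__init markings this selftest uses, e.g.
 *
 *	static const struct debug_obj_descr mything_debug_descr = {
 *		.name		= "mything",
 *		.debug_hint	= mything_debug_hint,
 *		.fixup_init	= mything_fixup_init,
 *		.fixup_free	= mything_fixup_free,
 *	};
 *
 * All "mything" identifiers are hypothetical; only the callbacks a given
 * user actually needs have to be filled in, the rest may stay NULL.
 */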

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU
	 * is up and interrupts have been disabled, so it is safe to replace
	 * the active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}
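
/*
 * Worked example (illustrative): on a machine with 8 possible CPUs,
 * extras = 8 * ODEBUG_BATCH_SIZE = 128, so debug_objects_pool_size grows
 * from 1024 to 1152 and debug_objects_pool_min_level from 256 to 384,
 * scaling the global pool thresholds with the number of percpu pools
 * that can hold objects back from the global counts.
 */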