radix-tree.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2001 Momchil Velikov
  4. * Portions Copyright (C) 2001 Christoph Hellwig
  5. * Copyright (C) 2005 SGI, Christoph Lameter
  6. * Copyright (C) 2006 Nick Piggin
  7. * Copyright (C) 2012 Konstantin Khlebnikov
  8. * Copyright (C) 2016 Intel, Matthew Wilcox
  9. * Copyright (C) 2016 Intel, Ross Zwisler
  10. */
  11. #include <linux/bitmap.h>
  12. #include <linux/bitops.h>
  13. #include <linux/bug.h>
  14. #include <linux/cpu.h>
  15. #include <linux/errno.h>
  16. #include <linux/export.h>
  17. #include <linux/idr.h>
  18. #include <linux/init.h>
  19. #include <linux/kernel.h>
  20. #include <linux/kmemleak.h>
  21. #include <linux/percpu.h>
  22. #include <linux/preempt.h> /* in_interrupt() */
  23. #include <linux/radix-tree.h>
  24. #include <linux/rcupdate.h>
  25. #include <linux/slab.h>
  26. #include <linux/string.h>
  27. #include <linux/xarray.h>
  28. /*
  29. * Radix tree node cache.
  30. */
  31. struct kmem_cache *radix_tree_node_cachep;
  32. /*
  33. * The radix tree is variable-height, so an insert operation not only has
  34. * to build the branch to its corresponding item, it also has to build the
  35. * branch to existing items if the size has to be increased (by
  36. * radix_tree_extend).
  37. *
  38. * The worst case is a zero height tree with just a single item at index 0,
  39. * and then inserting an item at index ULONG_MAX. This requires 2 new branches
  40. * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
  41. * Hence:
  42. */
  43. #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
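/*
 * Worked example, assuming RADIX_TREE_MAP_SHIFT == 6 (the usual non-small
 * configuration) on a 64-bit machine: RADIX_TREE_MAX_PATH is
 * DIV_ROUND_UP(64, 6) == 11, so RADIX_TREE_PRELOAD_SIZE comes out to
 * 11 * 2 - 1 == 21 preallocated nodes per CPU.
 */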
  44. /*
  45. * The IDR does not have to be as high as the radix tree since it uses
  46. * signed integers, not unsigned longs.
  47. */
  48. #define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
  49. #define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
  50. RADIX_TREE_MAP_SHIFT))
  51. #define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
  52. /*
  53. * Per-cpu pool of preloaded nodes
  54. */
  55. DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
  56. .lock = INIT_LOCAL_LOCK(lock),
  57. };
  58. EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
  59. static inline struct radix_tree_node *entry_to_node(void *ptr)
  60. {
  61. return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
  62. }
  63. static inline void *node_to_entry(void *ptr)
  64. {
  65. return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
  66. }
  67. #define RADIX_TREE_RETRY XA_RETRY_ENTRY
  68. static inline unsigned long
  69. get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
  70. {
  71. return parent ? slot - parent->slots : 0;
  72. }
  73. static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
  74. struct radix_tree_node **nodep, unsigned long index)
  75. {
  76. unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
  77. void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
  78. *nodep = (void *)entry;
  79. return offset;
  80. }
  81. static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
  82. {
  83. return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
  84. }
  85. static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
  86. int offset)
  87. {
  88. __set_bit(offset, node->tags[tag]);
  89. }
  90. static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
  91. int offset)
  92. {
  93. __clear_bit(offset, node->tags[tag]);
  94. }
  95. static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
  96. int offset)
  97. {
  98. return test_bit(offset, node->tags[tag]);
  99. }
  100. static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
  101. {
  102. root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
  103. }
  104. static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
  105. {
  106. root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
  107. }
  108. static inline void root_tag_clear_all(struct radix_tree_root *root)
  109. {
  110. root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
  111. }
  112. static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
  113. {
  114. return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
  115. }
  116. static inline unsigned root_tags_get(const struct radix_tree_root *root)
  117. {
  118. return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
  119. }
  120. static inline bool is_idr(const struct radix_tree_root *root)
  121. {
  122. return !!(root->xa_flags & ROOT_IS_IDR);
  123. }
  124. /*
  125. * Returns 1 if any slot in the node has this tag set.
  126. * Otherwise returns 0.
  127. */
  128. static inline int any_tag_set(const struct radix_tree_node *node,
  129. unsigned int tag)
  130. {
  131. unsigned idx;
  132. for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
  133. if (node->tags[tag][idx])
  134. return 1;
  135. }
  136. return 0;
  137. }
  138. static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
  139. {
  140. bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
  141. }
  142. /**
  143. * radix_tree_find_next_bit - find the next set bit in a memory region
  144. *
  145. * @node: the radix tree node whose tag bitmap to search
  146. * @tag: the tag index to search within
  147. * @offset: the bit number to start searching at
  148. *
  149. * Unrollable variant of find_next_bit() for constant size arrays.
  150. * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
  151. * Returns the next bit offset, or RADIX_TREE_MAP_SIZE if nothing is found.
  152. */
  153. static __always_inline unsigned long
  154. radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
  155. unsigned long offset)
  156. {
  157. const unsigned long *addr = node->tags[tag];
  158. if (offset < RADIX_TREE_MAP_SIZE) {
  159. unsigned long tmp;
  160. addr += offset / BITS_PER_LONG;
  161. tmp = *addr >> (offset % BITS_PER_LONG);
  162. if (tmp)
  163. return __ffs(tmp) + offset;
  164. offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
  165. while (offset < RADIX_TREE_MAP_SIZE) {
  166. tmp = *++addr;
  167. if (tmp)
  168. return __ffs(tmp) + offset;
  169. offset += BITS_PER_LONG;
  170. }
  171. }
  172. return RADIX_TREE_MAP_SIZE;
  173. }
  174. static unsigned int iter_offset(const struct radix_tree_iter *iter)
  175. {
  176. return iter->index & RADIX_TREE_MAP_MASK;
  177. }
  178. /*
  179. * The maximum index which can be stored in a radix tree
  180. */
  181. static inline unsigned long shift_maxindex(unsigned int shift)
  182. {
  183. return (RADIX_TREE_MAP_SIZE << shift) - 1;
  184. }
  185. static inline unsigned long node_maxindex(const struct radix_tree_node *node)
  186. {
  187. return shift_maxindex(node->shift);
  188. }
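/*
 * Worked example, assuming RADIX_TREE_MAP_SIZE == 64: a node with shift 0
 * spans indices 0..63, a node with shift 6 spans 0..4095, a node with
 * shift 12 spans 0..262143, and so on, which is what shift_maxindex()
 * and node_maxindex() compute above.
 */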
  189. static unsigned long next_index(unsigned long index,
  190. const struct radix_tree_node *node,
  191. unsigned long offset)
  192. {
  193. return (index & ~node_maxindex(node)) + (offset << node->shift);
  194. }
  195. /*
  196. * This assumes that the caller has performed appropriate preallocation, and
  197. * that the caller has pinned this thread of control to the current CPU.
  198. */
  199. static struct radix_tree_node *
  200. radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
  201. struct radix_tree_root *root,
  202. unsigned int shift, unsigned int offset,
  203. unsigned int count, unsigned int nr_values)
  204. {
  205. struct radix_tree_node *ret = NULL;
  206. /*
  207. * Preload code isn't irq safe and it doesn't make sense to use
  208. * preloading during an interrupt anyway as all the allocations have
  209. * to be atomic. So just do normal allocation when in interrupt.
  210. */
  211. if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
  212. struct radix_tree_preload *rtp;
  213. /*
  214. * Even if the caller has preloaded, try to allocate from the
  215. * cache first for the new node to get accounted to the memory
  216. * cgroup.
  217. */
  218. ret = kmem_cache_alloc(radix_tree_node_cachep,
  219. gfp_mask | __GFP_NOWARN);
  220. if (ret)
  221. goto out;
  222. /*
  223. * Provided the caller has preloaded here, we will always
  224. * succeed in getting a node here (and never reach
  225. * kmem_cache_alloc)
  226. */
  227. rtp = this_cpu_ptr(&radix_tree_preloads);
  228. if (rtp->nr) {
  229. ret = rtp->nodes;
  230. rtp->nodes = ret->parent;
  231. rtp->nr--;
  232. }
  233. /*
  234. * Update the allocation stack trace as this is more useful
  235. * for debugging.
  236. */
  237. kmemleak_update_trace(ret);
  238. goto out;
  239. }
  240. ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
  241. out:
  242. BUG_ON(radix_tree_is_internal_node(ret));
  243. if (ret) {
  244. ret->shift = shift;
  245. ret->offset = offset;
  246. ret->count = count;
  247. ret->nr_values = nr_values;
  248. ret->parent = parent;
  249. ret->array = root;
  250. }
  251. return ret;
  252. }
  253. void radix_tree_node_rcu_free(struct rcu_head *head)
  254. {
  255. struct radix_tree_node *node =
  256. container_of(head, struct radix_tree_node, rcu_head);
  257. /*
  258. * Must only free zeroed nodes into the slab. We can be left with
  259. * non-NULL entries by radix_tree_free_nodes, so clear the entries
  260. * and tags here.
  261. */
  262. memset(node->slots, 0, sizeof(node->slots));
  263. memset(node->tags, 0, sizeof(node->tags));
  264. INIT_LIST_HEAD(&node->private_list);
  265. kmem_cache_free(radix_tree_node_cachep, node);
  266. }
  267. static inline void
  268. radix_tree_node_free(struct radix_tree_node *node)
  269. {
  270. call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
  271. }
  272. /*
  273. * Load up this CPU's radix_tree_node buffer with sufficient objects to
  274. * ensure that the addition of a single element in the tree cannot fail. On
  275. * success, return zero, with preemption disabled. On error, return -ENOMEM
  276. * with preemption not disabled.
  277. *
  278. * To make use of this facility, the radix tree must be initialised without
  279. * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
  280. */
  281. static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
  282. {
  283. struct radix_tree_preload *rtp;
  284. struct radix_tree_node *node;
  285. int ret = -ENOMEM;
  286. /*
  287. * Nodes preloaded by one cgroup can be used by another cgroup, so
  288. * they should never be accounted to any particular memory cgroup.
  289. */
  290. gfp_mask &= ~__GFP_ACCOUNT;
  291. local_lock(&radix_tree_preloads.lock);
  292. rtp = this_cpu_ptr(&radix_tree_preloads);
  293. while (rtp->nr < nr) {
  294. local_unlock(&radix_tree_preloads.lock);
  295. node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
  296. if (node == NULL)
  297. goto out;
  298. local_lock(&radix_tree_preloads.lock);
  299. rtp = this_cpu_ptr(&radix_tree_preloads);
  300. if (rtp->nr < nr) {
  301. node->parent = rtp->nodes;
  302. rtp->nodes = node;
  303. rtp->nr++;
  304. } else {
  305. kmem_cache_free(radix_tree_node_cachep, node);
  306. }
  307. }
  308. ret = 0;
  309. out:
  310. return ret;
  311. }
  312. /*
  313. * Load up this CPU's radix_tree_node buffer with sufficient objects to
  314. * ensure that the addition of a single element in the tree cannot fail. On
  315. * success, return zero, with preemption disabled. On error, return -ENOMEM
  316. * with preemption not disabled.
  317. *
  318. * To make use of this facility, the radix tree must be initialised without
  319. * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
  320. */
  321. int radix_tree_preload(gfp_t gfp_mask)
  322. {
  323. /* Warn on non-sensical use... */
  324. WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
  325. return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
  326. }
  327. EXPORT_SYMBOL(radix_tree_preload);
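/*
 * Minimal usage sketch (hypothetical tree and lock names), following the
 * preload pattern described above: preallocate outside the lock, insert
 * under it, then drop the preload state with radix_tree_preload_end():
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */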
  328. /*
  329. * The same as the function above, except we don't guarantee that preloading happens.
  330. * We do it if we decide it helps. On success, return zero with preemption
  331. * disabled. On error, return -ENOMEM with preemption not disabled.
  332. */
  333. int radix_tree_maybe_preload(gfp_t gfp_mask)
  334. {
  335. if (gfpflags_allow_blocking(gfp_mask))
  336. return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
  337. /* Preloading doesn't help anything with this gfp mask, skip it */
  338. local_lock(&radix_tree_preloads.lock);
  339. return 0;
  340. }
  341. EXPORT_SYMBOL(radix_tree_maybe_preload);
  342. static unsigned radix_tree_load_root(const struct radix_tree_root *root,
  343. struct radix_tree_node **nodep, unsigned long *maxindex)
  344. {
  345. struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
  346. *nodep = node;
  347. if (likely(radix_tree_is_internal_node(node))) {
  348. node = entry_to_node(node);
  349. *maxindex = node_maxindex(node);
  350. return node->shift + RADIX_TREE_MAP_SHIFT;
  351. }
  352. *maxindex = 0;
  353. return 0;
  354. }
  355. /*
  356. * Extend a radix tree so it can store key @index.
  357. */
  358. static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
  359. unsigned long index, unsigned int shift)
  360. {
  361. void *entry;
  362. unsigned int maxshift;
  363. int tag;
  364. /* Figure out what the shift should be. */
  365. maxshift = shift;
  366. while (index > shift_maxindex(maxshift))
  367. maxshift += RADIX_TREE_MAP_SHIFT;
  368. entry = rcu_dereference_raw(root->xa_head);
  369. if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
  370. goto out;
  371. do {
  372. struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
  373. root, shift, 0, 1, 0);
  374. if (!node)
  375. return -ENOMEM;
  376. if (is_idr(root)) {
  377. all_tag_set(node, IDR_FREE);
  378. if (!root_tag_get(root, IDR_FREE)) {
  379. tag_clear(node, IDR_FREE, 0);
  380. root_tag_set(root, IDR_FREE);
  381. }
  382. } else {
  383. /* Propagate the aggregated tag info to the new child */
  384. for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
  385. if (root_tag_get(root, tag))
  386. tag_set(node, tag, 0);
  387. }
  388. }
  389. BUG_ON(shift > BITS_PER_LONG);
  390. if (radix_tree_is_internal_node(entry)) {
  391. entry_to_node(entry)->parent = node;
  392. } else if (xa_is_value(entry)) {
  393. /* Moving a value entry from root->xa_head into a node */
  394. node->nr_values = 1;
  395. }
  396. /*
  397. * entry was already in the radix tree, so we do not need
  398. * rcu_assign_pointer here
  399. */
  400. node->slots[0] = (void __rcu *)entry;
  401. entry = node_to_entry(node);
  402. rcu_assign_pointer(root->xa_head, entry);
  403. shift += RADIX_TREE_MAP_SHIFT;
  404. } while (shift <= maxshift);
  405. out:
  406. return maxshift + RADIX_TREE_MAP_SHIFT;
  407. }
  408. /**
  409. * radix_tree_shrink - shrink radix tree to minimum height
  410. * @root: radix tree root
  411. */
  412. static inline bool radix_tree_shrink(struct radix_tree_root *root)
  413. {
  414. bool shrunk = false;
  415. for (;;) {
  416. struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
  417. struct radix_tree_node *child;
  418. if (!radix_tree_is_internal_node(node))
  419. break;
  420. node = entry_to_node(node);
  421. /*
  422. * If the candidate node has more than one child, or its only child
  423. * is not at the leftmost slot, we cannot shrink.
  424. */
  425. if (node->count != 1)
  426. break;
  427. child = rcu_dereference_raw(node->slots[0]);
  428. if (!child)
  429. break;
  430. /*
  431. * For an IDR, we must not shrink entry 0 into the root in
  432. * case somebody calls idr_replace() with a pointer that
  433. * appears to be an internal entry
  434. */
  435. if (!node->shift && is_idr(root))
  436. break;
  437. if (radix_tree_is_internal_node(child))
  438. entry_to_node(child)->parent = NULL;
  439. /*
  440. * We don't need rcu_assign_pointer(), since we are simply
  441. * moving the node from one part of the tree to another: if it
  442. * was safe to dereference the old pointer to it
  443. * (node->slots[0]), it will be safe to dereference the new
  444. * one (root->xa_head) as far as dependent read barriers go.
  445. */
  446. root->xa_head = (void __rcu *)child;
  447. if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
  448. root_tag_clear(root, IDR_FREE);
  449. /*
  450. * We have a dilemma here. The node's slot[0] must not be
  451. * NULLed in case there are concurrent lookups expecting to
  452. * find the item. However if this was a bottom-level node,
  453. * then it may be subject to the slot pointer being visible
  454. * to callers dereferencing it. If the item corresponding to
  455. * slot[0] is subsequently deleted, these callers would expect
  456. * their slot to become empty sooner or later.
  457. *
  458. * For example, lockless pagecache will look up a slot, deref
  459. * the page pointer, and if the page has 0 refcount it means it
  460. * was concurrently deleted from pagecache so try the deref
  461. * again. Fortunately there is already a requirement for logic
  462. * to retry the entire slot lookup -- the indirect pointer
  463. * problem (replacing direct root node with an indirect pointer
  464. * also results in a stale slot). So tag the slot as indirect
  465. * to force callers to retry.
  466. */
  467. node->count = 0;
  468. if (!radix_tree_is_internal_node(child)) {
  469. node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
  470. }
  471. WARN_ON_ONCE(!list_empty(&node->private_list));
  472. radix_tree_node_free(node);
  473. shrunk = true;
  474. }
  475. return shrunk;
  476. }
  477. static bool delete_node(struct radix_tree_root *root,
  478. struct radix_tree_node *node)
  479. {
  480. bool deleted = false;
  481. do {
  482. struct radix_tree_node *parent;
  483. if (node->count) {
  484. if (node_to_entry(node) ==
  485. rcu_dereference_raw(root->xa_head))
  486. deleted |= radix_tree_shrink(root);
  487. return deleted;
  488. }
  489. parent = node->parent;
  490. if (parent) {
  491. parent->slots[node->offset] = NULL;
  492. parent->count--;
  493. } else {
  494. /*
  495. * Shouldn't the tags already have all been cleared
  496. * by the caller?
  497. */
  498. if (!is_idr(root))
  499. root_tag_clear_all(root);
  500. root->xa_head = NULL;
  501. }
  502. WARN_ON_ONCE(!list_empty(&node->private_list));
  503. radix_tree_node_free(node);
  504. deleted = true;
  505. node = parent;
  506. } while (node);
  507. return deleted;
  508. }
  509. /**
  510. * __radix_tree_create - create a slot in a radix tree
  511. * @root: radix tree root
  512. * @index: index key
  513. * @nodep: returns node
  514. * @slotp: returns slot
  515. *
  516. * Create, if necessary, and return the node and slot for an item
  517. * at position @index in the radix tree @root.
  518. *
  519. * Until there is more than one item in the tree, no nodes are
  520. * allocated and @root->xa_head is used as a direct slot instead of
  521. * pointing to a node, in which case *@nodep will be NULL.
  522. *
  523. * Returns -ENOMEM, or 0 for success.
  524. */
  525. static int __radix_tree_create(struct radix_tree_root *root,
  526. unsigned long index, struct radix_tree_node **nodep,
  527. void __rcu ***slotp)
  528. {
  529. struct radix_tree_node *node = NULL, *child;
  530. void __rcu **slot = (void __rcu **)&root->xa_head;
  531. unsigned long maxindex;
  532. unsigned int shift, offset = 0;
  533. unsigned long max = index;
  534. gfp_t gfp = root_gfp_mask(root);
  535. shift = radix_tree_load_root(root, &child, &maxindex);
  536. /* Make sure the tree is high enough. */
  537. if (max > maxindex) {
  538. int error = radix_tree_extend(root, gfp, max, shift);
  539. if (error < 0)
  540. return error;
  541. shift = error;
  542. child = rcu_dereference_raw(root->xa_head);
  543. }
  544. while (shift > 0) {
  545. shift -= RADIX_TREE_MAP_SHIFT;
  546. if (child == NULL) {
  547. /* Have to add a child node. */
  548. child = radix_tree_node_alloc(gfp, node, root, shift,
  549. offset, 0, 0);
  550. if (!child)
  551. return -ENOMEM;
  552. rcu_assign_pointer(*slot, node_to_entry(child));
  553. if (node)
  554. node->count++;
  555. } else if (!radix_tree_is_internal_node(child))
  556. break;
  557. /* Go a level down */
  558. node = entry_to_node(child);
  559. offset = radix_tree_descend(node, &child, index);
  560. slot = &node->slots[offset];
  561. }
  562. if (nodep)
  563. *nodep = node;
  564. if (slotp)
  565. *slotp = slot;
  566. return 0;
  567. }
  568. /*
  569. * Free any nodes below this node. The tree is presumed to not need
  570. * shrinking, and any user data in the tree is presumed to not need a
  571. * destructor called on it. If we need to add a destructor, we can
  572. * add that functionality later. Note that we may not clear tags or
  573. * slots from the tree as an RCU walker may still have a pointer into
  574. * this subtree. We could replace the entries with RADIX_TREE_RETRY,
  575. * but we'll still have to clear those in rcu_free.
  576. */
  577. static void radix_tree_free_nodes(struct radix_tree_node *node)
  578. {
  579. unsigned offset = 0;
  580. struct radix_tree_node *child = entry_to_node(node);
  581. for (;;) {
  582. void *entry = rcu_dereference_raw(child->slots[offset]);
  583. if (xa_is_node(entry) && child->shift) {
  584. child = entry_to_node(entry);
  585. offset = 0;
  586. continue;
  587. }
  588. offset++;
  589. while (offset == RADIX_TREE_MAP_SIZE) {
  590. struct radix_tree_node *old = child;
  591. offset = child->offset + 1;
  592. child = child->parent;
  593. WARN_ON_ONCE(!list_empty(&old->private_list));
  594. radix_tree_node_free(old);
  595. if (old == entry_to_node(node))
  596. return;
  597. }
  598. }
  599. }
  600. static inline int insert_entries(struct radix_tree_node *node,
  601. void __rcu **slot, void *item, bool replace)
  602. {
  603. if (*slot)
  604. return -EEXIST;
  605. rcu_assign_pointer(*slot, item);
  606. if (node) {
  607. node->count++;
  608. if (xa_is_value(item))
  609. node->nr_values++;
  610. }
  611. return 1;
  612. }
  613. /**
  614. * radix_tree_insert - insert into a radix tree
  615. * @root: radix tree root
  616. * @index: index key
  617. * @item: item to insert
  618. *
  619. * Insert an item into the radix tree at position @index.
  620. */
  621. int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
  622. void *item)
  623. {
  624. struct radix_tree_node *node;
  625. void __rcu **slot;
  626. int error;
  627. BUG_ON(radix_tree_is_internal_node(item));
  628. error = __radix_tree_create(root, index, &node, &slot);
  629. if (error)
  630. return error;
  631. error = insert_entries(node, slot, item, false);
  632. if (error < 0)
  633. return error;
  634. if (node) {
  635. unsigned offset = get_slot_offset(node, slot);
  636. BUG_ON(tag_get(node, 0, offset));
  637. BUG_ON(tag_get(node, 1, offset));
  638. BUG_ON(tag_get(node, 2, offset));
  639. } else {
  640. BUG_ON(root_tags_get(root));
  641. }
  642. return 0;
  643. }
  644. EXPORT_SYMBOL(radix_tree_insert);
  645. /**
  646. * __radix_tree_lookup - lookup an item in a radix tree
  647. * @root: radix tree root
  648. * @index: index key
  649. * @nodep: returns node
  650. * @slotp: returns slot
  651. *
  652. * Lookup and return the item at position @index in the radix
  653. * tree @root.
  654. *
  655. * Until there is more than one item in the tree, no nodes are
  656. * allocated and @root->xa_head is used as a direct slot instead of
  657. * pointing to a node, in which case *@nodep will be NULL.
  658. */
  659. void *__radix_tree_lookup(const struct radix_tree_root *root,
  660. unsigned long index, struct radix_tree_node **nodep,
  661. void __rcu ***slotp)
  662. {
  663. struct radix_tree_node *node, *parent;
  664. unsigned long maxindex;
  665. void __rcu **slot;
  666. restart:
  667. parent = NULL;
  668. slot = (void __rcu **)&root->xa_head;
  669. radix_tree_load_root(root, &node, &maxindex);
  670. if (index > maxindex)
  671. return NULL;
  672. while (radix_tree_is_internal_node(node)) {
  673. unsigned offset;
  674. parent = entry_to_node(node);
  675. offset = radix_tree_descend(parent, &node, index);
  676. slot = parent->slots + offset;
  677. if (node == RADIX_TREE_RETRY)
  678. goto restart;
  679. if (parent->shift == 0)
  680. break;
  681. }
  682. if (nodep)
  683. *nodep = parent;
  684. if (slotp)
  685. *slotp = slot;
  686. return node;
  687. }
  688. /**
  689. * radix_tree_lookup_slot - lookup a slot in a radix tree
  690. * @root: radix tree root
  691. * @index: index key
  692. *
  693. * Returns: the slot corresponding to the position @index in the
  694. * radix tree @root. This is useful for update-if-exists operations.
  695. *
  696. * This function can be called under rcu_read_lock iff the slot is not
  697. * modified by radix_tree_replace_slot, otherwise it must be called
  698. * exclusive from other writers. Any dereference of the slot must be done
  699. * using radix_tree_deref_slot.
  700. */
  701. void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
  702. unsigned long index)
  703. {
  704. void __rcu **slot;
  705. if (!__radix_tree_lookup(root, index, NULL, &slot))
  706. return NULL;
  707. return slot;
  708. }
  709. EXPORT_SYMBOL(radix_tree_lookup_slot);
  710. /**
  711. * radix_tree_lookup - perform lookup operation on a radix tree
  712. * @root: radix tree root
  713. * @index: index key
  714. *
  715. * Lookup the item at the position @index in the radix tree @root.
  716. *
  717. * This function can be called under rcu_read_lock, however the caller
  718. * must manage lifetimes of leaf nodes (eg. RCU may also be used to free
  719. * them safely). No RCU barriers are required to access or modify the
  720. * returned item, however.
  721. */
  722. void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
  723. {
  724. return __radix_tree_lookup(root, index, NULL, NULL);
  725. }
  726. EXPORT_SYMBOL(radix_tree_lookup);
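/*
 * Minimal lookup sketch (hypothetical names): lockless readers hold the
 * RCU read lock across the lookup and pin the item before using it;
 * try_get_ref() stands in for whatever reference-taking the caller uses:
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item && !try_get_ref(item))
 *		item = NULL;
 *	rcu_read_unlock();
 */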
  727. static void replace_slot(void __rcu **slot, void *item,
  728. struct radix_tree_node *node, int count, int values)
  729. {
  730. if (node && (count || values)) {
  731. node->count += count;
  732. node->nr_values += values;
  733. }
  734. rcu_assign_pointer(*slot, item);
  735. }
  736. static bool node_tag_get(const struct radix_tree_root *root,
  737. const struct radix_tree_node *node,
  738. unsigned int tag, unsigned int offset)
  739. {
  740. if (node)
  741. return tag_get(node, tag, offset);
  742. return root_tag_get(root, tag);
  743. }
  744. /*
  745. * IDR users want to be able to store NULL in the tree, so if the slot isn't
  746. * free, don't adjust the count, even if it's transitioning between NULL and
  747. * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
  748. * have empty bits, but it only stores NULL in slots when they're being
  749. * deleted.
  750. */
  751. static int calculate_count(struct radix_tree_root *root,
  752. struct radix_tree_node *node, void __rcu **slot,
  753. void *item, void *old)
  754. {
  755. if (is_idr(root)) {
  756. unsigned offset = get_slot_offset(node, slot);
  757. bool free = node_tag_get(root, node, IDR_FREE, offset);
  758. if (!free)
  759. return 0;
  760. if (!old)
  761. return 1;
  762. }
  763. return !!item - !!old;
  764. }
  765. /**
  766. * __radix_tree_replace - replace item in a slot
  767. * @root: radix tree root
  768. * @node: pointer to tree node
  769. * @slot: pointer to slot in @node
  770. * @item: new item to store in the slot.
  771. *
  772. * For use with __radix_tree_lookup(). Caller must hold tree write locked
  773. * across slot lookup and replacement.
  774. */
  775. void __radix_tree_replace(struct radix_tree_root *root,
  776. struct radix_tree_node *node,
  777. void __rcu **slot, void *item)
  778. {
  779. void *old = rcu_dereference_raw(*slot);
  780. int values = !!xa_is_value(item) - !!xa_is_value(old);
  781. int count = calculate_count(root, node, slot, item, old);
  782. /*
  783. * This function supports replacing value entries and
  784. * deleting entries, but that needs accounting against the
  785. * node unless the slot is root->xa_head.
  786. */
  787. WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
  788. (count || values));
  789. replace_slot(slot, item, node, count, values);
  790. if (!node)
  791. return;
  792. delete_node(root, node);
  793. }
  794. /**
  795. * radix_tree_replace_slot - replace item in a slot
  796. * @root: radix tree root
  797. * @slot: pointer to slot
  798. * @item: new item to store in the slot.
  799. *
  800. * For use with radix_tree_lookup_slot() and
  801. * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
  802. * across slot lookup and replacement.
  803. *
  804. * NOTE: This cannot be used to switch between non-entries (empty slots),
  805. * regular entries, and value entries, as that requires accounting
  806. * inside the radix tree node. When switching from one type of entry to
  807. * another or deleting an entry, use __radix_tree_lookup() and
  808. * __radix_tree_replace() or radix_tree_iter_replace().
  809. */
  810. void radix_tree_replace_slot(struct radix_tree_root *root,
  811. void __rcu **slot, void *item)
  812. {
  813. __radix_tree_replace(root, NULL, slot, item);
  814. }
  815. EXPORT_SYMBOL(radix_tree_replace_slot);
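/*
 * Minimal update-if-exists sketch (hypothetical names), pairing
 * radix_tree_lookup_slot() with radix_tree_replace_slot() while the
 * caller holds the tree write-side lock:
 *
 *	spin_lock(&my_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(&my_tree, slot, new_item);
 *	spin_unlock(&my_lock);
 */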
  816. /**
  817. * radix_tree_iter_replace - replace item in a slot
  818. * @root: radix tree root
  819. * @slot: pointer to slot
  820. * @item: new item to store in the slot.
  821. *
  822. * For use with radix_tree_for_each_slot().
  823. * Caller must hold tree write locked.
  824. */
  825. void radix_tree_iter_replace(struct radix_tree_root *root,
  826. const struct radix_tree_iter *iter,
  827. void __rcu **slot, void *item)
  828. {
  829. __radix_tree_replace(root, iter->node, slot, item);
  830. }
  831. static void node_tag_set(struct radix_tree_root *root,
  832. struct radix_tree_node *node,
  833. unsigned int tag, unsigned int offset)
  834. {
  835. while (node) {
  836. if (tag_get(node, tag, offset))
  837. return;
  838. tag_set(node, tag, offset);
  839. offset = node->offset;
  840. node = node->parent;
  841. }
  842. if (!root_tag_get(root, tag))
  843. root_tag_set(root, tag);
  844. }
  845. /**
  846. * radix_tree_tag_set - set a tag on a radix tree node
  847. * @root: radix tree root
  848. * @index: index key
  849. * @tag: tag index
  850. *
  851. * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
  852. * corresponding to @index in the radix tree, from
  853. * the root all the way down to the leaf node.
  854. *
  855. * Returns the address of the tagged item. Setting a tag on a not-present
  856. * item is a bug.
  857. */
  858. void *radix_tree_tag_set(struct radix_tree_root *root,
  859. unsigned long index, unsigned int tag)
  860. {
  861. struct radix_tree_node *node, *parent;
  862. unsigned long maxindex;
  863. radix_tree_load_root(root, &node, &maxindex);
  864. BUG_ON(index > maxindex);
  865. while (radix_tree_is_internal_node(node)) {
  866. unsigned offset;
  867. parent = entry_to_node(node);
  868. offset = radix_tree_descend(parent, &node, index);
  869. BUG_ON(!node);
  870. if (!tag_get(parent, tag, offset))
  871. tag_set(parent, tag, offset);
  872. }
  873. /* set the root's tag bit */
  874. if (!root_tag_get(root, tag))
  875. root_tag_set(root, tag);
  876. return node;
  877. }
  878. EXPORT_SYMBOL(radix_tree_tag_set);
  879. static void node_tag_clear(struct radix_tree_root *root,
  880. struct radix_tree_node *node,
  881. unsigned int tag, unsigned int offset)
  882. {
  883. while (node) {
  884. if (!tag_get(node, tag, offset))
  885. return;
  886. tag_clear(node, tag, offset);
  887. if (any_tag_set(node, tag))
  888. return;
  889. offset = node->offset;
  890. node = node->parent;
  891. }
  892. /* clear the root's tag bit */
  893. if (root_tag_get(root, tag))
  894. root_tag_clear(root, tag);
  895. }
  896. /**
  897. * radix_tree_tag_clear - clear a tag on a radix tree node
  898. * @root: radix tree root
  899. * @index: index key
  900. * @tag: tag index
  901. *
  902. * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
  903. * corresponding to @index in the radix tree. If this causes
  904. * the leaf node to have no tags set then clear the tag in the
  905. * next-to-leaf node, etc.
  906. *
  907. * Returns the address of the tagged item on success, else NULL. ie:
  908. * has the same return value and semantics as radix_tree_lookup().
  909. */
  910. void *radix_tree_tag_clear(struct radix_tree_root *root,
  911. unsigned long index, unsigned int tag)
  912. {
  913. struct radix_tree_node *node, *parent;
  914. unsigned long maxindex;
  915. int offset;
  916. radix_tree_load_root(root, &node, &maxindex);
  917. if (index > maxindex)
  918. return NULL;
  919. parent = NULL;
  920. while (radix_tree_is_internal_node(node)) {
  921. parent = entry_to_node(node);
  922. offset = radix_tree_descend(parent, &node, index);
  923. }
  924. if (node)
  925. node_tag_clear(root, parent, tag, offset);
  926. return node;
  927. }
  928. EXPORT_SYMBOL(radix_tree_tag_clear);
  929. /**
  930. * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
  931. * @root: radix tree root
  932. * @iter: iterator state
  933. * @tag: tag to clear
  934. */
  935. void radix_tree_iter_tag_clear(struct radix_tree_root *root,
  936. const struct radix_tree_iter *iter, unsigned int tag)
  937. {
  938. node_tag_clear(root, iter->node, tag, iter_offset(iter));
  939. }
  940. /**
  941. * radix_tree_tag_get - get a tag on a radix tree node
  942. * @root: radix tree root
  943. * @index: index key
  944. * @tag: tag index (< RADIX_TREE_MAX_TAGS)
  945. *
  946. * Return values:
  947. *
  948. * 0: tag not present or not set
  949. * 1: tag set
  950. *
  951. * Note that the return value of this function may not be relied on, even if
  952. * the RCU lock is held, unless tag modification and node deletion are excluded
  953. * from concurrency.
  954. */
  955. int radix_tree_tag_get(const struct radix_tree_root *root,
  956. unsigned long index, unsigned int tag)
  957. {
  958. struct radix_tree_node *node, *parent;
  959. unsigned long maxindex;
  960. if (!root_tag_get(root, tag))
  961. return 0;
  962. radix_tree_load_root(root, &node, &maxindex);
  963. if (index > maxindex)
  964. return 0;
  965. while (radix_tree_is_internal_node(node)) {
  966. unsigned offset;
  967. parent = entry_to_node(node);
  968. offset = radix_tree_descend(parent, &node, index);
  969. if (!tag_get(parent, tag, offset))
  970. return 0;
  971. if (node == RADIX_TREE_RETRY)
  972. break;
  973. }
  974. return 1;
  975. }
  976. EXPORT_SYMBOL(radix_tree_tag_get);
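/*
 * Minimal tag usage sketch (hypothetical names; MY_TAG must be less than
 * RADIX_TREE_MAX_TAGS): set a tag under the write lock, test it later,
 * and clear it once the entry has been processed:
 *
 *	spin_lock(&my_lock);
 *	radix_tree_tag_set(&my_tree, index, MY_TAG);
 *	spin_unlock(&my_lock);
 *
 *	if (radix_tree_tag_get(&my_tree, index, MY_TAG)) {
 *		spin_lock(&my_lock);
 *		radix_tree_tag_clear(&my_tree, index, MY_TAG);
 *		spin_unlock(&my_lock);
 *	}
 */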
  977. /* Construct iter->tags bit-mask from node->tags[tag] array */
  978. static void set_iter_tags(struct radix_tree_iter *iter,
  979. struct radix_tree_node *node, unsigned offset,
  980. unsigned tag)
  981. {
  982. unsigned tag_long = offset / BITS_PER_LONG;
  983. unsigned tag_bit = offset % BITS_PER_LONG;
  984. if (!node) {
  985. iter->tags = 1;
  986. return;
  987. }
  988. iter->tags = node->tags[tag][tag_long] >> tag_bit;
  989. /* This never happens if RADIX_TREE_TAG_LONGS == 1 */
  990. if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
  991. /* Pick tags from next element */
  992. if (tag_bit)
  993. iter->tags |= node->tags[tag][tag_long + 1] <<
  994. (BITS_PER_LONG - tag_bit);
  995. /* Clip chunk size, here only BITS_PER_LONG tags */
  996. iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
  997. }
  998. }
  999. void __rcu **radix_tree_iter_resume(void __rcu **slot,
  1000. struct radix_tree_iter *iter)
  1001. {
  1002. slot++;
  1003. iter->index = __radix_tree_iter_add(iter, 1);
  1004. iter->next_index = iter->index;
  1005. iter->tags = 0;
  1006. return NULL;
  1007. }
  1008. EXPORT_SYMBOL(radix_tree_iter_resume);
  1009. /**
  1010. * radix_tree_next_chunk - find next chunk of slots for iteration
  1011. *
  1012. * @root: radix tree root
  1013. * @iter: iterator state
  1014. * @flags: RADIX_TREE_ITER_* flags and tag index
  1015. * Returns: pointer to chunk first slot, or NULL if iteration is over
  1016. */
  1017. void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
  1018. struct radix_tree_iter *iter, unsigned flags)
  1019. {
  1020. unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
  1021. struct radix_tree_node *node, *child;
  1022. unsigned long index, offset, maxindex;
  1023. if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
  1024. return NULL;
  1025. /*
  1026. * Catch next_index overflow after ~0UL. iter->index never overflows
  1027. * during iteration; it can be zero only at the beginning.
  1028. * And we cannot overflow iter->next_index in a single step,
  1029. * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
  1030. *
  1031. * This condition is also used by radix_tree_next_slot() to stop
  1032. * contiguous iteration, and to forbid switching to the next chunk.
  1033. */
  1034. index = iter->next_index;
  1035. if (!index && iter->index)
  1036. return NULL;
  1037. restart:
  1038. radix_tree_load_root(root, &child, &maxindex);
  1039. if (index > maxindex)
  1040. return NULL;
  1041. if (!child)
  1042. return NULL;
  1043. if (!radix_tree_is_internal_node(child)) {
  1044. /* Single-slot tree */
  1045. iter->index = index;
  1046. iter->next_index = maxindex + 1;
  1047. iter->tags = 1;
  1048. iter->node = NULL;
  1049. return (void __rcu **)&root->xa_head;
  1050. }
  1051. do {
  1052. node = entry_to_node(child);
  1053. offset = radix_tree_descend(node, &child, index);
  1054. if ((flags & RADIX_TREE_ITER_TAGGED) ?
  1055. !tag_get(node, tag, offset) : !child) {
  1056. /* Hole detected */
  1057. if (flags & RADIX_TREE_ITER_CONTIG)
  1058. return NULL;
  1059. if (flags & RADIX_TREE_ITER_TAGGED)
  1060. offset = radix_tree_find_next_bit(node, tag,
  1061. offset + 1);
  1062. else
  1063. while (++offset < RADIX_TREE_MAP_SIZE) {
  1064. void *slot = rcu_dereference_raw(
  1065. node->slots[offset]);
  1066. if (slot)
  1067. break;
  1068. }
  1069. index &= ~node_maxindex(node);
  1070. index += offset << node->shift;
  1071. /* Overflow after ~0UL */
  1072. if (!index)
  1073. return NULL;
  1074. if (offset == RADIX_TREE_MAP_SIZE)
  1075. goto restart;
  1076. child = rcu_dereference_raw(node->slots[offset]);
  1077. }
  1078. if (!child)
  1079. goto restart;
  1080. if (child == RADIX_TREE_RETRY)
  1081. break;
  1082. } while (node->shift && radix_tree_is_internal_node(child));
  1083. /* Update the iterator state */
  1084. iter->index = (index &~ node_maxindex(node)) | offset;
  1085. iter->next_index = (index | node_maxindex(node)) + 1;
  1086. iter->node = node;
  1087. if (flags & RADIX_TREE_ITER_TAGGED)
  1088. set_iter_tags(iter, node, offset, tag);
  1089. return node->slots + offset;
  1090. }
  1091. EXPORT_SYMBOL(radix_tree_next_chunk);
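/*
 * Minimal iteration sketch (hypothetical names): callers normally drive
 * radix_tree_next_chunk() through the radix_tree_for_each_slot() helper
 * rather than calling it directly:
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
 *		pr_debug("present entry at index %lu\n", iter.index);
 *	rcu_read_unlock();
 */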
  1092. /**
  1093. * radix_tree_gang_lookup - perform multiple lookup on a radix tree
  1094. * @root: radix tree root
  1095. * @results: where the results of the lookup are placed
  1096. * @first_index: start the lookup from this key
  1097. * @max_items: place up to this many items at *results
  1098. *
  1099. * Performs an index-ascending scan of the tree for present items. Places
  1100. * them at *@results and returns the number of items which were placed at
  1101. * *@results.
  1102. *
  1103. * The implementation is naive.
  1104. *
  1105. * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
  1106. * rcu_read_lock. In this case, rather than the returned results being
  1107. * an atomic snapshot of the tree at a single point in time, the
  1108. * semantics of an RCU protected gang lookup are as though multiple
  1109. * radix_tree_lookups had been issued individually, with the results
  1110. * stored in 'results'.
  1111. */
  1112. unsigned int
  1113. radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
  1114. unsigned long first_index, unsigned int max_items)
  1115. {
  1116. struct radix_tree_iter iter;
  1117. void __rcu **slot;
  1118. unsigned int ret = 0;
  1119. if (unlikely(!max_items))
  1120. return 0;
  1121. radix_tree_for_each_slot(slot, root, &iter, first_index) {
  1122. results[ret] = rcu_dereference_raw(*slot);
  1123. if (!results[ret])
  1124. continue;
  1125. if (radix_tree_is_internal_node(results[ret])) {
  1126. slot = radix_tree_iter_retry(&iter);
  1127. continue;
  1128. }
  1129. if (++ret == max_items)
  1130. break;
  1131. }
  1132. return ret;
  1133. }
  1134. EXPORT_SYMBOL(radix_tree_gang_lookup);
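/*
 * Minimal gang lookup sketch (hypothetical names): collect up to 16
 * present items starting at index 0 under the RCU read lock:
 *
 *	void *batch[16];
 *	unsigned int n;
 *
 *	rcu_read_lock();
 *	n = radix_tree_gang_lookup(&my_tree, batch, 0, ARRAY_SIZE(batch));
 *	rcu_read_unlock();
 */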
  1135. /**
  1136. * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
  1137. * based on a tag
  1138. * @root: radix tree root
  1139. * @results: where the results of the lookup are placed
  1140. * @first_index: start the lookup from this key
  1141. * @max_items: place up to this many items at *results
  1142. * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
  1143. *
  1144. * Performs an index-ascending scan of the tree for present items which
  1145. * have the tag indexed by @tag set. Places the items at *@results and
  1146. * returns the number of items which were placed at *@results.
  1147. */
  1148. unsigned int
  1149. radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
  1150. unsigned long first_index, unsigned int max_items,
  1151. unsigned int tag)
  1152. {
  1153. struct radix_tree_iter iter;
  1154. void __rcu **slot;
  1155. unsigned int ret = 0;
  1156. if (unlikely(!max_items))
  1157. return 0;
  1158. radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
  1159. results[ret] = rcu_dereference_raw(*slot);
  1160. if (!results[ret])
  1161. continue;
  1162. if (radix_tree_is_internal_node(results[ret])) {
  1163. slot = radix_tree_iter_retry(&iter);
  1164. continue;
  1165. }
  1166. if (++ret == max_items)
  1167. break;
  1168. }
  1169. return ret;
  1170. }
  1171. EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
  1172. /**
  1173. * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
  1174. * radix tree based on a tag
  1175. * @root: radix tree root
  1176. * @results: where the results of the lookup are placed
  1177. * @first_index: start the lookup from this key
  1178. * @max_items: place up to this many items at *results
  1179. * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
  1180. *
  1181. * Performs an index-ascending scan of the tree for present items which
  1182. * have the tag indexed by @tag set. Places the slots at *@results and
  1183. * returns the number of slots which were placed at *@results.
  1184. */
  1185. unsigned int
  1186. radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
  1187. void __rcu ***results, unsigned long first_index,
  1188. unsigned int max_items, unsigned int tag)
  1189. {
  1190. struct radix_tree_iter iter;
  1191. void __rcu **slot;
  1192. unsigned int ret = 0;
  1193. if (unlikely(!max_items))
  1194. return 0;
  1195. radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
  1196. results[ret] = slot;
  1197. if (++ret == max_items)
  1198. break;
  1199. }
  1200. return ret;
  1201. }
  1202. EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
  1203. static bool __radix_tree_delete(struct radix_tree_root *root,
  1204. struct radix_tree_node *node, void __rcu **slot)
  1205. {
  1206. void *old = rcu_dereference_raw(*slot);
  1207. int values = xa_is_value(old) ? -1 : 0;
  1208. unsigned offset = get_slot_offset(node, slot);
  1209. int tag;
  1210. if (is_idr(root))
  1211. node_tag_set(root, node, IDR_FREE, offset);
  1212. else
  1213. for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
  1214. node_tag_clear(root, node, tag, offset);
  1215. replace_slot(slot, NULL, node, -1, values);
  1216. return node && delete_node(root, node);
  1217. }
  1218. /**
  1219. * radix_tree_iter_delete - delete the entry at this iterator position
  1220. * @root: radix tree root
  1221. * @iter: iterator state
  1222. * @slot: pointer to slot
  1223. *
  1224. * Delete the entry at the position currently pointed to by the iterator.
  1225. * This may result in the current node being freed; if it is, the iterator
  1226. * is advanced so that it will not reference the freed memory. This
  1227. * function may be called without any locking if there are no other threads
  1228. * which can access this tree.
  1229. */
  1230. void radix_tree_iter_delete(struct radix_tree_root *root,
  1231. struct radix_tree_iter *iter, void __rcu **slot)
  1232. {
  1233. if (__radix_tree_delete(root, iter->node, slot))
  1234. iter->index = iter->next_index;
  1235. }
  1236. EXPORT_SYMBOL(radix_tree_iter_delete);
  1237. /**
  1238. * radix_tree_delete_item - delete an item from a radix tree
  1239. * @root: radix tree root
  1240. * @index: index key
  1241. * @item: expected item
  1242. *
  1243. * Remove @item at @index from the radix tree rooted at @root.
  1244. *
  1245. * Return: the deleted entry, or %NULL if it was not present
  1246. * or the entry at the given @index was not @item.
  1247. */
  1248. void *radix_tree_delete_item(struct radix_tree_root *root,
  1249. unsigned long index, void *item)
  1250. {
  1251. struct radix_tree_node *node = NULL;
  1252. void __rcu **slot = NULL;
  1253. void *entry;
  1254. entry = __radix_tree_lookup(root, index, &node, &slot);
  1255. if (!slot)
  1256. return NULL;
  1257. if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
  1258. get_slot_offset(node, slot))))
  1259. return NULL;
  1260. if (item && entry != item)
  1261. return NULL;
  1262. __radix_tree_delete(root, node, slot);
  1263. return entry;
  1264. }
  1265. EXPORT_SYMBOL(radix_tree_delete_item);
  1266. /**
  1267. * radix_tree_delete - delete an entry from a radix tree
  1268. * @root: radix tree root
  1269. * @index: index key
  1270. *
  1271. * Remove the entry at @index from the radix tree rooted at @root.
  1272. *
  1273. * Return: The deleted entry, or %NULL if it was not present.
  1274. */
  1275. void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
  1276. {
  1277. return radix_tree_delete_item(root, index, NULL);
  1278. }
  1279. EXPORT_SYMBOL(radix_tree_delete);
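/*
 * Minimal conditional-delete sketch (hypothetical names):
 * radix_tree_delete_item() only removes the entry if it still holds the
 * expected pointer, which lets a caller detect a concurrent replacement:
 *
 *	spin_lock(&my_lock);
 *	old = radix_tree_delete_item(&my_tree, index, expected);
 *	spin_unlock(&my_lock);
 *	if (!old)
 *		pr_debug("entry was already removed or replaced\n");
 */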
  1280. /**
  1281. * radix_tree_tagged - test whether any items in the tree are tagged
  1282. * @root: radix tree root
  1283. * @tag: tag to test
  1284. */
  1285. int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
  1286. {
  1287. return root_tag_get(root, tag);
  1288. }
  1289. EXPORT_SYMBOL(radix_tree_tagged);
  1290. /**
  1291. * idr_preload - preload for idr_alloc()
  1292. * @gfp_mask: allocation mask to use for preloading
  1293. *
  1294. * Preallocate memory to use for the next call to idr_alloc(). This function
  1295. * returns with preemption disabled. It will be enabled by idr_preload_end().
  1296. */
  1297. void idr_preload(gfp_t gfp_mask)
  1298. {
  1299. if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
  1300. local_lock(&radix_tree_preloads.lock);
  1301. }
  1302. EXPORT_SYMBOL(idr_preload);
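/*
 * Minimal idr allocation sketch (hypothetical names), the pattern this
 * preload exists to support: preallocate with GFP_KERNEL outside the
 * lock, then allocate the ID atomically under it:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 */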
  1303. void __rcu **idr_get_free(struct radix_tree_root *root,
  1304. struct radix_tree_iter *iter, gfp_t gfp,
  1305. unsigned long max)
  1306. {
  1307. struct radix_tree_node *node = NULL, *child;
  1308. void __rcu **slot = (void __rcu **)&root->xa_head;
  1309. unsigned long maxindex, start = iter->next_index;
  1310. unsigned int shift, offset = 0;
  1311. grow:
  1312. shift = radix_tree_load_root(root, &child, &maxindex);
  1313. if (!radix_tree_tagged(root, IDR_FREE))
  1314. start = max(start, maxindex + 1);
  1315. if (start > max)
  1316. return ERR_PTR(-ENOSPC);
  1317. if (start > maxindex) {
  1318. int error = radix_tree_extend(root, gfp, start, shift);
  1319. if (error < 0)
  1320. return ERR_PTR(error);
  1321. shift = error;
  1322. child = rcu_dereference_raw(root->xa_head);
  1323. }
  1324. if (start == 0 && shift == 0)
  1325. shift = RADIX_TREE_MAP_SHIFT;
  1326. while (shift) {
  1327. shift -= RADIX_TREE_MAP_SHIFT;
  1328. if (child == NULL) {
  1329. /* Have to add a child node. */
  1330. child = radix_tree_node_alloc(gfp, node, root, shift,
  1331. offset, 0, 0);
  1332. if (!child)
  1333. return ERR_PTR(-ENOMEM);
  1334. all_tag_set(child, IDR_FREE);
  1335. rcu_assign_pointer(*slot, node_to_entry(child));
  1336. if (node)
  1337. node->count++;
  1338. } else if (!radix_tree_is_internal_node(child))
  1339. break;
  1340. node = entry_to_node(child);
  1341. offset = radix_tree_descend(node, &child, start);
  1342. if (!tag_get(node, IDR_FREE, offset)) {
  1343. offset = radix_tree_find_next_bit(node, IDR_FREE,
  1344. offset + 1);
  1345. start = next_index(start, node, offset);
  1346. if (start > max || start == 0)
  1347. return ERR_PTR(-ENOSPC);
  1348. while (offset == RADIX_TREE_MAP_SIZE) {
  1349. offset = node->offset + 1;
  1350. node = node->parent;
  1351. if (!node)
  1352. goto grow;
  1353. shift = node->shift;
  1354. }
  1355. child = rcu_dereference_raw(node->slots[offset]);
  1356. }
  1357. slot = &node->slots[offset];
  1358. }
  1359. iter->index = start;
  1360. if (node)
  1361. iter->next_index = 1 + min(max, (start | node_maxindex(node)));
  1362. else
  1363. iter->next_index = 1;
  1364. iter->node = node;
  1365. set_iter_tags(iter, node, offset, IDR_FREE);
  1366. return slot;
  1367. }
  1368. /**
  1369. * idr_destroy - release all internal memory from an IDR
  1370. * @idr: idr handle
  1371. *
  1372. * After this function is called, the IDR is empty, and may be reused or
  1373. * the data structure containing it may be freed.
  1374. *
  1375. * A typical clean-up sequence for objects stored in an idr tree will use
  1376. * idr_for_each() to free all objects, if necessary, then idr_destroy() to
  1377. * free the memory used to keep track of those objects.
  1378. */
  1379. void idr_destroy(struct idr *idr)
  1380. {
  1381. struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
  1382. if (radix_tree_is_internal_node(node))
  1383. radix_tree_free_nodes(node);
  1384. idr->idr_rt.xa_head = NULL;
  1385. root_tag_set(&idr->idr_rt, IDR_FREE);
  1386. }
  1387. EXPORT_SYMBOL(idr_destroy);
  1388. static void
  1389. radix_tree_node_ctor(void *arg)
  1390. {
  1391. struct radix_tree_node *node = arg;
  1392. memset(node, 0, sizeof(*node));
  1393. INIT_LIST_HEAD(&node->private_list);
  1394. }
  1395. static int radix_tree_cpu_dead(unsigned int cpu)
  1396. {
  1397. struct radix_tree_preload *rtp;
  1398. struct radix_tree_node *node;
  1399. /* Free per-cpu pool of preloaded nodes */
  1400. rtp = &per_cpu(radix_tree_preloads, cpu);
  1401. while (rtp->nr) {
  1402. node = rtp->nodes;
  1403. rtp->nodes = node->parent;
  1404. kmem_cache_free(radix_tree_node_cachep, node);
  1405. rtp->nr--;
  1406. }
  1407. return 0;
  1408. }
  1409. void __init radix_tree_init(void)
  1410. {
  1411. int ret;
  1412. BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
  1413. BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
  1414. BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
  1415. radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
  1416. sizeof(struct radix_tree_node), 0,
  1417. SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
  1418. radix_tree_node_ctor);
  1419. ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
  1420. NULL, radix_tree_cpu_dead);
  1421. WARN_ON(ret < 0);
  1422. }