idr.c

/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
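
/*
 * Editor's sketch (not part of the original file): the round trip the
 * comment above describes, using this file's own API.  The idr "my_ids",
 * the demo function and the minimal error handling are illustrative
 * assumptions only.
 */
#if 0	/* example only, never compiled */
static struct idr my_ids;

static void my_ids_demo(void *obj)
{
	int id;

	idr_init(&my_ids);
	if (idr_pre_get(&my_ids, GFP_KERNEL) &&
	    idr_get_new(&my_ids, obj, &id) == 0) {
		/* hand "id" out; later somebody passes it back... */
		BUG_ON(idr_find(&my_ids, id) != obj);
		idr_remove(&my_ids, id);	/* release the id */
	}
	idr_destroy(&my_ids);	/* return the cached layers */
}
#endif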

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__free_layer(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
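
/*
 * Editor's sketch (not part of the original file): the pre-get/retry
 * protocol the comment above implies.  idr_pre_get() refills the per-idr
 * free list without the caller's lock held, so a concurrent allocator
 * may drain it again; -EAGAIN from idr_get_new() means "refill and
 * retry".  The lock "my_lock" and the return convention here are
 * illustrative assumptions.
 */
#if 0	/* example only, never compiled */
static DEFINE_SPINLOCK(my_lock);
static struct idr my_ids;

static int my_alloc_id(void *ptr)
{
	int id, err;

	do {
		if (!idr_pre_get(&my_ids, GFP_KERNEL))
			return -ENOMEM;		/* really out of memory */
		spin_lock(&my_lock);
		err = idr_get_new(&my_ids, ptr, &id);
		spin_unlock(&my_lock);
	} while (err == -EAGAIN);	/* free list drained, refill */

	return err ? err : id;		/* -ENOSPC if the idr is full */
}
#endif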

static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	struct idr_layer *pa[MAX_LEVEL];
	int l, id;
	long bm;

	id = *starting_id;
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
			if (!(p = pa[l])) {
				*starting_id = id;
				return -2;
			}
			continue;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}
	/*
	 * We have reached the leaf node, plant the
	 * user's pointer and return the raw id.
	 */
	p->ary[m] = (struct idr_layer *)ptr;
	__set_bit(m, &p->bitmap);
	p->count++;
	/*
	 * If this layer is full mark the bit in the layer above
	 * to show that this part of the radix tree is full.
	 * This may complete the layer above and require walking
	 * up the radix tree.
	 */
	n = id;
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		n = n >> IDR_BITS;
		__set_bit((n & IDR_MASK), &p->bitmap);
	}
	return(id);
}
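
/*
 * Editor's sketch (not part of the original file): how sub_alloc()
 * slices an id into per-level slot indices, IDR_BITS bits per layer,
 * top layer first.  The three-layer tree and the printk output are
 * illustrative assumptions; IDR_BITS/IDR_MASK come from linux/idr.h.
 */
#if 0	/* example only, never compiled */
static void idr_slice_demo(int id)
{
	int level;

	/* walk from the top layer down to the leaf */
	for (level = 2; level >= 0; level--)
		printk("level %d -> slot %d\n",
		       level, (id >> (IDR_BITS * level)) & IDR_MASK);
}
#endif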

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__free_layer(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, ptr, &id);
	if (v == -2)
		goto build_up;
	return(v);
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
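
/*
 * Editor's sketch (not part of the original file): reserving ids at or
 * above a floor, e.g. to keep a low range free for fixed ids.  The
 * floor value and helper name are illustrative assumptions; my_ids and
 * my_lock are the ones declared in the sketch after idr_pre_get(), and
 * the same idr_pre_get()/-EAGAIN retry protocol applies.
 */
#if 0	/* example only, never compiled */
static int my_alloc_id_above(void *ptr, int floor)
{
	int id, err;

	do {
		if (!idr_pre_get(&my_ids, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&my_lock);
		err = idr_get_new_above(&my_ids, ptr, floor, &id);
		spin_unlock(&my_lock);
	} while (err == -EAGAIN);

	return err ? err : id;	/* lowest free id >= floor, or -ENOSPC */
}
#endif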

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk("idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {	// We can drop a layer
		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
		return;
	}
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer registered with the given id.  A %NULL return
 * indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
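
/*
 * Editor's sketch (not part of the original file): a serialized lookup,
 * honoring the comment above.  The lock is an illustrative assumption;
 * any scheme that keeps idr_find() from racing idr_get_new() and
 * idr_remove() will do.  my_ids/my_lock are from the earlier sketch.
 */
#if 0	/* example only, never compiled */
static void *my_lookup(int id)
{
	void *p;

	spin_lock(&my_lock);
	p = idr_find(&my_ids, id);	/* NULL if id was never allocated */
	spin_unlock(&my_lock);
	return p;
}
#endif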

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	p->ary[n] = ptr;

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
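
/*
 * Editor's sketch (not part of the original file): swapping the pointer
 * behind a live id.  Errors come back as ERR_PTR() values, so they must
 * be checked with IS_ERR() rather than against NULL.  The helper name
 * and lock are illustrative assumptions; my_ids/my_lock as above.
 */
#if 0	/* example only, never compiled */
static int my_swap(int id, void *new, void **old)
{
	void *p;

	spin_lock(&my_lock);
	p = idr_replace(&my_ids, new, id);
	spin_unlock(&my_lock);

	if (IS_ERR(p))
		return PTR_ERR(p);	/* -ENOENT or -EINVAL */
	*old = p;
	return 0;
}
#endif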

static void idr_cache_ctor(void *idr_layer, struct kmem_cache *idr_layer_cache,
		unsigned long flags)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
	if (!idr_layer_cache)
		idr_layer_cache = kmem_cache_create("idr_layer_cache",
			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
	return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	init_id_cache();
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);