
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define PAGE_CACHE_BITS	(PAGE_SIZE * 8)
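
/*
 * hfsplus_block_allocate - find and reserve a run of free blocks
 * @sb:     filesystem superblock
 * @size:   total number of bits (allocation blocks) in the bitmap
 * @offset: bit at which to start searching
 * @max:    in: maximum run length wanted; on success: length actually reserved
 *
 * Scans the allocation file, which stores one bit per allocation block in
 * big-endian 32-bit words (most significant bit first), for the first clear
 * bit at or after @offset and sets up to *@max consecutive clear bits
 * starting there.  Returns the first block of the run; a return value of
 * @size or more means nothing could be allocated.
 */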
int hfsplus_block_allocate(struct super_block *sb, u32 size,
		u32 offset, u32 *max)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
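	/*
	 * If the last valid bit ('size') lies beyond this page, scan the
	 * whole page; otherwise stop at the word that contains bit 'size'.
	 */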
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

	/* scan the first partial u32 for zero bits */
	val = *curr;
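	/* a fully allocated word is all ones, so ~val != 0 means a free bit */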
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1U << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
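		/* no free bit in this page, move on to the next bitmap page */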
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	hfs_dbg(BITMAP, "bitmap full\n");
	start = size;
	goto out;
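
/*
 * We found a clear bit.  Its block number is
 * offset + (curr - pptr) * 32 + i; mark up to 'len' consecutive blocks
 * allocated, starting there.
 */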
found:
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		hfs_dbg(BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
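	/*
	 * Set bits i..31 of the current word, stopping early if the
	 * requested length runs out or we hit an already-allocated bit.
	 */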
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
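		/* this page is fully marked, flush it and continue on the next one */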
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
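
/*
 * Fewer than 32 bits remain, or the current word already contains
 * allocated bits: set up to 'len' leading free bits of this word,
 * stopping at the first bit that is already set.
 */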
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
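
/* write back the final word and account for what was actually allocated */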
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	sbi->free_blocks -= *max;
	hfsplus_mark_mdb_dirty(sb);
	hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}
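
/*
 * hfsplus_block_free - mark a run of blocks as free again
 * @sb:     filesystem superblock
 * @offset: first bit (allocation block) to clear
 * @count:  number of bits to clear
 *
 * Clears @count consecutive bits in the allocation file starting at
 * @offset.  Returns 0 on success, -ENOENT if the range lies beyond the
 * end of the volume, or -EIO if a bitmap page cannot be read.
 */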
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -ENOENT;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
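
		/*
		 * Keep the i bits before 'offset' allocated and clear the
		 * rest of this word.  If the whole range ends inside this
		 * same word (j > count), the bits after it are kept as well.
		 */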
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
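
/* clear the leading 'count' bits of the final word and keep the rest */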
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);
	return 0;

kaboom:
	pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
	mutex_unlock(&sbi->alloc_mutex);
	return -EIO;
}