// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, OGAWA Hirofumi
 */

#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include "fat.h"
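
/*
 * Per-variant FAT entry accessors.  Each FAT flavour (12/16/32) supplies
 * its own implementation; fat_ent_access_init() picks one of the tables
 * below based on the FAT variant, and the generic code dispatches
 * through it.
 */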
struct fatent_operations {
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	void (*ent_set_ptr)(struct fat_entry *, int);
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	int (*ent_get)(struct fat_entry *);
	void (*ent_put)(struct fat_entry *, int);
	int (*ent_next)(struct fat_entry *);
};

static DEFINE_SPINLOCK(fat12_entry_lock);
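
/*
 * FAT12 entries are 12 bits (1.5 bytes) wide, so the byte position of
 * entry n is n * 1.5, computed below as entry + (entry >> 1).
 */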
static void fat12_ent_blocknr(struct super_block *sb, int entry,
			      int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = entry + (entry >> 1);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

static void fat_ent_blocknr(struct super_block *sb, int entry,
			    int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = (entry << sbi->fatent_shift);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
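
/*
 * A 12-bit entry may straddle a block boundary, so fat_entry keeps two
 * byte pointers (ent12_p[0]/[1]) and, in the straddling case, two
 * buffer_heads.
 */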
static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	struct buffer_head **bhs = fatent->bhs;
	if (fatent->nr_bhs == 1) {
		WARN_ON(offset >= (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
	} else {
		WARN_ON(offset != (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[1]->b_data;
	}
}

static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (2 - 1));
	fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}

static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (4 - 1));
	fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;

	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry straddles a block boundary; it needs the next block */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
	return -EIO;
}

static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	fatent->bhs[0] = sb_bread(sb, blocknr);
	if (!fatent->bhs[0]) {
		fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
			(llu)blocknr);
		return -EIO;
	}
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}
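
/*
 * FAT12 packs two 12-bit entries into three bytes.  For example, bytes
 * 0x12 0x34 0x56 encode entry 0x412 (even: low byte plus the low nibble
 * of the middle byte) followed by entry 0x563 (odd: high nibble of the
 * middle byte plus the last byte).
 */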
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);

	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}

static int fat16_ent_get(struct fat_entry *fatent)
{
	int next = le16_to_cpu(*fatent->u.ent16_p);
	WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
	if (next >= BAD_FAT16)
		next = FAT_ENT_EOF;
	return next;
}

static int fat32_ent_get(struct fat_entry *fatent)
{
	int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
	WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
	if (next >= BAD_FAT32)
		next = FAT_ENT_EOF;
	return next;
}
static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;

	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);

	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
	if (fatent->nr_bhs == 2)
		mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}

static void fat16_ent_put(struct fat_entry *fatent, int new)
{
	if (new == FAT_ENT_EOF)
		new = EOF_FAT16;

	*fatent->u.ent16_p = cpu_to_le16(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

static void fat32_ent_put(struct fat_entry *fatent, int new)
{
	WARN_ON(new & 0xf0000000);
	new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
	*fatent->u.ent32_p = cpu_to_le32(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
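
/*
 * Step to the next FAT12 entry.  The two byte pointers advance by one or
 * two bytes depending on entry parity; when the walk crosses into the
 * second buffer_head, bhs[0] is released and bhs[1] takes its place.
 */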
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					     (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}

static int fat16_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
		fatent->u.ent16_p++;
		return 1;
	}
	fatent->u.ent16_p = NULL;
	return 0;
}

static int fat32_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
		fatent->u.ent32_p++;
		return 1;
	}
	fatent->u.ent32_p = NULL;
	return 0;
}
static const struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};

static const struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};

static const struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};
static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}
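
/*
 * fatent_shift is log2 of the entry size in bytes (2 for FAT32, 1 for
 * FAT16).  FAT12's 1.5-byte entries don't fit that scheme, so -1 marks
 * them as special and fat12_ent_blocknr() does the math instead.
 */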
void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	if (is_fat32(sbi)) {
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
	} else if (is_fat16(sbi)) {
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
	} else if (is_fat12(sbi)) {
		sbi->fatent_shift = -1;
		sbi->fatent_ops = &fat12_ops;
	} else {
		fat_fs_error(sb, "invalid FAT variant, %u bits", sbi->fat_bits);
	}
}
static void mark_fsinfo_dirty(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	if (sb_rdonly(sb) || !is_fat32(sbi))
		return;

	__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
}
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Do this fatent's cached blocks include the requested entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (is_fat12(sbi)) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}
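
/*
 * Read the FAT entry for @entry, reusing the buffers already attached to
 * @fatent when they cover the right block.  Returns the entry's value
 * (FAT_ENT_FREE, FAT_ENT_EOF, or the next cluster number) or a negative
 * error.
 */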
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;

	if (!fat_valid_entry(sbi, entry)) {
		fatent_brelse(fatent);
		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		return -EIO;
	}

	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);

	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}
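
/*
 * Propagate dirtied FAT blocks to the backup FAT copies.  The copies are
 * laid out back to back on disk, so copy N of a block lives at
 * b_blocknr + fat_length * N.
 */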
/* FIXME: We could write the blocks in bigger chunks. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			/* Avoid races with userspace reads via the bdev */
			lock_buffer(c_bh);
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			unlock_buffer(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & SB_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}
int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
		  int new, int wait)
{
	struct super_block *sb = inode->i_sb;
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	int err;

	ops->ent_put(fatent, new);
	if (wait) {
		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
		if (err)
			return err;
	}
	return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}
static inline int fat_ent_next(struct msdos_sb_info *sbi,
			       struct fat_entry *fatent)
{
	if (sbi->fatent_ops->ent_next(fatent)) {
		if (fatent->entry < sbi->max_cluster)
			return 1;
	}
	return 0;
}

static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}
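
/*
 * Collect the buffer_heads referenced by @fatent into @bhs, skipping
 * duplicates and taking an extra reference on each newly added buffer so
 * the caller can sync/mirror and brelse() them later in one batch.
 */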
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
			    struct fat_entry *fatent)
{
	int n, i;

	for (n = 0; n < fatent->nr_bhs; n++) {
		for (i = 0; i < *nr_bhs; i++) {
			if (fatent->bhs[n] == bhs[i])
				break;
		}
		if (i == *nr_bhs) {
			get_bh(fatent->bhs[n]);
			bhs[i] = fatent->bhs[n];
			(*nr_bhs)++;
		}
	}
}
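
/*
 * Allocate @nr_cluster clusters for @inode, storing the cluster numbers
 * in @cluster.  The scan starts just past the last allocation point
 * (sbi->prev_free + 1) and wraps around; each free entry found is marked
 * EOF and the previous one is relinked to it, building the chain as we
 * go.  On failure any partially built chain is freed again.
 */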
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() took a reference on the
				 * bhs, so prev_ent stays usable.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}
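
/*
 * Walk the chain starting at @cluster, marking every entry free.  Dirty
 * FAT blocks are synced and mirrored in MAX_BUF_PER_PAGE-sized batches,
 * and contiguous runs of clusters are discarded in one request when the
 * discard option is enabled.
 */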
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);

				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & SB_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & SB_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);
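
/*
 * State for sequential readahead over the FAT: ->cur/->limit track the
 * scan itself (in blocks, relative to the first block of the range),
 * while the ra_* fields describe the current readahead window, which is
 * advanced half a window at a time.
 */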
struct fatent_ra {
	sector_t cur;
	sector_t limit;

	unsigned int ra_blocks;
	sector_t ra_advance;
	sector_t ra_next;
	sector_t ra_limit;
};
static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
			struct fat_entry *fatent, int ent_limit)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	sector_t blocknr, block_end;
	int offset;
	/*
	 * This is a sequential read, so use ra_pages * 2 (but try to
	 * align to the optimal hardware IO size).
	 * [BTW, 128KB covers the whole FAT for FAT12 and FAT16]
	 */
	unsigned long ra_pages = sb->s_bdi->ra_pages;
	unsigned int reada_blocks;

	if (fatent->entry >= ent_limit)
		return;

	if (ra_pages > sb->s_bdi->io_pages)
		ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
	reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);

	/* Initialize the range for sequential read */
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end);
	ra->cur = 0;
	ra->limit = (block_end + 1) - blocknr;

	/* Advancing the window at half size */
	ra->ra_blocks = reada_blocks >> 1;
	ra->ra_advance = ra->cur;
	ra->ra_next = ra->cur;
	ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
}
/* Assumed to be called before reading a new block (increments ->cur). */
static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra,
			  struct fat_entry *fatent)
{
	if (ra->ra_next >= ra->ra_limit)
		return;

	if (ra->cur >= ra->ra_advance) {
		struct msdos_sb_info *sbi = MSDOS_SB(sb);
		const struct fatent_operations *ops = sbi->fatent_ops;
		struct blk_plug plug;
		sector_t blocknr, diff;
		int offset;

		ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

		diff = blocknr - ra->cur;
		blk_start_plug(&plug);
		/*
		 * FIXME: we would want to directly use the bio with
		 * pages to reduce the number of segments.
		 */
		for (; ra->ra_next < ra->ra_limit; ra->ra_next++)
			sb_breadahead(sb, ra->ra_next + diff);
		blk_finish_plug(&plug);

		/* Advance the readahead window */
		ra->ra_advance += ra->ra_blocks;
		ra->ra_limit += min_t(sector_t,
				      ra->ra_blocks, ra->limit - ra->ra_limit);
	}
	ra->cur++;
}
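
/*
 * Count the free clusters by scanning the whole FAT (with readahead),
 * then cache the result in sbi->free_clusters so later callers can skip
 * the scan.
 */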
int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	int err = 0, free;

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
		cond_resched();
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}
static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
				nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
}
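
/*
 * Trim (FITRIM) implementation: walk the requested range of the FAT and
 * issue a discard for every run of at least @range->minlen free
 * clusters, then report the number of bytes trimmed back through
 * @range->len.
 */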
int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	u64 ent_start, ent_end, minlen, trimmed = 0;
	u32 free = 0;
	int err = 0;

	/*
	 * FAT data is organized as clusters, so trim at the granularity of
	 * clusters.
	 *
	 * fstrim_range is in bytes; convert the values to cluster indexes.
	 * Treat sectors before the data region as all used, so as not to
	 * trim them.
	 */
	ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
	ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
	minlen = range->minlen >> sbi->cluster_bits;

	if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
		return -EINVAL;
	if (ent_end >= sbi->max_cluster)
		ent_end = sbi->max_cluster - 1;

	fatent_init(&fatent);
	lock_fat(sbi);
	fatent_set_entry(&fatent, ent_start);
	fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1);
	while (fatent.entry <= ent_end) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto error;
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				free++;
			} else if (free) {
				if (free >= minlen) {
					u32 clus = fatent.entry - free;

					err = fat_trim_clusters(sb, clus, free);
					if (err && err != -EOPNOTSUPP)
						goto error;
					if (!err)
						trimmed += free;
					err = 0;
				}
				free = 0;
			}
		} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);

		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto error;
		}

		if (need_resched()) {
			fatent_brelse(&fatent);
			unlock_fat(sbi);
			cond_resched();
			lock_fat(sbi);
		}
	}
	/* handle the scenario where the tail entries are all free */
	if (free && free >= minlen) {
		u32 clus = fatent.entry - free;

		err = fat_trim_clusters(sb, clus, free);
		if (err && err != -EOPNOTSUPP)
			goto error;
		if (!err)
			trimmed += free;
		err = 0;
	}

error:
	fatent_brelse(&fatent);
	unlock_fat(sbi);
	range->len = trimmed << sbi->cluster_bits;

	return err;
}