mtdpart.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 */

#ifndef __UBOOT__
#include <dm/devres.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#endif

#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <ubi_uboot.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/sizes.h>

#include "mtdcore.h"

#ifndef __UBOOT__
static DEFINE_MUTEX(mtd_partitions_mutex);
#else
DEFINE_MUTEX(mtd_partitions_mutex);
#endif

#ifdef __UBOOT__
/* from mm/util.c */

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif
#define MTD_SIZE_REMAINING		(~0LLU)
#define MTD_OFFSET_NOT_SPECIFIED	(~0LLU)

bool mtd_partitions_used(struct mtd_info *master)
{
	struct mtd_info *slave;

	list_for_each_entry(slave, &master->partitions, node) {
		if (slave->usecount)
			return true;
	}

	return false;
}
/**
 * mtd_parse_partition - Parse @mtdparts partition definition, fill @partition
 *                       with it and update the @mtdparts string pointer.
 *
 * The partition name is allocated and must be freed by the caller.
 *
 * This function is largely inspired by part_parse (mtdparts.c).
 *
 * @mtdparts: String describing the partition with mtdparts command syntax
 * @partition: MTD partition structure to fill
 *
 * @return 0 on success, an error otherwise.
 */
static int mtd_parse_partition(const char **_mtdparts,
			       struct mtd_partition *partition)
{
	const char *mtdparts = *_mtdparts;
	const char *name = NULL;
	int name_len;
	char *buf;

	/* Ensure the partition structure is empty */
	memset(partition, 0, sizeof(struct mtd_partition));

	/* Fetch the partition size */
	if (*mtdparts == '-') {
		/* Assign all remaining space to this partition */
		partition->size = MTD_SIZE_REMAINING;
		mtdparts++;
	} else {
		partition->size = ustrtoull(mtdparts, (char **)&mtdparts, 0);
		if (partition->size < SZ_4K) {
			printf("Minimum partition size 4kiB, %lldB requested\n",
			       partition->size);
			return -EINVAL;
		}
	}

	/* Check for the offset */
	partition->offset = MTD_OFFSET_NOT_SPECIFIED;
	if (*mtdparts == '@') {
		mtdparts++;
		partition->offset = ustrtoull(mtdparts, (char **)&mtdparts, 0);
	}

	/* Now look for the name */
	if (*mtdparts == '(') {
		name = ++mtdparts;
		mtdparts = strchr(name, ')');
		if (!mtdparts) {
			printf("No closing ')' found in partition name\n");
			return -EINVAL;
		}
		name_len = mtdparts - name + 1;
		if ((name_len - 1) == 0) {
			printf("Empty partition name\n");
			return -EINVAL;
		}
		mtdparts++;
	} else {
		/* Name will be of the form size@offset */
		name_len = 22;
	}

	/* Check if the partition is read-only */
	if (strncmp(mtdparts, "ro", 2) == 0) {
		partition->mask_flags |= MTD_WRITEABLE;
		mtdparts += 2;
	}

	/* Check for a potential next partition definition */
	if (*mtdparts == ',') {
		if (partition->size == MTD_SIZE_REMAINING) {
			printf("No partitions allowed after a fill-up\n");
			return -EINVAL;
		}
		++mtdparts;
	} else if ((*mtdparts == ';') || (*mtdparts == '\0')) {
		/* NOP */
	} else {
		printf("Unexpected character '%c' in mtdparts\n", *mtdparts);
		return -EINVAL;
	}

	/*
	 * Allocate a buffer for the name and either copy the provided name or
	 * auto-generate it with the form 'size@offset'.
	 */
	buf = malloc(name_len);
	if (!buf)
		return -ENOMEM;

	if (name)
		strncpy(buf, name, name_len - 1);
	else
		snprintf(buf, name_len, "0x%08llx@0x%08llx",
			 partition->size, partition->offset);

	buf[name_len - 1] = '\0';
	partition->name = buf;

	*_mtdparts = mtdparts;

	return 0;
}
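
/*
 * Worked example (illustrative only, derived from the parsing rules above,
 * not from captured output): for the fragment "512k(uboot)ro,-(rootfs)",
 * a first call to mtd_parse_partition() fills @partition with
 * size = 0x80000, offset = MTD_OFFSET_NOT_SPECIFIED, name = "uboot" and
 * MTD_WRITEABLE set in mask_flags (i.e. the slave becomes read-only), and
 * advances *_mtdparts to "-(rootfs)"; a second call assigns
 * MTD_SIZE_REMAINING so the last partition fills up the device.
 */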
/**
 * mtd_parse_partitions - Create a partition array from an mtdparts definition
 *
 * Stateless function that takes a @parent MTD device, a string @_mtdparts
 * describing the partitions (with the "mtdparts" command syntax) and creates
 * the corresponding MTD partition structure array @_parts. Both the name and
 * the partition structure itself must be freed; the caller may use
 * @mtd_free_parsed_partitions() for this purpose.
 *
 * @parent: MTD device which contains the partitions
 * @_mtdparts: Pointer to a string describing the partitions with "mtdparts"
 *             command syntax.
 * @_parts: Allocated array containing the partitions, must be freed by the
 *          caller.
 * @_nparts: Size of @_parts array.
 *
 * @return 0 on success, an error otherwise.
 */
int mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts,
			 struct mtd_partition **_parts, int *_nparts)
{
	struct mtd_partition partition = {}, *parts;
	const char *mtdparts = *_mtdparts;
	int cur_off = 0, cur_sz = 0;
	int nparts = 0;
	int ret, idx;
	u64 sz;

	/* First, iterate over the partitions until we know their number */
	while (mtdparts[0] != '\0' && mtdparts[0] != ';') {
		ret = mtd_parse_partition(&mtdparts, &partition);
		if (ret)
			return ret;

		free((char *)partition.name);
		nparts++;
	}

	/* Allocate an array of partitions to give back to the caller */
	parts = malloc(sizeof(*parts) * nparts);
	if (!parts) {
		printf("Not enough space to save partitions meta-data\n");
		return -ENOMEM;
	}

	/* Iterate again over each partition to save the data in our array */
	for (idx = 0; idx < nparts; idx++) {
		ret = mtd_parse_partition(_mtdparts, &parts[idx]);
		if (ret)
			return ret;

		if (parts[idx].size == MTD_SIZE_REMAINING)
			parts[idx].size = parent->size - cur_sz;
		cur_sz += parts[idx].size;

		sz = parts[idx].size;
		if (sz < parent->writesize || do_div(sz, parent->writesize)) {
			printf("Partition size must be a multiple of %d\n",
			       parent->writesize);
			return -EINVAL;
		}

		if (parts[idx].offset == MTD_OFFSET_NOT_SPECIFIED)
			parts[idx].offset = cur_off;
		cur_off += parts[idx].size;

		parts[idx].ecclayout = parent->ecclayout;
	}

	/* Offset by one mtdparts to point to the next device if any */
	if (*_mtdparts[0] == ';')
		(*_mtdparts)++;

	*_parts = parts;
	*_nparts = nparts;

	return 0;
}
/**
 * mtd_free_parsed_partitions - Free dynamically allocated partitions
 *
 * Each successful call to @mtd_parse_partitions must be followed by a call to
 * @mtd_free_parsed_partitions to free any allocated array during the parsing
 * process.
 *
 * @parts: Array containing the partitions that will be freed.
 * @nparts: Size of @parts array.
 */
void mtd_free_parsed_partitions(struct mtd_partition *parts,
				unsigned int nparts)
{
	int i;

	for (i = 0; i < nparts; i++)
		free((char *)parts[i].name);

	free(parts);
}
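
/*
 * Typical lifetime of a parsed partition array (illustrative sketch only,
 * not code compiled with this file; "master" and the mtdparts string are
 * placeholders supplied by the caller):
 *
 *	struct mtd_partition *parts;
 *	const char *mtdparts = "512k(uboot)ro,-(rootfs)";
 *	int nparts, ret;
 *
 *	ret = mtd_parse_partitions(master, &mtdparts, &parts, &nparts);
 *	if (!ret) {
 *		ret = add_mtd_partitions(master, parts, nparts);
 *		mtd_free_parsed_partitions(parts, nparts);
 *	}
 *
 * add_mtd_partitions() (defined later in this file) copies what it needs
 * into newly allocated slave mtd_info structures, so the parsed array can
 * be freed right after registration.
 */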
/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char *buf)
{
	struct mtd_ecc_stats stats;
	int res;

	stats = mtd->parent->ecc_stats;
	res = mtd->parent->_read(mtd->parent, from + mtd->offset, len,
				 retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			mtd->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			mtd->parent->ecc_stats.corrected - stats.corrected;
	return res;
}
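
/*
 * For example (numbers purely illustrative): on a partition registered at
 * offset 0x100000 of its parent, a caller's part_read(mtd, 0x2000, ...) is
 * forwarded as a parent read at 0x102000, and only the delta in the parent's
 * ECC counters accumulated during that call is folded back into the
 * partition's own ecc_stats. The other part_*() helpers below follow the
 * same offset-translation pattern.
 */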
#ifndef __UBOOT__
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, void **virt, resource_size_t *phys)
{
	return mtd->parent->_point(mtd->parent, from + mtd->offset, len,
				   retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	return mtd->parent->_unpoint(mtd->parent, from + mtd->offset, len);
}
#endif

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	offset += mtd->offset;
	return mtd->parent->_get_unmapped_area(mtd->parent, len, offset, flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops)
{
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = mtd->parent->_read_oob(mtd->parent, from + mtd->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
				   size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_read_user_prot_reg(mtd->parent, from, len,
						retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	return mtd->parent->_get_user_prot_info(mtd->parent, len, retlen,
						buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
				   size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_read_fact_prot_reg(mtd->parent, from, len,
						retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	return mtd->parent->_get_fact_prot_info(mtd->parent, len, retlen,
						buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		      size_t *retlen, const u_char *buf)
{
	return mtd->parent->_write(mtd->parent, to + mtd->offset, len,
				   retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const u_char *buf)
{
	return mtd->parent->_panic_write(mtd->parent, to + mtd->offset, len,
					 retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
{
	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return mtd->parent->_write_oob(mtd->parent, to + mtd->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_write_user_prot_reg(mtd->parent, from, len,
						 retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
				   size_t len)
{
	return mtd->parent->_lock_user_prot_reg(mtd->parent, from, len);
}

#ifndef __UBOOT__
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		       unsigned long count, loff_t to, size_t *retlen)
{
	return mtd->parent->_writev(mtd->parent, vecs, count,
				    to + mtd->offset, retlen);
}
#endif

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	int ret;

	instr->addr += mtd->offset;
	ret = mtd->parent->_erase(mtd->parent, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= mtd->offset;
		instr->addr -= mtd->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= instr->mtd->offset;
		instr->addr -= instr->mtd->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_lock(mtd->parent, ofs + mtd->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_unlock(mtd->parent, ofs + mtd->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_is_locked(mtd->parent, ofs + mtd->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	mtd->parent->_sync(mtd->parent);
}

#ifndef __UBOOT__
static int part_suspend(struct mtd_info *mtd)
{
	return mtd->parent->_suspend(mtd->parent);
}

static void part_resume(struct mtd_info *mtd)
{
	mtd->parent->_resume(mtd->parent);
}
#endif

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	ofs += mtd->offset;
	return mtd->parent->_block_isreserved(mtd->parent, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	ofs += mtd->offset;
	return mtd->parent->_block_isbad(mtd->parent, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	int res;

	ofs += mtd->offset;
	res = mtd->parent->_block_markbad(mtd->parent, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_info *p)
{
	kfree(p->name);
	kfree(p);
}
/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object, recursively.
 */
static int do_del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_info *slave, *next;
	int ret, err = 0;

	list_for_each_entry_safe(slave, next, &master->partitions, node) {
		if (mtd_has_partitions(slave))
			del_mtd_partitions(slave);

		debug("Deleting %s MTD partition\n", slave->name);
		ret = del_mtd_device(slave);
		if (ret < 0) {
			printf("Error when deleting partition \"%s\" (%d)\n",
			       slave->name, ret);
			err = ret;
			continue;
		}

		list_del(&slave->node);
		free_partition(slave);
	}

	return err;
}

int del_mtd_partitions(struct mtd_info *master)
{
	int ret;

	debug("Deleting MTD partitions on \"%s\":\n", master->name);

	mutex_lock(&mtd_partitions_mutex);
	ret = do_del_mtd_partitions(master);
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}

static struct mtd_info *allocate_partition(struct mtd_info *master,
					   const struct mtd_partition *part,
					   int partno, uint64_t cur_offset)
{
	struct mtd_info *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->type = master->type;
	slave->flags = master->flags & ~part->mask_flags;
	slave->size = part->size;
	slave->writesize = master->writesize;
	slave->writebufsize = master->writebufsize;
	slave->oobsize = master->oobsize;
	slave->oobavail = master->oobavail;
	slave->subpage_sft = master->subpage_sft;

	slave->name = name;
	slave->owner = master->owner;
#ifndef __UBOOT__
	slave->backing_dev_info = master->backing_dev_info;

	/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->dev.parent = master->dev.parent;
#endif

	if (master->_read)
		slave->_read = part_read;
	if (master->_write)
		slave->_write = part_write;
	if (master->_panic_write)
		slave->_panic_write = part_panic_write;

#ifndef __UBOOT__
	if (master->_point && master->_unpoint) {
		slave->_point = part_point;
		slave->_unpoint = part_unpoint;
	}
#endif

	if (master->_get_unmapped_area)
		slave->_get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->_read_oob = part_read_oob;
	if (master->_write_oob)
		slave->_write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->_read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->_read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->_write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->_lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->_get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->_get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->_sync = part_sync;
#ifndef __UBOOT__
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->_suspend = part_suspend;
		slave->_resume = part_resume;
	}
	if (master->_writev)
		slave->_writev = part_writev;
#endif
	if (master->_lock)
		slave->_lock = part_lock;
	if (master->_unlock)
		slave->_unlock = part_unlock;
	if (master->_is_locked)
		slave->_is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->_block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->_block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->_block_markbad = part_block_markbad;
	slave->_erase = part_erase;
	slave->parent = master;
	slave->offset = part->offset;
	INIT_LIST_HEAD(&slave->partitions);
	INIT_LIST_HEAD(&slave->node);

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: "
			      "0x%012llx -> 0x%012llx\n", partno,
			      (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->size) {
			slave->size = master->size - slave->offset
						- slave->size;
		} else {
			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
			      part->name, master->size - slave->offset,
			      slave->size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->size == MTDPART_SIZ_FULL)
		slave->size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
	      (unsigned long long)(slave->offset + slave->size), slave->name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
		       part->name);
		goto out_register;
	}
	if (slave->offset + slave->size > master->size) {
		slave->size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
		       part->name, master->name, slave->size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->size;
		struct mtd_erase_region_info *regions = master->eraseregions;
		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->erasesize < regions[i].erasesize)
				slave->erasesize = regions[i].erasesize;
		}
		WARN_ON(slave->erasesize == 0);
	} else {
		/* Single erase size */
		slave->erasesize = master->erasesize;
	}

	if ((slave->flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, slave)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
		       part->name);
	}
	if ((slave->flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->size, slave)) {
		slave->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
		       part->name);
	}

	slave->ecclayout = master->ecclayout;
	slave->ecc_step_size = master->ecc_step_size;
	slave->ecc_strength = master->ecc_strength;
	slave->bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->ecc_stats.badblocks++;
			offs += slave->erasesize;
		}
	}

out_register:
	return slave;
}
#ifndef __UBOOT__
int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_info *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &master->partitions, node) {
		if (start >= p->offset &&
		    (start < (p->offset + p->size)))
			goto err_inv;

		if (end >= p->offset &&
		    (end < (p->offset + p->size)))
			goto err_inv;
	}

	list_add_tail(&new->node, &master->partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(new);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_info *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &master->partitions, node)
		if (slave->index == partno) {
			ret = del_mtd_device(slave);
			if (ret < 0)
				break;

			list_del(&slave->node);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
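
/*
 * Illustrative sketch of the runtime (non-U-Boot) API above, assuming the
 * caller already holds a registered master mtd_info; the offsets and the
 * partition index shown are placeholders, not values taken from real use:
 *
 *	ret = mtd_add_partition(master, "scratch", 0x100000, 0x40000);
 *	...
 *	ret = mtd_del_partition(master, partno);
 *
 * mtd_add_partition() only accepts an explicit offset (MTDPART_OFS_APPEND
 * and MTDPART_OFS_NXTBLK are rejected) and fails with -EINVAL if the new
 * range overlaps an existing partition.
 */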
#endif

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */
int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_info *slave;
	uint64_t cur_offset = 0;
	int i;

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add_tail(&slave->node, &master->partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(slave);

		cur_offset = slave->offset + slave->size;
	}

	return 0;
}
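
/*
 * A board or driver can also feed add_mtd_partitions() a static table; the
 * sketch below is illustrative only (names, sizes and the "master" pointer
 * are placeholders, not data from this file):
 *
 *	static const struct mtd_partition board_parts[] = {
 *		{ .name = "spl",  .offset = 0,                  .size = SZ_256K },
 *		{ .name = "env",  .offset = MTDPART_OFS_APPEND, .size = SZ_128K },
 *		{ .name = "data", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	ret = add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
 *
 * MTDPART_OFS_APPEND places each entry right after the previous one and
 * MTDPART_SIZ_FULL lets the last entry span the rest of the device, as
 * handled in allocate_partition() above.
 */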
#ifndef __UBOOT__
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};
/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on the MTD device @master. It uses
 * the MTD partition parsers specified in @types. However, if @types is %NULL,
 * then the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser that reports any.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
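
/*
 * Illustrative call sequence (sketch only; "master" and the arguments are
 * placeholders, not calls taken from real code):
 *
 *	struct mtd_partition *parts;
 *	int nr = parse_mtd_partitions(master, NULL, &parts, NULL);
 *
 *	if (nr > 0)
 *		add_mtd_partitions(master, parts, nr);
 *
 * Passing a NULL @types falls back to default_mtd_part_types above, i.e.
 * "cmdlinepart" then "ofpart".
 */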
#endif

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (mtd_is_partition(mtd))
		return mtd->parent->size;

	return mtd->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
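
/*
 * For instance (numbers illustrative only): on a 256 MiB chip carrying a
 * 1 MiB "uboot" partition, mtd->size on the partition is 1 MiB while
 * mtd_get_device_size(mtd) returns 256 MiB, the size of the parent device.
 */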