label.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}
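
/*
 * Illustrative walk-through of the arithmetic above (example values,
 * not from the spec): sequence numbers cycle 1 -> 2 -> 3 -> 1 under
 * NSINDEX_SEQ_MASK, so best_seq(3, 1) returns 1 because
 * nd_inc_seq(3) == 1 marks 1 as the successor, and best_seq(1, 3)
 * also returns 1 since nd_inc_seq(1) == 2 != 3 leaves 'a' as the
 * newer value.  A zero on either side simply yields the other,
 * non-zero sequence.
 */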

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}

int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels.  The minimum index
	 * block size is 256 bytes.  The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}
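
/*
 * Worked example (hypothetical geometry): with a 128KB label area and
 * 256-byte labels, nvdimm_num_label_slots() first guesses
 * tmp_nslot = 131072 / 256 = 512; the index block covering 512 slots
 * (header plus a 64-byte free bitmap) rounds up to a single
 * NSINDEX_ALIGN unit, so the two index blocks cost 512 bytes and
 * (131072 - 512) / 256 = 510 label slots remain.
 */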

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
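
/*
 * Note on bitmap polarity (implied by the callers below): a set bit in
 * an index block's 'free' bitmap marks a free slot, so iterating the
 * clear bits visits every potentially active label.  The helper scans
 * the bitmap in its on-media little-endian layout regardless of host
 * endianness.
 */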

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;

	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
				slot, sum);
			return false;
		}
	}

	return true;
}

int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		if (test_bit(NDD_NOBLK, &nvdimm->flags))
			flags &= ~NSLABEL_FLAG_LOCAL;
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory.  To do that we figure out how much unused space will be
	 * left in the last read, divide that by the total number of reads it
	 * is going to take given our maximum transfer size, and then reduce
	 * our maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}
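
	/*
	 * Illustrative numbers (hypothetical, not from any platform): with
	 * config_size = 131072 and max_xfer = 5000 the transfer count is
	 * DIV_ROUND_UP(131072, 5000) = 27 and the last read would waste
	 * 4999 - (131071 % 5000) = 3928 bytes; spreading that waste over
	 * the 27 reads trims max_xfer by 3928 / 27 = 145 to 4855, which
	 * still needs 27 reads but leaves only 13 wasted bytes.
	 */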

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}

int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
					slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
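	/*
	 * Example (illustrative): on a 64-bit host with nslot = 1000,
	 * nfree = ALIGN(1000, 64) = 1024, so the memset above marks
	 * 128 bytes (1024 bits) free and the loop clears the 24 trailing
	 * pad bits (slots 1000-1023) so they can never be handed out by
	 * nd_label_alloc_slot().
	 */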
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
	guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}

static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(flags);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
				|| memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) == 0)
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}

static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}

/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is
	 * disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = bitmap_zalloc(nslot, GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
					old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		bitmap_free(victim_map);
		return -ENOSPC;
	}
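	/*
	 * Illustrative accounting: with nfree = 4, alloc = 3 and
	 * victims = 2 the update proceeds (4 - 3 = 1 >= 0, and
	 * 1 + 2 = 3 >= 1 slots remain free afterwards).  With nfree = 3,
	 * alloc = 3 and victims = 0 it fails with -ENOSPC: the new labels
	 * would fit, but reclaiming no victims would leave zero free
	 * slots, consuming the last label.
	 */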
	/* from here on we need to abort on error */

	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/* release slots associated with any invalidated UUIDs */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
			reap_victim(nd_mapping, label_ent);
			list_move(&label_ent->list, &list);
		}
	mutex_unlock(&nd_mapping->lock);

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX) {
			rc = -ENXIO;
			goto abort;
		}
		dev_dbg(ndd->dev, "allocated: %d\n", slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		if (namespace_label_has(ndd, checksum)) {
			u64 sum;

			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "free: %d\n", slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

out:
	kfree(old_res_list);
	bitmap_free(victim_map);
	return rc;

abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}

static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
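	/*
	 * Writing seq 3 to index block 0 and seq 2 to block 1 (the cycle
	 * is 1 -> 2 -> 3 -> 1) makes best_seq() pick block 0 as the
	 * initial 'current', matching the ns_current/ns_next assignments
	 * below.
	 */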
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}

int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}