dimm_devs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");

/*
 * Retrieve bus and dimm handle and return if this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

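/*
 * Confirm that config-data commands can be issued to this DIMM before
 * touching the label area; on failure, emit a debug message that names
 * the caller that tripped it.
 */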
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

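/*
 * Read @len bytes of label-area config data starting at @offset into @buf,
 * issuing ND_CMD_GET_CONFIG_DATA in chunks of at most nsarea.max_xfer bytes.
 */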
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

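/*
 * Write @len bytes from @buf into the label area at @offset, again chunked
 * by nsarea.max_xfer; per-chunk status lands in the trailing u32 of the
 * command buffer and is reported back through cmd_rc.
 */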
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
			   void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

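/*
 * Final kref release for a DIMM's driver data: free any remaining DPA
 * reservations, drop the cached label data, and release the device.
 */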
void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

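/*
 * Report the number of free namespace-label slots, holding one slot back
 * and guarding against underflow when no slots remain.
 */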
static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
	struct device *dev;
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	dev = ndd->dev;
	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	ssize_t rc;

	nd_device_lock(dev);
	rc = __available_slots_show(dev_get_drvdata(dev), buf);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(available_slots);

__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

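/*
 * Hide the security attributes when no security state is reported, and
 * make "security" read-only unless the provider implements at least one
 * state-mutation op; "frozen" is only shown when a freeze op exists.
 */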
static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

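/*
 * Only expose the "firmware" group when both the bus and the DIMM provide
 * firmware-activate ops and the bus reports at least quiesce-capable
 * activation.
 */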
static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

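/*
 * Allocate and register a new nvdimm device on @nvdimm_bus. The security
 * flags are read before nd_device_register() so that attribute visibility
 * is correct as soon as the device appears in sysfs.
 */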
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	if (noblk)
		flags |= 1 << NDD_NOBLK;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	/* get security state and extended (master) state */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

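/*
 * Freeze the DIMM's security state. Callers must hold the nvdimm bus lock,
 * and the freeze is refused while an overwrite operation is in flight.
 */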
int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

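/*
 * Per-DIMM allocation granularity: the region alignment divided evenly
 * across its mappings. Returns 0 (and warns) if the bus lock is not held
 * or the region geometry is inconsistent.
 */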
static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

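/*
 * Bus-walk callback: for each PMEM region that maps this DIMM, advance
 * blk_start past aliased PMEM allocations so that BLK capacity (or the
 * candidate free-space range in info->res) is only accounted above them.
 */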
int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	unsigned long align;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	align = dpa_align(nd_region);
	if (!align)
		return 0;

	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strncmp(res->name, "pmem", 4) != 0)
			continue;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if ((start >= blk_start && start < map_end)
				|| (end >= blk_start && end <= map_end)) {
			new = max(blk_start, min(map_end, end) + 1);
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: BLK region whose first mapping's free capacity is accounted
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	align = dpa_align(nd_region);
	if (!align)
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end, size;

		if (strncmp(res->name, "blk", 3) != 0)
			continue;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		size = end - start + 1;
		if (size >= info.available)
			return 0;

		info.available -= size;
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set. Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, start));
			else if (end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return ALIGN_DOWN(available - busy, align);
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

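/*
 * Release a DPA reservation created by nvdimm_allocate_dpa() and free the
 * duplicated label-id string that served as the resource name.
 */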
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}