pmem.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}
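
/*
 * After the bus has cleared a poisoned range, drop the HWPoison page
 * flag and restore the kernel mapping that was torn down when the
 * machine check was first handled.
 */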
static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}
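
/*
 * Ask the nvdimm bus to clear media errors in the given range, then
 * bring the driver back in sync: clear HWPoison state, trim the
 * badblocks list, notify userspace via sysfs, and invalidate any
 * cached copies of the affected range.
 */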
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}
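
/*
 * Copy from a (possibly highmem) page into pmem one page-sized chunk at
 * a time, using cache-flushing stores so the data does not linger in
 * the CPU cache.
 */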
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}
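
/*
 * Copy from pmem into a page with a machine-check-safe copy so that
 * consuming poison surfaces as an I/O error instead of a fatal MCE.
 */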
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}
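
/* Read one bio segment, failing fast if the range is known to be bad. */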
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;
	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	/*
	 * Note that we write the data both before and after
	 * clearing poison.  The write before clear poison
	 * handles situations where the latest written data is
	 * preserved and the clear poison operation simply marks
	 * the address range as valid without changing the data.
	 * In this case application software can assume that an
	 * interrupted write will either return the new good
	 * data or an error.
	 *
	 * However, if pmem_clear_poison() leaves the data in an
	 * indeterminate state we need to perform the write
	 * after clear poison.
	 */
	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);
	if (unlikely(bad_pmem)) {
		rc = pmem_clear_poison(pmem, pmem_off, len);
		write_pmem(pmem_addr, page, page_off, len);
	}

	return rc;
}
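
/*
 * Handle a bio synchronously: honor REQ_PREFLUSH before touching the
 * data, copy each segment to or from pmem, and honor REQ_FUA with a
 * final nvdimm_flush() once the data has been written.
 */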
static blk_qc_t pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}
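
/*
 * Synchronous single-page entry point for callers of the block layer's
 * ->rw_page() interface (e.g. swap).
 */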
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	blk_status_t rc;

	if (op_is_write(op))
		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
	else
		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}
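
/*
 * Translate a page offset into a kernel virtual address and pfn for
 * DAX, returning how many pages from @pgoff are known to be valid, or
 * -EIO if the requested range overlaps known poison.
 */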
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner = THIS_MODULE,
	.submit_bio = pmem_submit_bio,
	.rw_page = pmem_rw_page,
};
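
/*
 * Zero a page through the normal write path so that any known poison
 * in the range is cleared as a side effect.
 */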
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				    size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_mc_to_iter(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
	.zero_page_range = pmem_dax_zero_page_range,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};
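
/*
 * The pagemap reference count is the request_queue's usage counter, so
 * killing and cleaning up the pagemap maps onto freezing and cleaning
 * up the queue.
 */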
static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_cleanup_queue(q);
}

static void pmem_release_queue(void *pgmap)
{
	pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_freeze_queue_start(q);
}
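
/* devm action: tear down the dax_device and gendisk on driver unbind. */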
static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.kill = pmem_pagemap_kill,
	.cleanup = pmem_pagemap_cleanup,
};
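
/*
 * Set up the block device and DAX device for a pmem namespace: map the
 * namespace (via struct pages when a pfn info block is in use or page
 * mapping is requested), configure the request queue, populate
 * badblocks, and register the disk and dax_device.
 */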
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue(dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		if (devm_add_action_or_reset(dev, pmem_release_queue,
					&pmem->pgmap))
			return -ENOMEM;
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (IS_ERR(dax_dev)) {
		put_disk(disk);
		return PTR_ERR(dax_dev);
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;
	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk, NULL);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: either there is no
	 * info reserve block, or we found a valid info reserve block
	 * but failed to initialize the pfn superblock.
	 *
	 * For the first case consider the namespace as a raw pmem
	 * namespace and attach a disk.
	 *
	 * For the latter, consider this a success and advance the namespace
	 * seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}
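
/*
 * Re-scan for poison when the nvdimm core reports that the list of
 * known media errors may have changed, and poke the sysfs 'badblocks'
 * file so userspace can re-read it.
 */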
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);

static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");