region.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

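/*
 * Bind an nd_region device: activate the region, initialize any blk
 * regions, populate badblocks for memory-mode (pmem) regions, and
 * register the namespaces discovered in the region along with the
 * btt/pfn/dax seed devices.
 */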
static int nd_region_probe(struct device *dev)
{
	int err, rc;
	static unsigned long once;
	struct nd_region_data *ndrd;
	struct nd_region *nd_region = to_nd_region(dev);

	if (nd_region->num_lanes > num_online_cpus()
			&& nd_region->num_lanes < num_possible_cpus()
			&& !test_and_set_bit(0, &once)) {
		dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
				num_online_cpus(), nd_region->num_lanes,
				num_possible_cpus());
		dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
				nd_region->num_lanes);
	}

	rc = nd_region_activate(nd_region);
	if (rc)
		return rc;

	rc = nd_blk_region_init(nd_region);
	if (rc)
		return rc;

	if (is_memory(&nd_region->dev)) {
		struct range range = {
			.start = nd_region->ndr_start,
			.end = nd_region->ndr_start + nd_region->ndr_size - 1,
		};

		if (devm_init_badblocks(dev, &nd_region->bb))
			return -ENODEV;
		nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd,
						       "badblocks");
		if (!nd_region->bb_state)
			dev_warn(&nd_region->dev,
					"'badblocks' notification disabled\n");
		nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
	}

	rc = nd_region_register_namespaces(nd_region, &err);
	if (rc < 0)
		return rc;

	ndrd = dev_get_drvdata(dev);
	ndrd->ns_active = rc;
	ndrd->ns_count = rc + err;
	if (rc && err && rc == err)
		return -ENODEV;

	nd_region->btt_seed = nd_btt_create(nd_region);
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	nd_region->dax_seed = nd_dax_create(nd_region);
	if (err == 0)
		return 0;

	/*
	 * Given multiple namespaces per region, we do not want to
	 * disable all the successfully registered peer namespaces upon
	 * a single registration failure. If userspace is missing a
	 * namespace that it expects it can disable/re-enable the region
	 * to retry discovery after correcting the failure.
	 * <regionX>/namespaces returns the current
	 * "<async-registered>/<total>" namespace count.
	 */
	dev_err(dev, "failed to register %d namespace%s, continuing...\n",
			err, err == 1 ? "" : "s");
	return 0;
}

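/* device_for_each_child() callback: synchronously unregister one child */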
static int child_unregister(struct device *dev, void *data)
{
	nd_device_unregister(dev, ND_SYNC);
	return 0;
}

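/*
 * Unbind: tear down all child devices, then drop the seed device
 * pointers and driver data under the nvdimm bus lock so attribute
 * readers see a consistent, disabled state.
 */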
static int nd_region_remove(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	device_for_each_child(dev, NULL, child_unregister);

	/* flush attribute readers and disable */
	nvdimm_bus_lock(dev);
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	nd_region->dax_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

	/*
	 * Note, this assumes nd_device_lock() context to not race
	 * nd_region_notify()
	 */
	sysfs_put(nd_region->bb_state);
	nd_region->bb_state = NULL;

	return 0;
}

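/* device_for_each_child() callback: forward an nvdimm event to one child */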
static int child_notify(struct device *dev, void *data)
{
	nd_device_notify(dev, *(enum nvdimm_event *) data);
	return 0;
}

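/*
 * On NVDIMM_REVALIDATE_POISON, refresh the region's badblocks list and
 * notify any 'badblocks' sysfs watchers, then propagate the event to
 * all child devices.
 */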
static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
	if (event == NVDIMM_REVALIDATE_POISON) {
		struct nd_region *nd_region = to_nd_region(dev);

		if (is_memory(&nd_region->dev)) {
			struct range range = {
				.start = nd_region->ndr_start,
				.end = nd_region->ndr_start +
					nd_region->ndr_size - 1,
			};

			nvdimm_badblocks_populate(nd_region,
					&nd_region->bb, &range);
			if (nd_region->bb_state)
				sysfs_notify_dirent(nd_region->bb_state);
		}
	}
	device_for_each_child(dev, &event, child_notify);
}

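/* Region driver, covering both PMEM and BLK region device types */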
static struct nd_device_driver nd_region_driver = {
	.probe = nd_region_probe,
	.remove = nd_region_remove,
	.notify = nd_region_notify,
	.drv = {
		.name = "nd_region",
	},
	.type = ND_DRIVER_REGION_BLK | ND_DRIVER_REGION_PMEM,
};

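/* Register/unregister the nd_region driver with the nd bus */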
int __init nd_region_init(void)
{
	return nd_driver_register(&nd_region_driver);
}

void nd_region_exit(void)
{
	driver_unregister(&nd_region_driver.drv);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_BLK);