// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#define pr_fmt(fmt) "nvm: " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};

static void nvm_free(struct kref *ref);
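
/*
 * Look up a target on @dev by its gendisk name. Callers in this file
 * hold dev->mlock while walking the target list.
 */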
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}
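
/*
 * Reserve LUNs lun_begin..lun_end (inclusive) in the device LUN bitmap.
 * If any LUN in the range is already taken, the bits set so far are
 * rolled back and -EBUSY is returned.
 */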
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}
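
/*
 * Free a target device mapping. When @clear is set, the LUNs owned by
 * the target are also released in the parent device's LUN bitmap.
 */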
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}
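
/*
 * Build a target device covering LUNs lun_begin..lun_end of the
 * parent. The per-channel maps translate between the target's
 * compacted addresses and the parent's physical channel/LUN numbers;
 * the parent's reverse map is updated with the same offsets.
 */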
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}
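
/*
 * Create a target on @dev from an ioctl request: validate the
 * configuration, reserve the LUN range, build the target device,
 * allocate the gendisk and request queue, and hand control to the
 * target type's init hook before publishing the disk.
 */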
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("target name already exists (%s)\n",
			create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue(dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = tt->bops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tqueue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}
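
/*
 * Tear down a target: remove its gendisk, destroy the request queue,
 * run the target type's sysfs and exit hooks, release the target
 * device and drop the module reference.
 */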
static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t) {
		pr_err("failed to remove target %s\n",
				remove->tgtname);
		return 1;
	}

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}
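
/*
 * Allocate the device's reverse map, used to translate device
 * addresses back to target addresses. Channel and LUN offsets start
 * at -1 until a target claims them.
 */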
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}
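
/*
 * Translate a ppa from the target's compacted address space to the
 * device's physical address space (and back, below) by applying the
 * per-channel and per-LUN offsets recorded in the maps.
 */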
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
					dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}
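
/*
 * Populate the request's ppa list from the caller's addresses. On
 * multi-plane (1.2) geometries every address is replicated once per
 * plane; a single-plane, single-ppa request uses the embedded
 * ppa_addr and needs no DMA list.
 */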
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
			struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
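
/*
 * Compute the 1.2 vector command flags for a request. 2.0 devices
 * take no flags; for 1.2, sequential requests encode the plane mode,
 * reads enable scrambling and suspend, and writes enable scrambling.
 */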
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
			      void *buf)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rqd->end_io = nvm_sync_end_io;
	rqd->private = &wait;

	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		return ret;

	wait_for_completion_io(&wait);

	return 0;
}

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
		       void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	ret = nvm_submit_io_wait(dev, rqd, buf);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
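
/*
 * Synchronous submission for requests whose addresses are already in
 * device format; rqd->dev is left NULL so completion skips the target
 * address conversion. Used by the 1.2 chunk scan below.
 */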
static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	rqd->dev = NULL;
	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return nvm_submit_io_wait(dev, rqd, NULL);
}
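
/*
 * Probe the page at @ppa with a single synchronous read. The return
 * value is the device completion status: 0 means valid data was read,
 * a positive value reports an empty page or a media error.
 */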
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	__free_page(page);
	if (ret)
		return ret;

	return rqd.error;
}

/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * chunk is open, we scan sequentially to update the write pointer.
	 * We make the assumption that targets write data across all planes
	 * before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}

/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any plane's block is marked bad or grown bad, the chunk is marked
 * offline. Otherwise, the first plane's state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}
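
/*
 * Emulate the 2.0 chunk report on a 1.2 device: walk the requested
 * LUNs, fetch each LUN's bad block table and fold it into chunk
 * metadata. @slba must start at block 0 and @nchks must cover whole
 * LUNs.
 */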
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
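
/*
 * Allocate the per-device LUN bitmap and reverse map, and set up the
 * device's lists and locks.
 */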
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("device could not be identified\n");
		goto err;
	}

	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
			geo->minor_ver_id, geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("could not initialize core structures.\n");
		goto err;
	}

	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);
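
/*
 * Register a device with the subsystem: identify it, initialize the
 * core structures, create the ppa-list DMA pool (sized for a
 * worst-case vector command plus per-sector metadata) and add it to
 * the global device list.
 */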
int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
		kref_put(&dev->ref, nvm_free);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("device not found\n");
		return -EINVAL;
	}

	kref_get(&dev->ref);
	ret = nvm_create_tgt(dev, create);
	if (ret)
		kref_put(&dev->ref, nvm_free);

	return ret;
}
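
/*
 * Handlers for the ioctls exposed on the lightnvm/control misc
 * device, dispatched from nvm_ctl_ioctl() below.
 */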
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i >= ARRAY_SIZE(devices->info)) {
			pr_err("max %zd devices can be reported.\n",
			       ARRAY_SIZE(devices->info));
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lightnvm",
	.nodename = "lightnvm/control",
	.fops = &_ctl_fops,
};
builtin_misc_device(_nvm_misc);