lightnvm.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef NVM_H
#define NVM_H

#include <linux/blkdev.h>
#include <linux/types.h>
#include <uapi/linux/lightnvm.h>

enum {
        NVM_IO_OK = 0,
        NVM_IO_REQUEUE = 1,
        NVM_IO_DONE = 2,
        NVM_IO_ERR = 3,

        NVM_IOTYPE_NONE = 0,
        NVM_IOTYPE_GC = 1,
};

/* common format */
#define NVM_GEN_CH_BITS (8)
#define NVM_GEN_LUN_BITS (8)
#define NVM_GEN_BLK_BITS (16)
#define NVM_GEN_RESERVED (32)

/* 1.2 format */
#define NVM_12_PG_BITS (16)
#define NVM_12_PL_BITS (4)
#define NVM_12_SEC_BITS (4)
#define NVM_12_RESERVED (8)

/* 2.0 format */
#define NVM_20_SEC_BITS (24)
#define NVM_20_RESERVED (8)

enum {
        NVM_OCSSD_SPEC_12 = 12,
        NVM_OCSSD_SPEC_20 = 20,
};

struct ppa_addr {
        /* Generic structure for all addresses */
        union {
                /* generic device format */
                struct {
                        u64 ch : NVM_GEN_CH_BITS;
                        u64 lun : NVM_GEN_LUN_BITS;
                        u64 blk : NVM_GEN_BLK_BITS;
                        u64 reserved : NVM_GEN_RESERVED;
                } a;

                /* 1.2 device format */
                struct {
                        u64 ch : NVM_GEN_CH_BITS;
                        u64 lun : NVM_GEN_LUN_BITS;
                        u64 blk : NVM_GEN_BLK_BITS;
                        u64 pg : NVM_12_PG_BITS;
                        u64 pl : NVM_12_PL_BITS;
                        u64 sec : NVM_12_SEC_BITS;
                        u64 reserved : NVM_12_RESERVED;
                } g;

                /* 2.0 device format */
                struct {
                        u64 grp : NVM_GEN_CH_BITS;
                        u64 pu : NVM_GEN_LUN_BITS;
                        u64 chk : NVM_GEN_BLK_BITS;
                        u64 sec : NVM_20_SEC_BITS;
                        u64 reserved : NVM_20_RESERVED;
                } m;

                struct {
                        u64 line : 63;
                        u64 is_cached : 1;
                } c;

                u64 ppa;
        };
};
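
/*
 * Illustrative sketch (editor's note, not part of the original header):
 * every view of the union aliases the same 64-bit value, so an address
 * composed through one format can be read back through another, or as the
 * raw u64 for storage and comparison.
 *
 *      struct ppa_addr p = { .ppa = 0 };
 *
 *      p.g.ch = 1;     // 1.2 view: channel 1
 *      p.g.lun = 2;    //           LUN 2
 *      p.g.blk = 100;  //           block 100
 *
 *      // p.a.ch == 1 too: the generic view shares the low ch/lun/blk bits,
 *      // and p.ppa holds the packed 64-bit address.
 */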

struct nvm_rq;
struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
struct nvm_chk_meta;

typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
                                  struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
                                     dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
        nvm_id_fn *identity;
        nvm_op_bb_tbl_fn *get_bb_tbl;
        nvm_op_set_bb_fn *set_bb_tbl;

        nvm_get_chk_meta_fn *get_chk_meta;

        nvm_submit_io_fn *submit_io;

        nvm_create_dma_pool_fn *create_dma_pool;
        nvm_destroy_dma_pool_fn *destroy_dma_pool;
        nvm_dev_dma_alloc_fn *dev_dma_alloc;
        nvm_dev_dma_free_fn *dev_dma_free;
};
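
/*
 * Illustrative sketch (editor's note, assumed driver-side code, not part of
 * this header): a host driver wires its callbacks into an ops table and
 * hands it to the core through nvm_alloc_dev()/nvm_register(). The my_*
 * names below are hypothetical.
 *
 *      static struct nvm_dev_ops my_nvm_dev_ops = {
 *              .identity       = my_identity,
 *              .get_bb_tbl     = my_get_bb_tbl,
 *              .set_bb_tbl     = my_set_bb_tbl,
 *              .get_chk_meta   = my_get_chk_meta,
 *              .submit_io      = my_submit_io,
 *      };
 */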

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>

enum {
        /* HW Responsibilities */
        NVM_RSP_L2P = 1 << 0,
        NVM_RSP_ECC = 1 << 1,

        /* Physical Addressing Mode */
        NVM_ADDRMODE_LINEAR = 0,
        NVM_ADDRMODE_CHANNEL = 1,

        /* Plane programming mode for LUN */
        NVM_PLANE_SINGLE = 1,
        NVM_PLANE_DOUBLE = 2,
        NVM_PLANE_QUAD = 4,

        /* Status codes */
        NVM_RSP_SUCCESS = 0x0,
        NVM_RSP_NOT_CHANGEABLE = 0x1,
        NVM_RSP_ERR_FAILWRITE = 0x40ff,
        NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
        NVM_RSP_ERR_FAILECC = 0x4281,
        NVM_RSP_ERR_FAILCRC = 0x4004,
        NVM_RSP_WARN_HIGHECC = 0x4700,

        /* Device opcodes */
        NVM_OP_PWRITE = 0x91,
        NVM_OP_PREAD = 0x92,
        NVM_OP_ERASE = 0x90,

        /* PPA Command Flags */
        NVM_IO_SNGL_ACCESS = 0x0,
        NVM_IO_DUAL_ACCESS = 0x1,
        NVM_IO_QUAD_ACCESS = 0x2,

        /* NAND Access Modes */
        NVM_IO_SUSPEND = 0x80,
        NVM_IO_SLC_MODE = 0x100,
        NVM_IO_SCRAMBLE_ENABLE = 0x200,

        /* Block Types */
        NVM_BLK_T_FREE = 0x0,
        NVM_BLK_T_BAD = 0x1,
        NVM_BLK_T_GRWN_BAD = 0x2,
        NVM_BLK_T_DEV = 0x4,
        NVM_BLK_T_HOST = 0x8,

        /* Memory capabilities */
        NVM_ID_CAP_SLC = 0x1,
        NVM_ID_CAP_CMD_SUSPEND = 0x2,
        NVM_ID_CAP_SCRAMBLE = 0x4,
        NVM_ID_CAP_ENCRYPT = 0x8,

        /* Memory types */
        NVM_ID_FMTYPE_SLC = 0,
        NVM_ID_FMTYPE_MLC = 1,

        /* Device capabilities */
        NVM_ID_DCAP_BBLKMGMT = 0x1,
        NVM_UD_DCAP_ECC = 0x2,
};

struct nvm_id_lp_mlc {
        u16 num_pairs;
        u8 pairs[886];
};

struct nvm_id_lp_tbl {
        __u8 id[8];
        struct nvm_id_lp_mlc mlc;
};

struct nvm_addrf_12 {
        u8 ch_len;
        u8 lun_len;
        u8 blk_len;
        u8 pg_len;
        u8 pln_len;
        u8 sec_len;

        u8 ch_offset;
        u8 lun_offset;
        u8 blk_offset;
        u8 pg_offset;
        u8 pln_offset;
        u8 sec_offset;

        u64 ch_mask;
        u64 lun_mask;
        u64 blk_mask;
        u64 pg_mask;
        u64 pln_mask;
        u64 sec_mask;
};

struct nvm_addrf {
        u8 ch_len;
        u8 lun_len;
        u8 chk_len;
        u8 sec_len;
        u8 rsv_len[2];

        u8 ch_offset;
        u8 lun_offset;
        u8 chk_offset;
        u8 sec_offset;
        u8 rsv_off[2];

        u64 ch_mask;
        u64 lun_mask;
        u64 chk_mask;
        u64 sec_mask;
        u64 rsv_mask[2];
};

enum {
        /* Chunk states */
        NVM_CHK_ST_FREE = 1 << 0,
        NVM_CHK_ST_CLOSED = 1 << 1,
        NVM_CHK_ST_OPEN = 1 << 2,
        NVM_CHK_ST_OFFLINE = 1 << 3,

        /* Chunk types */
        NVM_CHK_TP_W_SEQ = 1 << 0,
        NVM_CHK_TP_W_RAN = 1 << 1,
        NVM_CHK_TP_SZ_SPEC = 1 << 4,
};

/*
 * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
 * buffer can be used when converting from little endian to cpu addressing.
 */
struct nvm_chk_meta {
        u8 state;
        u8 type;
        u8 wi;
        u8 rsvd[5];
        u64 slba;
        u64 cnlb;
        u64 wp;
};

struct nvm_target {
        struct list_head list;
        struct nvm_tgt_dev *dev;
        struct nvm_tgt_type *type;
        struct gendisk *disk;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_TARGET_DEFAULT_OP (11)
#define NVM_TARGET_MIN_OP (3)
#define NVM_TARGET_MAX_OP (80)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */

struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
        struct nvm_tgt_dev *dev;

        struct bio *bio;

        union {
                struct ppa_addr ppa_addr;
                dma_addr_t dma_ppa_list;
        };

        struct ppa_addr *ppa_list;

        void *meta_list;
        dma_addr_t dma_meta_list;

        nvm_end_io_fn *end_io;

        uint8_t opcode;
        uint16_t nr_ppas;
        uint16_t flags;

        u64 ppa_status; /* ppa media status */
        int error;

        int is_seq; /* Sequential hint flag. 1.2 only */

        void *private;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
        return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
        return rqdata + 1;
}

static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
{
        return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
}
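
/*
 * Illustrative sketch (editor's note, not part of the original header): the
 * PDU helpers assume a target allocates its per-request data contiguously
 * after the struct nvm_rq, e.g. one allocation of
 * sizeof(struct nvm_rq) + pdu_size. The struct my_pdu name is hypothetical.
 *
 *      struct nvm_rq *rqd = kmalloc(sizeof(*rqd) + sizeof(struct my_pdu),
 *                                   GFP_KERNEL);
 *      struct my_pdu *pdu = nvm_rq_to_pdu(rqd);        // == rqd + 1
 *
 *      BUG_ON(nvm_rq_from_pdu(pdu) != rqd);            // inverse mapping
 */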

enum {
        NVM_BLK_ST_FREE = 0x1,  /* Free block */
        NVM_BLK_ST_TGT = 0x2,   /* Block in use by target */
        NVM_BLK_ST_BAD = 0x8,   /* Bad block */
};

/* Instance geometry */
struct nvm_geo {
        /* device reported version */
        u8 major_ver_id;
        u8 minor_ver_id;

        /* kernel short version */
        u8 version;

        /* instance specific geometry */
        int num_ch;
        int num_lun;            /* per channel */

        /* calculated values */
        int all_luns;           /* across channels */
        int all_chunks;         /* across channels */

        int op;                 /* over-provision in instance */

        sector_t total_secs;    /* across channels */

        /* chunk geometry */
        u32 num_chk;            /* chunks per lun */
        u32 clba;               /* sectors per chunk */
        u16 csecs;              /* sector size */
        u16 sos;                /* out-of-band area size */
        bool ext;               /* metadata in extended data buffer */
        u32 mdts;               /* max data transfer size */

        /* device write constraints */
        u32 ws_min;             /* minimum write size */
        u32 ws_opt;             /* optimal write size */
        u32 mw_cunits;          /* distance required for successful read */
        u32 maxoc;              /* maximum open chunks */
        u32 maxocpu;            /* maximum open chunks per parallel unit */

        /* device capabilities */
        u32 mccap;

        /* device timings */
        u32 trdt;               /* Avg. Tread (ns) */
        u32 trdm;               /* Max Tread (ns) */
        u32 tprt;               /* Avg. Tprog (ns) */
        u32 tprm;               /* Max Tprog (ns) */
        u32 tbet;               /* Avg. Terase (ns) */
        u32 tbem;               /* Max Terase (ns) */

        /* generic address format */
        struct nvm_addrf addrf;

        /* 1.2 compatibility */
        u8 vmnt;
        u32 cap;
        u32 dom;

        u8 mtype;
        u8 fmtype;

        u16 cpar;
        u32 mpos;

        u8 num_pln;
        u8 pln_mode;
        u16 num_pg;
        u16 fpg_sz;
};

/* sub-device structure */
struct nvm_tgt_dev {
        /* Device information */
        struct nvm_geo geo;

        /* Base ppas for target LUNs */
        struct ppa_addr *luns;

        struct request_queue *q;

        struct nvm_dev *parent;
        void *map;
};

struct nvm_dev {
        struct nvm_dev_ops *ops;

        struct list_head devices;

        /* Device information */
        struct nvm_geo geo;

        unsigned long *lun_map;
        void *dma_pool;

        /* Backend device */
        struct request_queue *q;
        char name[DISK_NAME_LEN];
        void *private_data;

        struct kref ref;
        void *rmap;

        struct mutex mlock;
        spinlock_t lock;

        /* target management */
        struct list_head area_list;
        struct list_head targets;
};

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
                                                  struct ppa_addr r)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr l;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;

                l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
                l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
                l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
                l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
                l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
                l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
        } else {
                struct nvm_addrf *lbaf = &geo->addrf;

                l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
                l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
                l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
                l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
        }

        return l;
}

static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
                                                  struct ppa_addr r)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr l;

        l.ppa = 0;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;

                l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
                l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
                l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
                l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
                l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
                l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
        } else {
                struct nvm_addrf *lbaf = &geo->addrf;

                l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
                l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
                l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
                l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
        }

        return l;
}
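
/*
 * Illustrative sketch (editor's note, not part of the original header): for
 * a given device, the two helpers above are inverses, so a generic-format
 * address survives a round trip through the device format.
 *
 *      struct ppa_addr dev_ppa = generic_to_dev_addr(dev, ppa);
 *      struct ppa_addr back = dev_to_generic_addr(dev, dev_ppa);
 *      // back carries the same ch/lun/blk/... fields as ppa
 *      // (reserved bits are not preserved)
 */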

static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
                                    struct ppa_addr p)
{
        struct nvm_geo *geo = &dev->geo;
        u64 caddr;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;

                caddr = (u64)p.g.pg << ppaf->pg_offset;
                caddr |= (u64)p.g.pl << ppaf->pln_offset;
                caddr |= (u64)p.g.sec << ppaf->sec_offset;
        } else {
                caddr = p.m.sec;
        }

        return caddr;
}

static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
                                                 void *addrf, u32 ppa32)
{
        struct ppa_addr ppa64;

        ppa64.ppa = 0;

        if (ppa32 == -1) {
                ppa64.ppa = ADDR_EMPTY;
        } else if (ppa32 & (1U << 31)) {
                ppa64.c.line = ppa32 & ((~0U) >> 1);
                ppa64.c.is_cached = 1;
        } else {
                struct nvm_geo *geo = &dev->geo;

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        struct nvm_addrf_12 *ppaf = addrf;

                        ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> ppaf->ch_offset;
                        ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> ppaf->lun_offset;
                        ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> ppaf->blk_offset;
                        ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> ppaf->pg_offset;
                        ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> ppaf->pln_offset;
                        ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> ppaf->sec_offset;
                } else {
                        struct nvm_addrf *lbaf = addrf;

                        ppa64.m.grp = (ppa32 & lbaf->ch_mask) >> lbaf->ch_offset;
                        ppa64.m.pu = (ppa32 & lbaf->lun_mask) >> lbaf->lun_offset;
                        ppa64.m.chk = (ppa32 & lbaf->chk_mask) >> lbaf->chk_offset;
                        ppa64.m.sec = (ppa32 & lbaf->sec_mask) >> lbaf->sec_offset;
                }
        }

        return ppa64;
}

static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
                                     void *addrf, struct ppa_addr ppa64)
{
        u32 ppa32 = 0;

        if (ppa64.ppa == ADDR_EMPTY) {
                ppa32 = ~0U;
        } else if (ppa64.c.is_cached) {
                ppa32 |= ppa64.c.line;
                ppa32 |= 1U << 31;
        } else {
                struct nvm_geo *geo = &dev->geo;

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        struct nvm_addrf_12 *ppaf = addrf;

                        ppa32 |= ppa64.g.ch << ppaf->ch_offset;
                        ppa32 |= ppa64.g.lun << ppaf->lun_offset;
                        ppa32 |= ppa64.g.blk << ppaf->blk_offset;
                        ppa32 |= ppa64.g.pg << ppaf->pg_offset;
                        ppa32 |= ppa64.g.pl << ppaf->pln_offset;
                        ppa32 |= ppa64.g.sec << ppaf->sec_offset;
                } else {
                        struct nvm_addrf *lbaf = addrf;

                        ppa32 |= ppa64.m.grp << lbaf->ch_offset;
                        ppa32 |= ppa64.m.pu << lbaf->lun_offset;
                        ppa32 |= ppa64.m.chk << lbaf->chk_offset;
                        ppa32 |= ppa64.m.sec << lbaf->sec_offset;
                }
        }

        return ppa32;
}
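
/*
 * Illustrative sketch (editor's note, not part of the original header): the
 * 32-bit form reserves bit 31 as the "cached" flag and ~0U as the empty
 * marker, packing the address fields into the remaining bits, so the two
 * conversions above round-trip:
 *
 *      u32 compact = nvm_ppa64_to_ppa32(dev, addrf, ppa);
 *      struct ppa_addr back = nvm_ppa32_to_ppa64(dev, addrf, compact);
 *      // back encodes the same address (or cache line) as ppa
 */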

static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
                                      struct ppa_addr *ppa)
{
        struct nvm_geo *geo = &dev->geo;
        int last = 0;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                int sec = ppa->g.sec;

                sec++;
                if (sec == geo->ws_min) {
                        int pg = ppa->g.pg;

                        sec = 0;
                        pg++;
                        if (pg == geo->num_pg) {
                                int pl = ppa->g.pl;

                                pg = 0;
                                pl++;
                                if (pl == geo->num_pln)
                                        last = 1;

                                ppa->g.pl = pl;
                        }
                        ppa->g.pg = pg;
                }
                ppa->g.sec = sec;
        } else {
                ppa->m.sec++;
                if (ppa->m.sec == geo->clba)
                        last = 1;
        }

        return last;
}
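
/*
 * Illustrative sketch (editor's note, not part of the original header):
 * walking every sector of one chunk, starting from its first ppa. The
 * helper advances the address and returns 1 once it wraps past the last
 * sector. first_ppa_of_chunk is a hypothetical starting address.
 *
 *      struct ppa_addr ppa = first_ppa_of_chunk;
 *      do {
 *              // ... issue I/O for ppa ...
 *      } while (!nvm_next_ppa_in_chk(dev, &ppa));
 */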

typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
                                int flags);
typedef void (nvm_tgt_exit_fn)(void *, bool);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);

enum {
        NVM_TGT_F_DEV_L2P = 0,
        NVM_TGT_F_HOST_L2P = 1 << 0,
};

struct nvm_tgt_type {
        const char *name;
        unsigned int version[3];
        int flags;

        /* target entry points */
        const struct block_device_operations *bops;
        nvm_tgt_capacity_fn *capacity;

        /* module-specific init/teardown */
        nvm_tgt_init_fn *init;
        nvm_tgt_exit_fn *exit;

        /* sysfs */
        nvm_tgt_sysfs_init_fn *sysfs_init;
        nvm_tgt_sysfs_exit_fn *sysfs_exit;

        /* For internal use */
        struct list_head list;
        struct module *owner;
};
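
/*
 * Illustrative sketch (editor's note, assumed target-module code, not part
 * of this header): a target fills in a type descriptor and registers it at
 * module init time. The my_*/mytgt names are hypothetical.
 *
 *      static struct nvm_tgt_type tt_mytgt = {
 *              .name           = "mytgt",
 *              .version        = {1, 0, 0},
 *              .capacity       = my_capacity,
 *              .init           = my_init,
 *              .exit           = my_exit,
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      static int __init mytgt_module_init(void)
 *      {
 *              return nvm_register_tgt_type(&tt_mytgt);
 *      }
 */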

extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);

extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
                              int, struct nvm_chk_meta *);
extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
                              int, int);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *);
extern void nvm_end_io(struct nvm_rq *);
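
/*
 * Illustrative sketch (editor's note, assumed target-side code, not part of
 * this header): a synchronous vector read. Bio setup and the ppa_list /
 * meta_list DMA allocations are elided; names are hypothetical.
 *
 *      struct nvm_rq rqd = { };
 *
 *      rqd.opcode = NVM_OP_PREAD;
 *      rqd.nr_ppas = nr_ppas;          // up to NVM_MAX_VLBA
 *      rqd.bio = bio;
 *      // single-sector requests use rqd.ppa_addr; larger ones rqd.ppa_list
 *
 *      ret = nvm_submit_io_sync(tgt_dev, &rqd, NULL);
 *      if (!ret && rqd.error)
 *              ;       // inspect rqd.ppa_status for per-ppa media status
 */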

#else /* CONFIG_NVM */

struct nvm_dev_ops;

static inline struct nvm_dev *nvm_alloc_dev(int node)
{
        return ERR_PTR(-EINVAL);
}

static inline int nvm_register(struct nvm_dev *dev)
{
        return -EINVAL;
}

static inline void nvm_unregister(struct nvm_dev *dev) {}

#endif /* CONFIG_NVM */
#endif /* NVM_H */