// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
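
/*
 * Map se_cmd->orig_fe_lun to the backing se_lun/se_device through the
 * initiator's se_dev_entry table, taking a percpu lun_ref on success.
 * When no MappedLUN=0 exists, fall back to the TPG's write-protected
 * virtual LUN 0 so REPORT LUNS and friends can still be serviced.
 */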
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
out_unlock:
	rcu_read_unlock();
	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
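
/*
 * Resolve the LUN for a task management request and link the TMR onto
 * the backing device's dev_tmr_list; returns -ENODEV when no se_lun is
 * mapped for se_cmd->orig_fe_lun.
 */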
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();
	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);
	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);

	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;

	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}
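
/*
 * Allocate an unused RELATIVE TARGET PORT IDENTIFIER for a new port on
 * @dev, skipping the reserved value 0h and any identifier already in
 * use on dev->dev_sep_list.
 */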
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;

	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug(" Type: %s ", scsi_device_type(device_type));
}
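
/*
 * Allocate a new se_device via the backend's alloc_device() callback and
 * initialize generic core state, default attributes and the internal
 * xcopy_lun.  The device is not yet configured nor visible in devices_idr.
 */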
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
								block_size;
	attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}
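
/*
 * Finish bringing up a device: allocate a devices_idr id, invoke the
 * backend's configure_device() callback, set up ALUA and the QUEUE_FULL
 * workqueue, and mark the device DF_CONFIGURED.
 */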
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}
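
/*
 * Tear down and release a device allocated by target_alloc_device(),
 * undoing target_configure_device() first when the device was configured.
 */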
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);

	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);