// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 */

#include <linux/ctype.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#include "ql4_version.h"

void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
			    int in_count)
{
	int i;

	/* Load all mailbox registers, except mailbox 0. */
	for (i = 1; i < in_count; i++)
		writel(mbx_cmd[i], &ha->reg->mailbox[i]);

	/* Wakeup firmware */
	writel(mbx_cmd[0], &ha->reg->mailbox[0]);
	readl(&ha->reg->mailbox[0]);
	writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
	readl(&ha->reg->ctrl_status);
}
void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
{
	int intr_status;

	intr_status = readl(&ha->reg->ctrl_status);
	if (intr_status & INTR_PENDING) {
		/*
		 * Service the interrupt.
		 * The ISR will save the mailbox status registers
		 * to a temporary storage location in the adapter structure.
		 */
		ha->mbox_status_count = out_count;
		ha->isp_ops->interrupt_service_routine(ha, intr_status);
	}
}
/**
 * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
 * @ha: Pointer to host adapter structure.
 * returns: 1=polling mode, 0=non-polling mode
 **/
static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
{
	int rval = 1;

	if (is_qla8032(ha) || is_qla8042(ha)) {
		if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
		    test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
			rval = 0;
	} else {
		if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
		    test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
		    test_bit(AF_ONLINE, &ha->flags) &&
		    !test_bit(AF_HA_REMOVAL, &ha->flags))
			rval = 0;
	}

	return rval;
}
/**
 * qla4xxx_mailbox_command - issues mailbox commands
 * @ha: Pointer to host adapter structure.
 * @inCount: number of mailbox registers to load.
 * @outCount: number of mailbox registers to return.
 * @mbx_cmd: data pointer for mailbox in registers.
 * @mbx_sts: data pointer for mailbox out registers.
 *
 * This routine issues mailbox commands and waits for completion.
 * If outCount is 0, this routine completes successfully WITHOUT waiting
 * for the mailbox command to complete.
 **/
int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
			    uint8_t outCount, uint32_t *mbx_cmd,
			    uint32_t *mbx_sts)
{
	int status = QLA_ERROR;
	uint8_t i;
	u_long wait_count;
	unsigned long flags = 0;
	uint32_t dev_state;

	/* Make sure that pointers are valid */
	if (!mbx_cmd || !mbx_sts) {
		DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
			      "pointer\n", ha->host_no, __func__));
		return status;
	}

	if (is_qla40XX(ha)) {
		if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
			DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
					  "prematurely completing mbx cmd as "
					  "adapter removal detected\n",
					  ha->host_no, __func__));
			return status;
		}
	}

	if ((is_aer_supported(ha)) &&
	    (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
			      "timeout MBX Exiting.\n", ha->host_no, __func__));
		return status;
	}

	/* Mailbox code active */
	wait_count = MBOX_TOV * 100;

	while (wait_count--) {
		mutex_lock(&ha->mbox_sem);
		if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			set_bit(AF_MBOX_COMMAND, &ha->flags);
			mutex_unlock(&ha->mbox_sem);
			break;
		}
		mutex_unlock(&ha->mbox_sem);
		if (!wait_count) {
			DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n",
				      ha->host_no, __func__));
			return status;
		}
		msleep(10);
	}

	if (is_qla80XX(ha)) {
		if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
			DEBUG2(ql4_printk(KERN_WARNING, ha,
					  "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
					  ha->host_no, __func__));
			goto mbox_exit;
		}
		/* Do not send any mbx cmd if h/w is in failed state */
		ha->isp_ops->idc_lock(ha);
		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
		ha->isp_ops->idc_unlock(ha);
		if (dev_state == QLA8XXX_DEV_FAILED) {
			ql4_printk(KERN_WARNING, ha,
				   "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
				   ha->host_no, __func__);
			goto mbox_exit;
		}
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->mbox_status_count = outCount;
	for (i = 0; i < outCount; i++)
		ha->mbox_status[i] = 0;

	/* Queue the mailbox command to the firmware */
	ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Wait for completion */

	/*
	 * If we don't want status, don't wait for the mailbox command to
	 * complete.  For example, MBOX_CMD_RESET_FW doesn't return status,
	 * you must poll the inbound Interrupt Mask for completion.
	 */
	if (outCount == 0) {
		status = QLA_SUCCESS;
		goto mbox_exit;
	}

	/*
	 * Wait for completion: Poll or completion queue
	 */
	if (qla4xxx_is_intr_poll_mode(ha)) {
		/* Poll for command to complete */
		wait_count = jiffies + MBOX_TOV * HZ;
		while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
			if (time_after_eq(jiffies, wait_count))
				break;
			/*
			 * Service the interrupt.
			 * The ISR will save the mailbox status registers
			 * to a temporary storage location in the adapter
			 * structure.
			 */
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->isp_ops->process_mailbox_interrupt(ha, outCount);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			msleep(10);
		}
	} else {
		/* Do not poll for completion. Use completion queue */
		set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
		wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
		clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
	}

	/* Check for mailbox timeout. */
	if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
		if (is_qla80XX(ha) &&
		    test_bit(AF_FW_RECOVERY, &ha->flags)) {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld: %s: prematurely completing mbx cmd as "
					  "firmware recovery detected\n",
					  ha->host_no, __func__));
			goto mbox_exit;
		}
		ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n",
			   ha->host_no, mbx_cmd[0]);
		ha->mailbox_timeout_count++;
		mbx_sts[0] = (-1);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		if (is_qla8022(ha)) {
			ql4_printk(KERN_INFO, ha,
				   "disabling pause transmit on port 0 & 1.\n");
			qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
					CRB_NIU_XG_PAUSE_CTL_P0 |
					CRB_NIU_XG_PAUSE_CTL_P1);
		} else if (is_qla8032(ha) || is_qla8042(ha)) {
			ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
				   __func__);
			qla4_83xx_disable_pause(ha);
		}
		goto mbox_exit;
	}

	/*
	 * Copy the mailbox out registers to the caller's mailbox in/out
	 * structure.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 0; i < outCount; i++)
		mbx_sts[i] = ha->mbox_status[i];

	/* Set return status and error flags (if applicable). */
	switch (ha->mbox_status[0]) {
	case MBOX_STS_COMMAND_COMPLETE:
		status = QLA_SUCCESS;
		break;

	case MBOX_STS_INTERMEDIATE_COMPLETION:
		status = QLA_SUCCESS;
		break;

	case MBOX_STS_BUSY:
		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
			   ha->host_no, __func__, mbx_cmd[0]);
		ha->mailbox_timeout_count++;
		break;

	default:
		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n",
			   ha->host_no, __func__, mbx_cmd[0], mbx_sts[0],
			   mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4],
			   mbx_sts[5], mbx_sts[6], mbx_sts[7]);
		break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

mbox_exit:
	mutex_lock(&ha->mbox_sem);
	clear_bit(AF_MBOX_COMMAND, &ha->flags);
	mutex_unlock(&ha->mbox_sem);
	clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);

	return status;
}
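
/*
 * Typical caller pattern (a minimal sketch, mirroring the helpers later in
 * this file such as qla4xxx_get_firmware_state): zero both register arrays,
 * load mbox_cmd[0] with the command opcode plus any DMA address/length
 * operands, then check both the return value and mbox_sts[0].
 *
 *	uint32_t mbox_cmd[MBOX_REG_COUNT];
 *	uint32_t mbox_sts[MBOX_REG_COUNT];
 *
 *	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
 *	memset(&mbox_sts, 0, sizeof(mbox_sts));
 *	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
 *	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0],
 *				    &mbox_sts[0]) != QLA_SUCCESS)
 *		return QLA_ERROR;
 */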
/**
 * qla4xxx_get_minidump_template - Get the firmware template
 * @ha: Pointer to host adapter structure.
 * @phys_addr: dma address for template
 *
 * Obtain the minidump template from firmware during initialization
 * as it may not be available when minidump is desired.
 **/
int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
				  dma_addr_t phys_addr)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_MINIDUMP;
	mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
	mbox_cmd[2] = LSDW(phys_addr);
	mbox_cmd[3] = MSDW(phys_addr);
	mbox_cmd[4] = ha->fw_dump_tmplt_size;
	mbox_cmd[5] = 0;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
				  ha->host_no, __func__, mbox_cmd[0],
				  mbox_sts[0], mbox_sts[1]));
	}
	return status;
}
/**
 * qla4xxx_req_template_size - Get minidump template size from firmware.
 * @ha: Pointer to host adapter structure.
 **/
int qla4xxx_req_template_size(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_MINIDUMP;
	mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status == QLA_SUCCESS) {
		ha->fw_dump_tmplt_size = mbox_sts[1];
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
				  __func__, mbox_sts[0], mbox_sts[1],
				  mbox_sts[2], mbox_sts[3], mbox_sts[4],
				  mbox_sts[5], mbox_sts[6], mbox_sts[7]));
		if (ha->fw_dump_tmplt_size == 0)
			status = QLA_ERROR;
	} else {
		ql4_printk(KERN_WARNING, ha,
			   "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
			   __func__, mbox_sts[0], mbox_sts[1]);
		status = QLA_ERROR;
	}

	return status;
}
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
{
	set_bit(AF_FW_RECOVERY, &ha->flags);
	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n",
		   ha->host_no, __func__);

	if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
		if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) {
			complete(&ha->mbx_intr_comp);
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
				   "recovery, doing premature completion of "
				   "mbx cmd\n", ha->host_no, __func__);
		} else {
			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
				   "recovery, doing premature completion of "
				   "polling mbx cmd\n", ha->host_no, __func__);
		}
	}
}
static uint8_t
qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
		 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
{
	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);

	if (is_qla8022(ha))
		qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0);

	mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
	mbox_cmd[1] = 0;
	mbox_cmd[2] = LSDW(init_fw_cb_dma);
	mbox_cmd[3] = MSDW(init_fw_cb_dma);
	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);

	if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
	    QLA_SUCCESS) {
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
			      "MBOX_CMD_INITIALIZE_FIRMWARE"
			      " failed w/ status %04X\n",
			      ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}
uint8_t
qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
		 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
{
	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);

	mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
	mbox_cmd[2] = LSDW(init_fw_cb_dma);
	mbox_cmd[3] = MSDW(init_fw_cb_dma);
	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);

	if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
	    QLA_SUCCESS) {
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
			      "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
			      " failed w/ status %04X\n",
			      ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}
uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state)
{
	uint8_t ipaddr_state;

	switch (fw_ipaddr_state) {
	case IP_ADDRSTATE_UNCONFIGURED:
		ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
		break;
	case IP_ADDRSTATE_INVALID:
		ipaddr_state = ISCSI_IPDDRESS_STATE_INVALID;
		break;
	case IP_ADDRSTATE_ACQUIRING:
		ipaddr_state = ISCSI_IPDDRESS_STATE_ACQUIRING;
		break;
	case IP_ADDRSTATE_TENTATIVE:
		ipaddr_state = ISCSI_IPDDRESS_STATE_TENTATIVE;
		break;
	case IP_ADDRSTATE_DEPRICATED:
		ipaddr_state = ISCSI_IPDDRESS_STATE_DEPRECATED;
		break;
	case IP_ADDRSTATE_PREFERRED:
		ipaddr_state = ISCSI_IPDDRESS_STATE_VALID;
		break;
	case IP_ADDRSTATE_DISABLING:
		ipaddr_state = ISCSI_IPDDRESS_STATE_DISABLING;
		break;
	default:
		ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
	}
	return ipaddr_state;
}
static void
qla4xxx_update_local_ip(struct scsi_qla_host *ha,
			struct addr_ctrl_blk *init_fw_cb)
{
	ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
	ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
	ha->ip_config.ipv4_addr_state =
			qla4xxx_set_ipaddr_state(init_fw_cb->ipv4_addr_state);
	ha->ip_config.eth_mtu_size =
				le16_to_cpu(init_fw_cb->eth_mtu_size);
	ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port);

	if (ha->acb_version == ACB_SUPPORTED) {
		ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts);
		ha->ip_config.ipv6_addl_options =
				le16_to_cpu(init_fw_cb->ipv6_addtl_opts);
		ha->ip_config.ipv6_tcp_options =
				le16_to_cpu(init_fw_cb->ipv6_tcp_opts);
	}

	/* Save IPv4 Address Info */
	memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr,
	       min(sizeof(ha->ip_config.ip_address),
		   sizeof(init_fw_cb->ipv4_addr)));
	memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet,
	       min(sizeof(ha->ip_config.subnet_mask),
		   sizeof(init_fw_cb->ipv4_subnet)));
	memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr,
	       min(sizeof(ha->ip_config.gateway),
		   sizeof(init_fw_cb->ipv4_gw_addr)));

	ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag);
	ha->ip_config.control = init_fw_cb->control;
	ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf;
	ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos;
	ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid;
	ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len;
	memcpy(ha->ip_config.ipv4_alt_cid, init_fw_cb->ipv4_dhcp_alt_cid,
	       min(sizeof(ha->ip_config.ipv4_alt_cid),
		   sizeof(init_fw_cb->ipv4_dhcp_alt_cid)));
	ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len;
	memcpy(ha->ip_config.ipv4_vid, init_fw_cb->ipv4_dhcp_vid,
	       min(sizeof(ha->ip_config.ipv4_vid),
		   sizeof(init_fw_cb->ipv4_dhcp_vid)));
	ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl;
	ha->ip_config.def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
	ha->ip_config.abort_timer = init_fw_cb->abort_timer;
	ha->ip_config.iscsi_options = le16_to_cpu(init_fw_cb->iscsi_opts);
	ha->ip_config.iscsi_max_pdu_size =
				le16_to_cpu(init_fw_cb->iscsi_max_pdu_size);
	ha->ip_config.iscsi_first_burst_len =
				le16_to_cpu(init_fw_cb->iscsi_fburst_len);
	ha->ip_config.iscsi_max_outstnd_r2t =
				le16_to_cpu(init_fw_cb->iscsi_max_outstnd_r2t);
	ha->ip_config.iscsi_max_burst_len =
				le16_to_cpu(init_fw_cb->iscsi_max_burst_len);
	memcpy(ha->ip_config.iscsi_name, init_fw_cb->iscsi_name,
	       min(sizeof(ha->ip_config.iscsi_name),
		   sizeof(init_fw_cb->iscsi_name)));

	if (is_ipv6_enabled(ha)) {
		/* Save IPv6 Address */
		ha->ip_config.ipv6_link_local_state =
		  qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_lnk_lcl_addr_state);
		ha->ip_config.ipv6_addr0_state =
			qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr0_state);
		ha->ip_config.ipv6_addr1_state =
			qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr1_state);

		switch (le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state)) {
		case IPV6_RTRSTATE_UNKNOWN:
			ha->ip_config.ipv6_default_router_state =
						ISCSI_ROUTER_STATE_UNKNOWN;
			break;
		case IPV6_RTRSTATE_MANUAL:
			ha->ip_config.ipv6_default_router_state =
						ISCSI_ROUTER_STATE_MANUAL;
			break;
		case IPV6_RTRSTATE_ADVERTISED:
			ha->ip_config.ipv6_default_router_state =
						ISCSI_ROUTER_STATE_ADVERTISED;
			break;
		case IPV6_RTRSTATE_STALE:
			ha->ip_config.ipv6_default_router_state =
						ISCSI_ROUTER_STATE_STALE;
			break;
		default:
			ha->ip_config.ipv6_default_router_state =
						ISCSI_ROUTER_STATE_UNKNOWN;
		}

		ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
		ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;

		memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8],
		       init_fw_cb->ipv6_if_id,
		       min(sizeof(ha->ip_config.ipv6_link_local_addr)/2,
			   sizeof(init_fw_cb->ipv6_if_id)));
		memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0,
		       min(sizeof(ha->ip_config.ipv6_addr0),
			   sizeof(init_fw_cb->ipv6_addr0)));
		memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1,
		       min(sizeof(ha->ip_config.ipv6_addr1),
			   sizeof(init_fw_cb->ipv6_addr1)));
		memcpy(&ha->ip_config.ipv6_default_router_addr,
		       init_fw_cb->ipv6_dflt_rtr_addr,
		       min(sizeof(ha->ip_config.ipv6_default_router_addr),
			   sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
		ha->ip_config.ipv6_vlan_tag =
				be16_to_cpu(init_fw_cb->ipv6_vlan_tag);
		ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port);
		ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id;
		ha->ip_config.ipv6_flow_lbl =
				le16_to_cpu(init_fw_cb->ipv6_flow_lbl);
		ha->ip_config.ipv6_traffic_class =
				init_fw_cb->ipv6_traffic_class;
		ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit;
		ha->ip_config.ipv6_nd_reach_time =
				le32_to_cpu(init_fw_cb->ipv6_nd_reach_time);
		ha->ip_config.ipv6_nd_rexmit_timer =
				le32_to_cpu(init_fw_cb->ipv6_nd_rexmit_timer);
		ha->ip_config.ipv6_nd_stale_timeout =
				le32_to_cpu(init_fw_cb->ipv6_nd_stale_timeout);
		ha->ip_config.ipv6_dup_addr_detect_count =
				init_fw_cb->ipv6_dup_addr_detect_count;
		ha->ip_config.ipv6_gw_advrt_mtu =
				le32_to_cpu(init_fw_cb->ipv6_gw_advrt_mtu);
		ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf;
	}
}
uint8_t
qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
			  uint32_t *mbox_cmd,
			  uint32_t *mbox_sts,
			  struct addr_ctrl_blk *init_fw_cb,
			  dma_addr_t init_fw_cb_dma)
{
	if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
	    != QLA_SUCCESS) {
		DEBUG2(printk(KERN_WARNING
			      "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
			      ha->host_no, __func__));
		return QLA_ERROR;
	}

	DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));

	/* Save some info in adapter structure. */
	ha->acb_version = init_fw_cb->acb_version;
	ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
	ha->heartbeat_interval = init_fw_cb->hb_interval;
	memcpy(ha->name_string, init_fw_cb->iscsi_name,
	       min(sizeof(ha->name_string),
		   sizeof(init_fw_cb->iscsi_name)));
	ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
	/*memcpy(ha->alias, init_fw_cb->Alias,
	       min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/

	qla4xxx_update_local_ip(ha, init_fw_cb);

	return QLA_SUCCESS;
}
/**
 * qla4xxx_initialize_fw_cb - initializes firmware control block.
 * @ha: Pointer to host adapter structure.
 **/
int qla4xxx_initialize_fw_cb(struct scsi_qla_host *ha)
{
	struct addr_ctrl_blk *init_fw_cb;
	dma_addr_t init_fw_cb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
					sizeof(struct addr_ctrl_blk),
					&init_fw_cb_dma, GFP_KERNEL);
	if (init_fw_cb == NULL) {
		DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
			      ha->host_no, __func__));
		goto exit_init_fw_cb_no_free;
	}

	/* Get Initialize Firmware Control Block. */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
	    QLA_SUCCESS) {
		goto exit_init_fw_cb;
	}

	/* Fill in the request and response queue information. */
	init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
	init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
	init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
	init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
	init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
	init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
	init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
	init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
	init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
	init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));

	/* Set up required options. */
	init_fw_cb->fw_options |=
		__constant_cpu_to_le16(FWOPT_SESSION_MODE |
				       FWOPT_INITIATOR_MODE);

	if (is_qla80XX(ha))
		init_fw_cb->fw_options |=
		    __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);

	init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);

	init_fw_cb->add_fw_options = 0;
	init_fw_cb->add_fw_options |=
			__constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
	init_fw_cb->add_fw_options |=
			__constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);

	if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
	    != QLA_SUCCESS) {
		DEBUG2(printk(KERN_WARNING
			      "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
			      ha->host_no, __func__));
		goto exit_init_fw_cb;
	}

	if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
				      init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
			      ha->host_no, __func__));
		goto exit_init_fw_cb;
	}
	status = QLA_SUCCESS;

exit_init_fw_cb:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
			  init_fw_cb, init_fw_cb_dma);
exit_init_fw_cb_no_free:
	return status;
}
/**
 * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP
 * @ha: Pointer to host adapter structure.
 **/
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha)
{
	struct addr_ctrl_blk *init_fw_cb;
	dma_addr_t init_fw_cb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
					sizeof(struct addr_ctrl_blk),
					&init_fw_cb_dma, GFP_KERNEL);
	if (init_fw_cb == NULL) {
		printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
		       __func__);
		return QLA_ERROR;
	}

	/* Get Initialize Firmware Control Block. */
	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
	    QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
			      ha->host_no, __func__));
		dma_free_coherent(&ha->pdev->dev,
				  sizeof(struct addr_ctrl_blk),
				  init_fw_cb, init_fw_cb_dma);
		return QLA_ERROR;
	}

	/* Save IP Address. */
	qla4xxx_update_local_ip(ha, init_fw_cb);
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
			  init_fw_cb, init_fw_cb_dma);

	return QLA_SUCCESS;
}
/**
 * qla4xxx_get_firmware_state - gets firmware state of HBA
 * @ha: Pointer to host adapter structure.
 **/
int qla4xxx_get_firmware_state(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Get firmware state. */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
			      "status %04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
		return QLA_ERROR;
	}
	ha->firmware_state = mbox_sts[1];
	ha->board_id = mbox_sts[2];
	ha->addl_fw_state = mbox_sts[3];
	DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
		      ha->host_no, __func__, ha->firmware_state);)

	return QLA_SUCCESS;
}
/**
 * qla4xxx_get_firmware_status - retrieves firmware status
 * @ha: Pointer to host adapter structure.
 **/
int qla4xxx_get_firmware_status(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Get firmware status. */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
			      "status %04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
		return QLA_ERROR;
	}

	/* High-water mark of IOCBs */
	ha->iocb_hiwat = mbox_sts[2];
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: firmware IOCBs available = %d\n", __func__,
			  ha->iocb_hiwat));

	if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
		ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;

	/* Ideally, we should not enter this code, as the # of firmware
	 * IOCBs is hard-coded in the firmware. We set a default
	 * iocb_hiwat here just in case */
	if (ha->iocb_hiwat == 0) {
		ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
		DEBUG2(ql4_printk(KERN_WARNING, ha,
				  "%s: Setting IOCB's to = %d\n", __func__,
				  ha->iocb_hiwat));
	}

	return QLA_SUCCESS;
}
/**
 * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_index: Firmware's device database index
 * @fw_ddb_entry: Pointer to firmware's device database entry structure
 * @fw_ddb_entry_dma: DMA address of the device database entry buffer
 * @num_valid_ddb_entries: Pointer to number of valid ddb entries
 * @next_ddb_index: Pointer to next valid device database index
 * @fw_ddb_device_state: Pointer to device state
 * @conn_err_detail: Pointer to connection error details
 * @tcp_source_port_num: Pointer to TCP source port number
 * @connection_id: Pointer to connection ID
 **/
int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
			    uint16_t fw_ddb_index,
			    struct dev_db_entry *fw_ddb_entry,
			    dma_addr_t fw_ddb_entry_dma,
			    uint32_t *num_valid_ddb_entries,
			    uint32_t *next_ddb_index,
			    uint32_t *fw_ddb_device_state,
			    uint32_t *conn_err_detail,
			    uint16_t *tcp_source_port_num,
			    uint16_t *connection_id)
{
	int status = QLA_ERROR;
	uint16_t options;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Make sure the device index is valid */
	if (fw_ddb_index >= MAX_DDB_ENTRIES) {
		DEBUG2(printk("scsi%ld: %s: ddb [%d] out of range.\n",
			      ha->host_no, __func__, fw_ddb_index));
		goto exit_get_fwddb;
	}
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	if (fw_ddb_entry)
		memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry));

	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
	mbox_cmd[1] = (uint32_t) fw_ddb_index;
	mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
	mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
	mbox_cmd[4] = sizeof(struct dev_db_entry);

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0],
				    &mbox_sts[0]) == QLA_ERROR) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
			      " with status 0x%04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
		goto exit_get_fwddb;
	}

	if (fw_ddb_index != mbox_sts[1]) {
		DEBUG2(printk("scsi%ld: %s: ddb mismatch [%d] != [%d].\n",
			      ha->host_no, __func__, fw_ddb_index,
			      mbox_sts[1]));
		goto exit_get_fwddb;
	}

	if (fw_ddb_entry) {
		options = le16_to_cpu(fw_ddb_entry->options);
		if (options & DDB_OPT_IPV6_DEVICE) {
			ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
				   "Next %d State %04x ConnErr %08x %pI6 "
				   ":%04d \"%s\"\n", __func__, fw_ddb_index,
				   mbox_sts[0], mbox_sts[2], mbox_sts[3],
				   mbox_sts[4], mbox_sts[5],
				   fw_ddb_entry->ip_addr,
				   le16_to_cpu(fw_ddb_entry->port),
				   fw_ddb_entry->iscsi_name);
		} else {
			ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
				   "Next %d State %04x ConnErr %08x %pI4 "
				   ":%04d \"%s\"\n", __func__, fw_ddb_index,
				   mbox_sts[0], mbox_sts[2], mbox_sts[3],
				   mbox_sts[4], mbox_sts[5],
				   fw_ddb_entry->ip_addr,
				   le16_to_cpu(fw_ddb_entry->port),
				   fw_ddb_entry->iscsi_name);
		}
	}
	if (num_valid_ddb_entries)
		*num_valid_ddb_entries = mbox_sts[2];
	if (next_ddb_index)
		*next_ddb_index = mbox_sts[3];
	if (fw_ddb_device_state)
		*fw_ddb_device_state = mbox_sts[4];

	/*
	 * RA: This mailbox has been changed to pass connection error and
	 * details. It's true for ISP4010 as per Version E - not sure when it
	 * was changed. Get the time2wait from the fw_ddb_entry field
	 * default_time2wait, which we call minTime2Wait in the DEV_DB_ENTRY
	 * struct.
	 */
	if (conn_err_detail)
		*conn_err_detail = mbox_sts[5];
	if (tcp_source_port_num)
		*tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
	if (connection_id)
		*connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
	status = QLA_SUCCESS;

exit_get_fwddb:
	return status;
}
int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_CONN_OPEN;
	mbox_cmd[1] = fw_ddb_index;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
					 &mbox_sts[0]);
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n",
			  __func__, status, mbox_sts[0], mbox_sts[1]));
	return status;
}
/**
 * qla4xxx_set_ddb_entry - sets a ddb entry.
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_index: Firmware's device database index
 * @fw_ddb_entry_dma: dma address of ddb entry
 * @mbx_sts: mailbox 0 to be returned or NULL
 *
 * This routine initializes or updates the adapter's device database
 * entry for the specified device.
 **/
int qla4xxx_set_ddb_entry(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
			  dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	/* Do not wait for completion. The firmware will send us an
	 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
	 */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY;
	mbox_cmd[1] = (uint32_t) fw_ddb_index;
	mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
	mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
	mbox_cmd[4] = sizeof(struct dev_db_entry);

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (mbx_sts)
		*mbx_sts = mbox_sts[0];
	DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n",
		      ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);)

	return status;
}
int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
			       struct ddb_entry *ddb_entry, int options)
{
	int status;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
	mbox_cmd[1] = ddb_entry->fw_ddb_index;
	mbox_cmd[3] = options;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
				  "failed sts %04X %04X", __func__,
				  mbox_sts[0], mbox_sts[1]));
		if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) &&
		    (mbox_sts[1] == DDB_NOT_LOGGED_IN)) {
			set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
		}
	}

	return status;
}
/**
 * qla4xxx_get_crash_record - retrieves crash record.
 * @ha: Pointer to host adapter structure.
 *
 * This routine retrieves a crash record from the QLA4010 after an 8002h aen.
 **/
void qla4xxx_get_crash_record(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct crash_record *crash_record = NULL;
	dma_addr_t crash_record_dma = 0;
	uint32_t crash_record_size = 0;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	/* Get size of crash record. */
	mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
			      ha->host_no, __func__));
		goto exit_get_crash_record;
	}
	crash_record_size = mbox_sts[4];
	if (crash_record_size == 0) {
		DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n",
			      ha->host_no, __func__));
		goto exit_get_crash_record;
	}

	/* Alloc Memory for Crash Record. */
	crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size,
					  &crash_record_dma, GFP_KERNEL);
	if (crash_record == NULL)
		goto exit_get_crash_record;

	/* Get Crash Record. */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
	mbox_cmd[2] = LSDW(crash_record_dma);
	mbox_cmd[3] = MSDW(crash_record_dma);
	mbox_cmd[4] = crash_record_size;
	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS)
		goto exit_get_crash_record;

	/* Dump Crash Record. */

exit_get_crash_record:
	if (crash_record)
		dma_free_coherent(&ha->pdev->dev, crash_record_size,
				  crash_record, crash_record_dma);
}
/**
 * qla4xxx_get_conn_event_log - retrieves connection event log
 * @ha: Pointer to host adapter structure.
 **/
void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct conn_event_log_entry *event_log = NULL;
	dma_addr_t event_log_dma = 0;
	uint32_t event_log_size = 0;
	uint32_t num_valid_entries;
	uint32_t oldest_entry = 0;
	uint32_t max_event_log_entries;
	uint8_t i;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	/* Get size of connection event log. */
	mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS)
		goto exit_get_event_log;

	event_log_size = mbox_sts[4];
	if (event_log_size == 0)
		goto exit_get_event_log;

	/* Alloc Memory for Event Log. */
	event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size,
				       &event_log_dma, GFP_KERNEL);
	if (event_log == NULL)
		goto exit_get_event_log;

	/* Get Connection Event Log. */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
	mbox_cmd[2] = LSDW(event_log_dma);
	mbox_cmd[3] = MSDW(event_log_dma);

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
			      "log!\n", ha->host_no, __func__));
		goto exit_get_event_log;
	}

	/* Dump Event Log. */
	num_valid_entries = mbox_sts[1];

	max_event_log_entries = event_log_size /
				sizeof(struct conn_event_log_entry);

	if (num_valid_entries > max_event_log_entries)
		oldest_entry = num_valid_entries % max_event_log_entries;

	DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n",
		      ha->host_no, num_valid_entries));

	if (ql4xextended_error_logging == 3) {
		if (oldest_entry == 0) {
			/* Circular Buffer has not wrapped around */
			for (i = 0; i < num_valid_entries; i++) {
				qla4xxx_dump_buffer((uint8_t *)event_log +
						    (i * sizeof(*event_log)),
						    sizeof(*event_log));
			}
		} else {
			/* Circular Buffer has wrapped around -
			 * display accordingly */
			for (i = oldest_entry; i < max_event_log_entries; i++) {
				qla4xxx_dump_buffer((uint8_t *)event_log +
						    (i * sizeof(*event_log)),
						    sizeof(*event_log));
			}
			for (i = 0; i < oldest_entry; i++) {
				qla4xxx_dump_buffer((uint8_t *)event_log +
						    (i * sizeof(*event_log)),
						    sizeof(*event_log));
			}
		}
	}

exit_get_event_log:
	if (event_log)
		dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
				  event_log_dma);
}
/**
 * qla4xxx_abort_task - issues Abort Task
 * @ha: Pointer to host adapter structure.
 * @srb: Pointer to srb entry
 *
 * This routine issues an Abort Task request to the firmware for the
 * command associated with the given srb, so that the ISP returns the
 * request with ABORT status.
 * The caller must ensure that the srb and its ddb_entry pointer are
 * valid before calling this routine.
 **/
int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct scsi_cmnd *cmd = srb->cmd;
	int status = QLA_SUCCESS;
	unsigned long flags = 0;
	uint32_t index;

	/*
	 * Send abort task command to ISP, so that the ISP will return
	 * request with ABORT status
	 */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	index = (unsigned long)(unsigned char *)cmd->host_scribble;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Firmware already posted completion on response queue */
	if (index == MAX_SRBS)
		return status;

	mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
	mbox_cmd[1] = srb->ddb->fw_ddb_index;
	mbox_cmd[2] = index;
	/* Immediate Command Enable */
	mbox_cmd[5] = 0x01;

	qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
				&mbox_sts[0]);
	if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
		status = QLA_ERROR;

		DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%llu: abort task FAILED: "
			      "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
			      ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
			      mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
	}

	return status;
}
/**
 * qla4xxx_reset_lun - issues LUN Reset
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: lun number
 *
 * This routine performs a LUN RESET on the specified target/lun.
 * The caller must ensure that the ddb_entry and lun_entry pointers
 * are valid before calling this routine.
 **/
int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry,
		      uint64_t lun)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	uint32_t scsi_lun[2];
	int status = QLA_SUCCESS;

	DEBUG2(printk("scsi%ld:%d:%llu: lun reset issued\n", ha->host_no,
		      ddb_entry->fw_ddb_index, lun));

	/*
	 * Send lun reset command to ISP, so that the ISP will return all
	 * outstanding requests with RESET status
	 */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	int_to_scsilun(lun, (struct scsi_lun *) scsi_lun);
	mbox_cmd[0] = MBOX_CMD_LUN_RESET;
	mbox_cmd[1] = ddb_entry->fw_ddb_index;
	/* FW expects LUN bytes 0-3 in Incoming Mailbox 2
	 * (LUN byte 0 is LSByte, byte 3 is MSByte) */
	mbox_cmd[2] = cpu_to_le32(scsi_lun[0]);
	/* FW expects LUN bytes 4-7 in Incoming Mailbox 3
	 * (LUN byte 4 is LSByte, byte 7 is MSByte) */
	mbox_cmd[3] = cpu_to_le32(scsi_lun[1]);
	mbox_cmd[5] = 0x01;	/* Immediate Command Enable */

	qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
				&mbox_sts[0]);
	if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
	    mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
		status = QLA_ERROR;

	return status;
}
/**
 * qla4xxx_reset_target - issues target Reset
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 *
 * This routine performs a TARGET RESET on the specified target.
 * The caller must ensure that the ddb_entry pointer is valid before
 * calling this routine.
 **/
int qla4xxx_reset_target(struct scsi_qla_host *ha,
			 struct ddb_entry *ddb_entry)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no,
		      ddb_entry->fw_ddb_index));

	/*
	 * Send target reset command to ISP, so that the ISP will return all
	 * outstanding requests with RESET status
	 */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_TARGET_WARM_RESET;
	mbox_cmd[1] = ddb_entry->fw_ddb_index;
	mbox_cmd[5] = 0x01;	/* Immediate Command Enable */

	qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
				&mbox_sts[0]);
	if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
	    mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
		status = QLA_ERROR;

	return status;
}
int qla4xxx_get_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
		      uint32_t offset, uint32_t len)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_READ_FLASH;
	mbox_cmd[1] = LSDW(dma_addr);
	mbox_cmd[2] = MSDW(dma_addr);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = len;

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
			      "status %04X %04X, offset %08x, len %08x\n",
			      ha->host_no, __func__, mbox_sts[0], mbox_sts[1],
			      offset, len));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}

/**
 * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
 * @ha: Pointer to host adapter structure.
 *
 * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
 * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
 * those mailboxes, if unused.
 **/
int qla4xxx_about_firmware(struct scsi_qla_host *ha)
{
	struct about_fw_info *about_fw = NULL;
	dma_addr_t about_fw_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	about_fw = dma_alloc_coherent(&ha->pdev->dev,
				      sizeof(struct about_fw_info),
				      &about_fw_dma, GFP_KERNEL);
	if (!about_fw) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
				  "for about_fw\n", __func__));
		return status;
	}

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
	mbox_cmd[2] = LSDW(about_fw_dma);
	mbox_cmd[3] = MSDW(about_fw_dma);
	mbox_cmd[4] = sizeof(struct about_fw_info);

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
		goto exit_about_fw;
	}

	/* Save version information. */
	ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major);
	ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor);
	ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch);
	ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build);
	memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date,
	       sizeof(about_fw->fw_build_date));
	memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time,
	       sizeof(about_fw->fw_build_time));
	strcpy((char *)ha->fw_info.fw_build_user,
	       skip_spaces((char *)about_fw->fw_build_user));
	ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source);
	ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major);
	ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
	ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major);
	ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor);
	ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch);
	ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build);
	strcpy((char *)ha->fw_info.extended_timestamp,
	       skip_spaces((char *)about_fw->extended_timestamp));

	ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]);
	ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]);
	status = QLA_SUCCESS;

exit_about_fw:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
			  about_fw, about_fw_dma);
	return status;
}
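
/**
 * qla4xxx_get_default_ddb - retrieve the firmware default DDB entry
 * @ha: Pointer to host adapter structure.
 * @options: default DDB options (e.g. IPV6_DEFAULT_DDB_ENTRY).
 * @dma_addr: DMA address of the buffer that receives the entry.
 *
 * Issues MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS.
 **/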
int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
			    dma_addr_t dma_addr)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
	mbox_cmd[1] = options;
	mbox_cmd[2] = LSDW(dma_addr);
	mbox_cmd[3] = MSDW(dma_addr);

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
			      ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}
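
/**
 * qla4xxx_req_ddb_entry - request a DDB index from the firmware
 * @ha: Pointer to host adapter structure.
 * @ddb_index: DDB index being requested.
 * @mbx_sts: mailbox 0 status returned by the firmware.
 *
 * Issues MBOX_CMD_REQUEST_DATABASE_ENTRY for the given index.
 **/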
int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index,
			  uint32_t *mbx_sts)
{
	int status;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
	mbox_cmd[1] = ddb_index;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
				  __func__, mbox_sts[0]));
	}

	*mbx_sts = mbox_sts[0];
	return status;
}
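
/**
 * qla4xxx_clear_ddb_entry - clear a DDB entry in the firmware
 * @ha: Pointer to host adapter structure.
 * @ddb_index: DDB index to clear.
 *
 * Issues MBOX_CMD_CLEAR_DATABASE_ENTRY.
 **/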
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index)
{
	int status;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
	mbox_cmd[1] = ddb_index;

	status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
				  __func__, mbox_sts[0]));
	}

	return status;
}
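
/**
 * qla4xxx_set_flash - write a host buffer to FLASH
 * @ha: Pointer to host adapter structure.
 * @dma_addr: DMA address of the buffer holding the data to write.
 * @offset: FLASH offset to write to.
 * @length: number of bytes to write.
 * @options: write options (e.g. FLASH_OPT_RMW_COMMIT).
 *
 * Issues MBOX_CMD_WRITE_FLASH.
 **/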
int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
		      uint32_t offset, uint32_t length, uint32_t options)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_WRITE_FLASH;
	mbox_cmd[1] = LSDW(dma_addr);
	mbox_cmd[2] = MSDW(dma_addr);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = length;
	mbox_cmd[5] = options;

	status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH "
				  "failed w/ status %04X, mbx1 %04X\n",
				  __func__, mbox_sts[0], mbox_sts[1]));
	}
	return status;
}
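
/**
 * qla4xxx_bootdb_by_index - read a boot DDB entry from FLASH
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_entry: buffer that receives the DDB entry.
 * @fw_ddb_entry_dma: DMA address of @fw_ddb_entry.
 * @ddb_index: index of the entry within the FLASH DDB region.
 *
 * Returns QLA_SUCCESS only if the entry carries a valid cookie.
 **/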
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
			    struct dev_db_entry *fw_ddb_entry,
			    dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
{
	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
	uint32_t dev_db_end_offset;
	int status = QLA_ERROR;

	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));

	dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
	dev_db_end_offset = FLASH_OFFSET_DB_END;

	if (dev_db_start_offset > dev_db_end_offset) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s:Invalid DDB index %d", __func__,
				  ddb_index));
		goto exit_bootdb_failed;
	}

	if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
			      sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
			   ha->host_no, __func__);
		goto exit_bootdb_failed;
	}

	if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
		status = QLA_SUCCESS;

exit_bootdb_failed:
	return status;
}
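
/**
 * qla4xxx_flashdb_by_index - read a FLASH DDB entry for 40xx/8xxx parts
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_entry: buffer that receives the DDB entry.
 * @fw_ddb_entry_dma: DMA address of @fw_ddb_entry.
 * @ddb_index: index of the entry within the per-port DDB region.
 *
 * Returns QLA_SUCCESS only if the entry carries a valid cookie.
 **/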
int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
			     struct dev_db_entry *fw_ddb_entry,
			     dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
{
	uint32_t dev_db_start_offset;
	uint32_t dev_db_end_offset;
	int status = QLA_ERROR;

	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));

	if (is_qla40XX(ha)) {
		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
		dev_db_end_offset = FLASH_OFFSET_DB_END;
	} else {
		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
				      (ha->hw.flt_region_ddb << 2);
		/* flt_ddb_size is DDB table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);

		dev_db_end_offset = dev_db_start_offset +
				    (ha->hw.flt_ddb_size / 2);
	}

	dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));

	if (dev_db_start_offset > dev_db_end_offset) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s:Invalid DDB index %d", __func__,
				  ddb_index));
		goto exit_fdb_failed;
	}

	if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
			      sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
			   ha->host_no, __func__);
		goto exit_fdb_failed;
	}

	if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
		status = QLA_SUCCESS;

exit_fdb_failed:
	return status;
}
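
/**
 * qla4xxx_get_chap - read a CHAP entry from FLASH
 * @ha: Pointer to host adapter structure.
 * @username: buffer that receives the CHAP name.
 * @password: buffer that receives the CHAP secret.
 * @idx: index of the CHAP entry to read.
 *
 * Returns a negative errno if allocation or the FLASH read fails.
 **/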
int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
		     uint16_t idx)
{
	int ret = 0;
	int rval = QLA_ERROR;
	uint32_t offset = 0, chap_size;
	struct ql4_chap_table *chap_table;
	dma_addr_t chap_dma;

	chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL)
		return -ENOMEM;

	chap_size = sizeof(struct ql4_chap_table);

	if (is_qla40XX(ha))
		offset = FLASH_CHAP_OFFSET | (idx * chap_size);
	else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		/* flt_chap_size is CHAP table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			offset += (ha->hw.flt_chap_size / 2);
		offset += (idx * chap_size);
	}

	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (rval != QLA_SUCCESS) {
		ret = -EINVAL;
		goto exit_get_chap;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
			  __le16_to_cpu(chap_table->cookie)));

	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
		goto exit_get_chap;
	}

	strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
	strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
	chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);

exit_get_chap:
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	return ret;
}

/**
 * qla4xxx_set_chap - Make a chap entry at the given index
 * @ha: pointer to adapter structure
 * @username: CHAP username to set
 * @password: CHAP password to set
 * @idx: CHAP index at which to make the entry
 * @bidi: type of chap entry (chap_in or chap_out)
 *
 * Create chap entry at the given index with the information provided.
 *
 * Note: Caller should acquire the chap lock before getting here.
 **/
int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
		     uint16_t idx, int bidi)
{
	int ret = 0;
	int rval = QLA_ERROR;
	uint32_t offset = 0;
	struct ql4_chap_table *chap_table;
	uint32_t chap_size = 0;
	dma_addr_t chap_dma;

	chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL) {
		ret = -ENOMEM;
		goto exit_set_chap;
	}

	if (bidi)
		chap_table->flags |= BIT_6; /* peer */
	else
		chap_table->flags |= BIT_7; /* local */
	chap_table->secret_len = strlen(password);
	strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
	strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
	chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);

	if (is_qla40XX(ha)) {
		chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
		offset = FLASH_CHAP_OFFSET;
	} else { /* Single region contains CHAP info for both ports which is
		  * divided into half for each port.
		  */
		chap_size = ha->hw.flt_chap_size / 2;
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += chap_size;
	}

	offset += (idx * sizeof(struct ql4_chap_table));
	rval = qla4xxx_set_flash(ha, chap_dma, offset,
				 sizeof(struct ql4_chap_table),
				 FLASH_OPT_RMW_COMMIT);

	if (rval == QLA_SUCCESS && ha->chap_list) {
		/* Update ha chap_list cache */
		memcpy((struct ql4_chap_table *)ha->chap_list + idx,
		       chap_table, sizeof(struct ql4_chap_table));
	}
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	if (rval != QLA_SUCCESS)
		ret = -EINVAL;

exit_set_chap:
	return ret;
}
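
/**
 * qla4xxx_get_uni_chap_at_index - get unidirectional CHAP name and secret
 * @ha: Pointer to host adapter structure.
 * @username: buffer that receives the CHAP name.
 * @password: buffer that receives the CHAP secret.
 * @chap_index: index into the cached CHAP table.
 *
 * Looks up the entry in the driver's chap_list cache; the entry must be
 * valid and marked local (unidirectional).
 **/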
int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
				  char *password, uint16_t chap_index)
{
	int rval = QLA_ERROR;
	struct ql4_chap_table *chap_table = NULL;
	int max_chap_entries;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
		rval = QLA_ERROR;
		goto exit_uni_chap;
	}

	if (!username || !password) {
		ql4_printk(KERN_ERR, ha, "No memory for username & secret\n");
		rval = QLA_ERROR;
		goto exit_uni_chap;
	}

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_index > max_chap_entries) {
		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
		rval = QLA_ERROR;
		goto exit_uni_chap;
	}

	mutex_lock(&ha->chap_sem);
	chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
	if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
		rval = QLA_ERROR;
		goto exit_unlock_uni_chap;
	}

	if (!(chap_table->flags & BIT_7)) {
		ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
		rval = QLA_ERROR;
		goto exit_unlock_uni_chap;
	}

	strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
	strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN);

	rval = QLA_SUCCESS;

exit_unlock_uni_chap:
	mutex_unlock(&ha->chap_sem);
exit_uni_chap:
	return rval;
}

/**
 * qla4xxx_get_chap_index - Get chap index given username and secret
 * @ha: pointer to adapter structure
 * @username: CHAP username to be searched
 * @password: CHAP password to be searched
 * @bidi: Is this a BIDI CHAP
 * @chap_index: CHAP index to be returned
 *
 * Match the username and password in the chap_list, return the index if a
 * match is found. If a match is not found then add the entry in FLASH and
 * return the index at which entry is written in the FLASH.
 **/
int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
			   char *password, int bidi, uint16_t *chap_index)
{
	int i, rval;
	int free_index = -1;
	int found_index = 0;
	int max_chap_entries = 0;
	struct ql4_chap_table *chap_table;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
		return QLA_ERROR;
	}

	if (!username || !password) {
		ql4_printk(KERN_ERR, ha, "Do not have username and psw\n");
		return QLA_ERROR;
	}

	mutex_lock(&ha->chap_sem);
	for (i = 0; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
		if (chap_table->cookie !=
		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
			if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
				free_index = i;
			continue;
		}
		if (bidi) {
			if (chap_table->flags & BIT_7)
				continue;
		} else {
			if (chap_table->flags & BIT_6)
				continue;
		}
		if (!strncmp(chap_table->secret, password,
			     MAX_CHAP_SECRET_LEN) &&
		    !strncmp(chap_table->name, username,
			     MAX_CHAP_NAME_LEN)) {
			*chap_index = i;
			found_index = 1;
			break;
		}
	}

	/* If chap entry is not present and a free index is available then
	 * write the entry in flash
	 */
	if (!found_index && free_index != -1) {
		rval = qla4xxx_set_chap(ha, username, password,
					free_index, bidi);
		if (!rval) {
			*chap_index = free_index;
			found_index = 1;
		}
	}

	mutex_unlock(&ha->chap_sem);

	if (found_index)
		return QLA_SUCCESS;
	return QLA_ERROR;
}
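
/**
 * qla4xxx_conn_close_sess_logout - close a connection or logout a session
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_index: firmware DDB index of the session.
 * @connection_id: connection to act on.
 * @option: logout option flags passed to the firmware.
 *
 * Issues MBOX_CMD_CONN_CLOSE_SESS_LOGOUT.
 **/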
int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
				   uint16_t fw_ddb_index,
				   uint16_t connection_id,
				   uint16_t option)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
	mbox_cmd[1] = fw_ddb_index;
	mbox_cmd[2] = connection_id;
	mbox_cmd[3] = option;

	status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE "
				  "option %04x failed w/ status %04X %04X\n",
				  __func__, option, mbox_sts[0], mbox_sts[1]));
	}
	return status;
}

/**
 * qla4_84xx_extend_idc_tmo - Extend IDC Timeout.
 * @ha: Pointer to host adapter structure.
 * @ext_tmo: idc timeout value
 *
 * Requests firmware to extend the idc timeout value.
 **/
static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	ext_tmo &= 0xf;

	mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND;
	mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) |
		       (ext_tmo << 8));		/* new timeout */
	mbox_cmd[2] = ha->idc_info.info1;
	mbox_cmd[3] = ha->idc_info.info2;
	mbox_cmd[4] = ha->idc_info.info3;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "scsi%ld: %s: failed status %04X\n",
				  ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	} else {
		ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n",
			   __func__, ext_tmo);
	}

	return QLA_SUCCESS;
}
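
/**
 * qla4xxx_disable_acb - disable the address control block
 * @ha: Pointer to host adapter structure.
 *
 * Issues MBOX_CMD_DISABLE_ACB. On ISP8042, when an IDC ACK is pending,
 * extends the IDC timeout and waits for the disable-ACB completion.
 **/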
int qla4xxx_disable_acb(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_DISABLE_ACB;

	status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
				  "failed w/ status %04X %04X %04X", __func__,
				  mbox_sts[0], mbox_sts[1], mbox_sts[2]));
	} else {
		if (is_qla8042(ha) &&
		    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) &&
		    (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
			/*
			 * Disable ACB mailbox command takes time to complete
			 * based on the total number of targets connected.
			 * For 512 targets, it took approximately 5 secs to
			 * complete. Setting the timeout value to 8, with the 3
			 * secs buffer.
			 */
			qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV);
			if (!wait_for_completion_timeout(&ha->disable_acb_comp,
							 IDC_EXTEND_TOV * HZ)) {
				ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n",
					   __func__);
			}
		}
	}
	return status;
}
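
/**
 * qla4xxx_get_acb - read the address control block from the firmware
 * @ha: Pointer to host adapter structure.
 * @acb_dma: DMA address of the buffer that receives the ACB.
 * @acb_type: ACB type to retrieve.
 * @len: size of the buffer.
 *
 * Issues MBOX_CMD_GET_ACB.
 **/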
int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
		    uint32_t acb_type, uint32_t len)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_ACB;
	mbox_cmd[1] = acb_type;
	mbox_cmd[2] = LSDW(acb_dma);
	mbox_cmd[3] = MSDW(acb_dma);
	mbox_cmd[4] = len;

	status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
	}
	return status;
}
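
/**
 * qla4xxx_set_acb - write the primary address control block to the firmware
 * @ha: Pointer to host adapter structure.
 * @mbox_cmd: caller-provided mailbox command array.
 * @mbox_sts: caller-provided mailbox status array.
 * @acb_dma: DMA address of the ACB to set.
 *
 * Issues MBOX_CMD_SET_ACB.
 **/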
int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
		    uint32_t *mbox_sts, dma_addr_t acb_dma)
{
	int status = QLA_SUCCESS;

	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
	mbox_cmd[0] = MBOX_CMD_SET_ACB;
	mbox_cmd[1] = 0; /* Primary ACB */
	mbox_cmd[2] = LSDW(acb_dma);
	mbox_cmd[3] = MSDW(acb_dma);
	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);

	status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_SET_ACB "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
	}
	return status;
}
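
/**
 * qla4xxx_set_param_ddbentry - build and set a DDB entry from session params
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: driver DDB entry being configured.
 * @cls_conn: iSCSI class connection holding session/connection parameters.
 * @mbx_sts: mailbox status returned by the SET DDB command.
 *
 * Starts from the firmware default DDB, fills in the target name,
 * destination address, CHAP indices and iSCSI operational parameters,
 * then writes the entry with qla4xxx_set_ddb_entry().
 **/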
int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
			       struct ddb_entry *ddb_entry,
			       struct iscsi_cls_conn *cls_conn,
			       uint32_t *mbx_sts)
{
	struct dev_db_entry *fw_ddb_entry;
	struct iscsi_conn *conn;
	struct iscsi_session *sess;
	struct qla_conn *qla_conn;
	struct sockaddr *dst_addr;
	dma_addr_t fw_ddb_entry_dma;
	int status = QLA_SUCCESS;
	int rval = 0;
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;
	char *ip;
	uint16_t iscsi_opts = 0;
	uint32_t options = 0;
	uint16_t idx, *ptid;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to allocate dma buffer.\n",
				  __func__));
		rval = -ENOMEM;
		goto exit_set_param_no_free;
	}

	conn = cls_conn->dd_data;
	qla_conn = conn->dd_data;
	sess = conn->session;
	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;

	if (dst_addr->sa_family == AF_INET6)
		options |= IPV6_DEFAULT_DDB_ENTRY;

	status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
	if (status == QLA_ERROR) {
		rval = -EINVAL;
		goto exit_set_param;
	}

	ptid = (uint16_t *)&fw_ddb_entry->isid[1];
	*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);

	DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid));

	iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
	memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));

	memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name));

	if (sess->targetname != NULL) {
		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
		       min(strlen(sess->targetname),
			   sizeof(fw_ddb_entry->iscsi_name)));
	}

	memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
	memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr));

	fw_ddb_entry->options = DDB_OPT_TARGET | DDB_OPT_AUTO_SENDTGTS_DISABLE;

	if (dst_addr->sa_family == AF_INET) {
		addr = (struct sockaddr_in *)dst_addr;
		ip = (char *)&addr->sin_addr;
		memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN);
		fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port));
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Destination Address [%pI4]: index [%d]\n",
				  __func__, fw_ddb_entry->ip_addr,
				  ddb_entry->fw_ddb_index));
	} else if (dst_addr->sa_family == AF_INET6) {
		addr6 = (struct sockaddr_in6 *)dst_addr;
		ip = (char *)&addr6->sin6_addr;
		memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN);
		fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port));
		fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE;
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Destination Address [%pI6]: index [%d]\n",
				  __func__, fw_ddb_entry->ip_addr,
				  ddb_entry->fw_ddb_index));
	} else {
		ql4_printk(KERN_ERR, ha,
			   "%s: Failed to get IP Address\n",
			   __func__);
		rval = -EINVAL;
		goto exit_set_param;
	}

	/* CHAP */
	if (sess->username != NULL && sess->password != NULL) {
		if (strlen(sess->username) && strlen(sess->password)) {
			iscsi_opts |= BIT_7;

			rval = qla4xxx_get_chap_index(ha, sess->username,
						      sess->password,
						      LOCAL_CHAP, &idx);
			if (rval)
				goto exit_set_param;

			fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx);
		}
	}

	if (sess->username_in != NULL && sess->password_in != NULL) {
		/* Check if BIDI CHAP */
		if (strlen(sess->username_in) && strlen(sess->password_in)) {
			iscsi_opts |= BIT_4;

			rval = qla4xxx_get_chap_index(ha, sess->username_in,
						      sess->password_in,
						      BIDI_CHAP, &idx);
			if (rval)
				goto exit_set_param;
		}
	}

	if (sess->initial_r2t_en)
		iscsi_opts |= BIT_10;

	if (sess->imm_data_en)
		iscsi_opts |= BIT_11;

	fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts);

	if (conn->max_recv_dlength)
		fw_ddb_entry->iscsi_max_rcv_data_seg_len =
		  __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));

	if (sess->max_r2t)
		fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);

	if (sess->first_burst)
		fw_ddb_entry->iscsi_first_burst_len =
		       __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS));

	if (sess->max_burst)
		fw_ddb_entry->iscsi_max_burst_len =
			__constant_cpu_to_le16((sess->max_burst / BYTE_UNITS));

	if (sess->time2wait)
		fw_ddb_entry->iscsi_def_time2wait =
			cpu_to_le16(sess->time2wait);

	if (sess->time2retain)
		fw_ddb_entry->iscsi_def_time2retain =
			cpu_to_le16(sess->time2retain);

	status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
				       fw_ddb_entry_dma, mbx_sts);
	if (status != QLA_SUCCESS)
		rval = -EINVAL;

exit_set_param:
	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
			  fw_ddb_entry, fw_ddb_entry_dma);
exit_set_param_no_free:
	return rval;
}
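
/**
 * qla4xxx_get_mgmt_data - get management (statistics) data for a DDB
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_index: firmware DDB index to query.
 * @stats_size: size of the statistics buffer.
 * @stats_dma: DMA address of the statistics buffer.
 *
 * Issues MBOX_CMD_GET_MANAGEMENT_DATA.
 **/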
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
			  uint16_t stats_size, dma_addr_t stats_dma)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
	mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA;
	mbox_cmd[1] = fw_ddb_index;
	mbox_cmd[2] = LSDW(stats_dma);
	mbox_cmd[3] = MSDW(stats_dma);
	mbox_cmd[4] = stats_size;

	status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha,
				  "%s: MBOX_CMD_GET_MANAGEMENT_DATA "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
	}
	return status;
}
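
/**
 * qla4xxx_get_ip_state - get the state of an IP address in an ACB
 * @ha: Pointer to host adapter structure.
 * @acb_idx: ACB index.
 * @ip_idx: IP address index within the ACB.
 * @sts: buffer that receives the returned mailbox status registers.
 *
 * Issues MBOX_CMD_GET_IP_ADDR_STATE.
 **/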
int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
			 uint32_t ip_idx, uint32_t *sts)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE;
	mbox_cmd[1] = acb_idx;
	mbox_cmd[2] = ip_idx;

	status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: "
				  "MBOX_CMD_GET_IP_ADDR_STATE failed w/ "
				  "status %04X\n", __func__, mbox_sts[0]));
	}
	memcpy(sts, mbox_sts, sizeof(mbox_sts));
	return status;
}
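
/**
 * qla4xxx_get_nvram - read NVRAM into a host buffer
 * @ha: Pointer to host adapter structure.
 * @nvram_dma: DMA address of the buffer that receives the data.
 * @offset: NVRAM offset to read from.
 * @size: number of bytes to read.
 *
 * Issues MBOX_CMD_GET_NVRAM.
 **/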
int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
		      uint32_t offset, uint32_t size)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_NVRAM;
	mbox_cmd[1] = LSDW(nvram_dma);
	mbox_cmd[2] = MSDW(nvram_dma);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = size;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "status %04X\n", ha->host_no, __func__,
				  mbox_sts[0]));
	}
	return status;
}
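
/**
 * qla4xxx_set_nvram - write a host buffer to NVRAM
 * @ha: Pointer to host adapter structure.
 * @nvram_dma: DMA address of the buffer holding the data to write.
 * @offset: NVRAM offset to write to.
 * @size: number of bytes to write.
 *
 * Issues MBOX_CMD_SET_NVRAM.
 **/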
int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
		      uint32_t offset, uint32_t size)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_SET_NVRAM;
	mbox_cmd[1] = LSDW(nvram_dma);
	mbox_cmd[2] = MSDW(nvram_dma);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = size;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "status %04X\n", ha->host_no, __func__,
				  mbox_sts[0]));
	}
	return status;
}
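
/**
 * qla4xxx_restore_factory_defaults - restore factory default settings
 * @ha: Pointer to host adapter structure.
 * @region: region to restore.
 * @field0: region-specific field.
 * @field1: region-specific field.
 *
 * Issues MBOX_CMD_RESTORE_FACTORY_DEFAULTS.
 **/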
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
				     uint32_t region, uint32_t field0,
				     uint32_t field1)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS;
	mbox_cmd[3] = region;
	mbox_cmd[4] = field0;
	mbox_cmd[5] = field1;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "status %04X\n", ha->host_no, __func__,
				  mbox_sts[0]));
	}
	return status;
}

/**
 * qla4_8xxx_set_param - set driver version in firmware.
 * @ha: Pointer to host adapter structure.
 * @param: Parameter to set i.e driver version
 **/
int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	uint32_t status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_SET_PARAM;
	if (param == SET_DRVR_VERSION) {
		mbox_cmd[1] = SET_DRVR_VERSION;
		strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
			MAX_DRVR_VER_LEN - 1);
	} else {
		ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
			   __func__, param);
		status = QLA_ERROR;
		goto exit_set_param;
	}

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd,
					 mbox_sts);
	if (status == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
			   __func__, mbox_sts[0]);

exit_set_param:
	return status;
}

/**
 * qla4_83xx_post_idc_ack - post IDC ACK
 * @ha: Pointer to host adapter structure.
 *
 * Posts IDC ACK for IDC Request Notification AEN.
 **/
int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_IDC_ACK;
	mbox_cmd[1] = ha->idc_info.request_desc;
	mbox_cmd[2] = ha->idc_info.info1;
	mbox_cmd[3] = ha->idc_info.info2;
	mbox_cmd[4] = ha->idc_info.info3;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
			   mbox_sts[0]);
	else
		ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__);

	return status;
}
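
/**
 * qla4_84xx_config_acb - save/disable or restore the ACB
 * @ha: Pointer to host adapter structure.
 * @acb_config: ACB_CONFIG_DISABLE to read, save and disable the current ACB,
 *	ACB_CONFIG_SET to write back the previously saved ACB.
 **/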
int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct addr_ctrl_blk *acb = NULL;
	uint32_t acb_len = sizeof(struct addr_ctrl_blk);
	int rval = QLA_SUCCESS;
	dma_addr_t acb_dma;

	acb = dma_alloc_coherent(&ha->pdev->dev,
				 sizeof(struct addr_ctrl_blk),
				 &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__);
		rval = QLA_ERROR;
		goto exit_config_acb;
	}
	memset(acb, 0, acb_len);

	switch (acb_config) {
	case ACB_CONFIG_DISABLE:
		rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len);
		if (rval != QLA_SUCCESS)
			goto exit_free_acb;

		rval = qla4xxx_disable_acb(ha);
		if (rval != QLA_SUCCESS)
			goto exit_free_acb;

		if (!ha->saved_acb)
			ha->saved_acb = kzalloc(acb_len, GFP_KERNEL);

		if (!ha->saved_acb) {
			ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
				   __func__);
			rval = QLA_ERROR;
			goto exit_free_acb;
		}
		memcpy(ha->saved_acb, acb, acb_len);
		break;
	case ACB_CONFIG_SET:
		if (!ha->saved_acb) {
			ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n",
				   __func__);
			rval = QLA_ERROR;
			goto exit_free_acb;
		}

		memcpy(acb, ha->saved_acb, acb_len);

		rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
		if (rval != QLA_SUCCESS)
			goto exit_free_acb;

		break;
	default:
		ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n",
			   __func__);
	}

exit_free_acb:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
			  acb_dma);
exit_config_acb:
	if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) {
		kfree(ha->saved_acb);
		ha->saved_acb = NULL;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s %s\n", __func__,
			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
	return rval;
}
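
/**
 * qla4_83xx_get_port_config - get the current port configuration
 * @ha: Pointer to host adapter structure.
 * @config: port configuration is returned here.
 *
 * Issues MBOX_CMD_GET_PORT_CONFIG.
 **/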
int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_PORT_CONFIG;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status == QLA_SUCCESS)
		*config = mbox_sts[1];
	else
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
			   mbox_sts[0]);

	return status;
}
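
/**
 * qla4_83xx_set_port_config - set the port configuration
 * @ha: Pointer to host adapter structure.
 * @config: port configuration to set.
 *
 * Issues MBOX_CMD_SET_PORT_CONFIG.
 **/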
int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_SET_PORT_CONFIG;
	mbox_cmd[1] = *config;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status != QLA_SUCCESS)
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
			   mbox_sts[0]);

	return status;
}