ql4_isr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
/**
 * qla4xxx_copy_sense - copy sense data into cmd sense buffer
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 * @srb: Pointer to srb structure.
 **/
static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
			       struct status_entry *sts_entry,
			       struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	uint16_t sense_len;

	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
	if (sense_len == 0) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s:"
				  " sense len 0\n", ha->host_no,
				  cmd->device->channel, cmd->device->id,
				  cmd->device->lun, __func__));
		ha->status_srb = NULL;
		return;
	}

	/* Save total available sense length,
	 * not to exceed cmd's sense buffer size */
	sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
	srb->req_sense_ptr = cmd->sense_buffer;
	srb->req_sense_len = sense_len;

	/* Copy sense from sts_entry pkt */
	sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
	memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);

	DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: %s: sense key = %x, "
		      "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
		      cmd->device->channel, cmd->device->id,
		      cmd->device->lun, __func__,
		      sts_entry->senseData[2] & 0x0f,
		      sts_entry->senseData[7],
		      sts_entry->senseData[12],
		      sts_entry->senseData[13]));

	DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
	srb->flags |= SRB_GOT_SENSE;

	/* Update srb, in case a sts_cont pkt follows */
	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;
	if (srb->req_sense_len != 0)
		ha->status_srb = srb;
	else
		ha->status_srb = NULL;
}
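/*
 * Sense data longer than IOCB_MAX_SENSEDATA_LEN arrives split across the
 * status entry above and one or more status-continuation entries handled
 * below; ha->status_srb carries the partially-filled srb between the two
 * handlers until req_sense_len is exhausted.
 */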
/**
 * qla4xxx_status_cont_entry - Process a Status Continuation entry.
 * @ha: SCSI driver HA context
 * @sts_cont: Entry pointer
 *
 * Extended sense data.
 */
static void
qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
			  struct status_cont_entry *sts_cont)
{
	struct srb *srb = ha->status_srb;
	struct scsi_cmnd *cmd;
	uint16_t sense_len;

	if (srb == NULL)
		return;

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
			      "back to OS srb=%p srb->state:%d\n", ha->host_no,
			      __func__, srb, srb->state));
		ha->status_srb = NULL;
		return;
	}

	/* Copy sense data. */
	sense_len = min_t(uint16_t, srb->req_sense_len,
			  IOCB_MAX_EXT_SENSEDATA_LEN);
	memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
	DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));

	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;

	/* Place command on done queue. */
	if (srb->req_sense_len == 0) {
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
		ha->status_srb = NULL;
	}
}
/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;

	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
			   "handle=0x%0x, srb=%p\n", __func__,
			   sts_entry->handle, srb);
		if (is_qla80XX(ha))
			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		else
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		ql4_printk(KERN_WARNING, ha, "Command is NULL:"
			   " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
					     cmd->underflow)) {

				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: "
					      "Mid-layer Data underrun, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));
				break;
			}
		}

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		qla4xxx_copy_sense(ha, sts_entry, srb);
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (iscsi_is_session_online(ddb_entry->sess))
			qla4xxx_mark_device_missing(ddb_entry->sess);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Data overrun\n",
				      ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {

			/* Both the firmware and target reported UNDERRUN:
			 *
			 * MID-LAYER UNDERFLOW case:
			 * Some kernels do not properly detect midlayer
			 * underflow, so we manually check it and return
			 * ERROR if the minimum required data was not
			 * received.
			 *
			 * ALL OTHER cases:
			 * Fall thru to check scsi_status
			 */
			if (!scsi_status && (scsi_bufflen(cmd) - residual) <
			    cmd->underflow) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun, xferlen = 0x%x, residual = 0x%x\n",
						  ha->host_no,
						  cmd->device->channel,
						  cmd->device->id,
						  cmd->device->lun, __func__,
						  scsi_bufflen(cmd),
						  residual));

				cmd->result = DID_ERROR << 16;
				break;
			}

		} else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
			   scsi_status != SAM_STAT_BUSY) {

			/*
			 * The firmware reports UNDERRUN, but the target does
			 * not report it:
			 *
			 *   scsi_status     |    host_byte       device_byte
			 *                   |     (19:16)          (7:0)
			 *   =============   |    =========       ===========
			 *   TASK_SET_FULL   |    DID_OK          scsi_status
			 *   BUSY            |    DID_OK          scsi_status
			 *   ALL OTHERS      |    DID_ERROR       scsi_status
			 *
			 *   Note: If scsi_status is task set full or busy,
			 *   then this else if would fall thru to check the
			 *   scsi_status and return DID_OK.
			 */

			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld:%d:%d:%llu: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
					  ha->host_no,
					  cmd->device->channel,
					  cmd->device->id,
					  cmd->device->lun, __func__,
					  residual,
					  scsi_bufflen(cmd)));

			cmd->result = DID_ERROR << 16 | scsi_status;
			goto check_scsi_status;
		}

		cmd->result = DID_OK << 16 | scsi_status;

check_scsi_status:
		if (scsi_status == SAM_STAT_CHECK_CONDITION)
			qla4xxx_copy_sense(ha, sts_entry, srb);

		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: SCS_DEVICE "
			      "state: 0x%x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, sts_entry->completionStatus));
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (iscsi_is_session_online(ddb_entry->sess))
			qla4xxx_mark_device_missing(ddb_entry->sess);

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%llu: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request, if not waiting for status_continuation pkt */
	srb->cc_stat = sts_entry->completionStatus;
	if (ha->status_srb == NULL)
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
/**
 * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C)
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
					  struct passthru_status *sts_entry)
{
	struct iscsi_task *task;
	struct ddb_entry *ddb_entry;
	struct ql4_task_data *task_data;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	itt_t itt;
	uint32_t fw_ddb_index;

	itt = sts_entry->handle;
	fw_ddb_index = le32_to_cpu(sts_entry->target);

	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);

	if (ddb_entry == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n",
			   __func__, sts_entry->target);
		return;
	}

	cls_conn = ddb_entry->conn;
	conn = cls_conn->dd_data;
	spin_lock(&conn->session->back_lock);
	task = iscsi_itt_to_task(conn, itt);
	spin_unlock(&conn->session->back_lock);

	if (task == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
		return;
	}

	task_data = task->dd_data;
	memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
	ha->iocb_cnt -= task_data->iocb_req_cnt;
	queue_work(ha->task_wq, &task_data->task_work);
}
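/*
 * qla4xxx_del_mrb_from_active_array - remove an mrb by IOCB handle
 *
 * Validates the handle, clears the active_mrb_array slot and returns the
 * IOCB count the mrb had reserved.  Returns the mrb, or NULL if the handle
 * is out of range or the slot is already empty.
 */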
static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
						     uint32_t index)
{
	struct mrb *mrb = NULL;

	/* validate handle and remove from active array */
	if (index >= MAX_MRB)
		return mrb;

	mrb = ha->active_mrb_array[index];
	ha->active_mrb_array[index] = NULL;
	if (!mrb)
		return mrb;

	/* update counters */
	ha->iocb_cnt -= mrb->iocb_cnt;

	return mrb;
}
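/*
 * qla4xxx_mbox_status_entry - processes mailbox status IOCBs
 *
 * Only MBOX_CMD_PING completions are handled here; the ping result is
 * posted to userspace via qla4xxx_post_ping_evt_work().
 */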
static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
				      struct mbox_status_iocb *mbox_sts_entry)
{
	struct mrb *mrb;
	uint32_t status;
	uint32_t data_size;

	mrb = qla4xxx_del_mrb_from_active_array(ha,
					le32_to_cpu(mbox_sts_entry->handle));

	if (mrb == NULL) {
		ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
			   mbox_sts_entry->handle);
		return;
	}

	switch (mrb->mbox_cmd) {
	case MBOX_CMD_PING:
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
				  "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
				  __func__, mrb->mbox_cmd,
				  mbox_sts_entry->out_mbox[0],
				  mbox_sts_entry->out_mbox[6]));

		if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
			status = ISCSI_PING_SUCCESS;
		else
			status = mbox_sts_entry->out_mbox[6];

		data_size = sizeof(mbox_sts_entry->out_mbox);

		qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
					   (uint8_t *) mbox_sts_entry->out_mbox);
		break;

	default:
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
				  "0x%x\n", __func__, mrb->mbox_cmd));
	}

	kfree(mrb);
}
/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
	while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/* Common status */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU)
				qla4xxx_passthru_status_entry(ha,
					(struct passthru_status *)sts_entry);
			else
				ql4_printk(KERN_ERR, ha,
					   "%s: Invalid status received\n",
					   __func__);

			break;

		case ET_STATUS_CONTINUATION:
			qla4xxx_status_cont_entry(ha,
				(struct status_cont_entry *) sts_entry);
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP. Queue command for
			 * later */
			srb = qla4xxx_del_from_active_array(ha,
					le32_to_cpu(sts_entry->handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		case ET_MBOX_STATUS:
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: mbox status IOCB\n", __func__));
			qla4xxx_mbox_status_entry(ha,
					(struct mbox_status_iocb *)sts_entry);
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
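		/* Mark the entry consumed: the scan loop above terminates
		 * when it reaches an entry already flagged
		 * RESPONSE_PROCESSED, and the wmb() orders this store before
		 * the queue-register update in complete_iocb() below. */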
		((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/*
	 * Tell ISP we're done with response(s). This also clears the interrupt.
	 */
	ha->isp_ops->complete_iocb(ha);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	ha->isp_ops->complete_iocb(ha);
	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
/**
 * qla4_83xx_loopback_in_progress - Is loopback in progress?
 * @ha: Pointer to host adapter structure.
 *
 * Returns: 1 = loopback in progress, 0 = loopback not in progress.
 **/
static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
{
	int rval = 1;

	if (is_qla8032(ha) || is_qla8042(ha)) {
		if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
		    (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: Loopback diagnostics in progress\n",
					  __func__));
			rval = 1;
		} else {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: Loopback diagnostics not in progress\n",
					  __func__));
			rval = 0;
		}
	}

	return rval;
}
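/*
 * qla4xxx_update_ipaddr_state - cache the firmware-reported IP address state
 *
 * The low nibble of ipaddr_idx selects the address slot: 0 is the IPv4
 * address, 1 the IPv6 link-local address, 2 and 3 the two routable IPv6
 * addresses.
 */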
static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha,
					uint32_t ipaddr_idx,
					uint32_t ipaddr_fw_state)
{
	uint8_t ipaddr_state;
	uint8_t ip_idx;

	ip_idx = ipaddr_idx & 0xF;
	ipaddr_state = qla4xxx_set_ipaddr_state((uint8_t)ipaddr_fw_state);

	switch (ip_idx) {
	case 0:
		ha->ip_config.ipv4_addr_state = ipaddr_state;
		break;
	case 1:
		ha->ip_config.ipv6_link_local_state = ipaddr_state;
		break;
	case 2:
		ha->ip_config.ipv6_addr0_state = ipaddr_state;
		break;
	case 3:
		ha->ip_config.ipv6_addr1_state = ipaddr_state;
		break;
	default:
		ql4_printk(KERN_INFO, ha, "%s: Invalid IPADDR index %d\n",
			   __func__, ip_idx);
	}
}
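/* mbox_sts[2..5] carry the new IPv6 default router address, one 32-bit
 * word per mailbox register. */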
static void qla4xxx_default_router_changed(struct scsi_qla_host *ha,
					   uint32_t *mbox_sts)
{
	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0],
	       &mbox_sts[2], sizeof(uint32_t));
	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1],
	       &mbox_sts[3], sizeof(uint32_t));
	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2],
	       &mbox_sts[4], sizeof(uint32_t));
	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3],
	       &mbox_sts[5], sizeof(uint32_t));
}
/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. Runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host *ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	__le32 __iomem *mailbox_out;
	uint32_t opcode = 0;

	if (is_qla8032(ha) || is_qla8042(ha))
		mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
	else if (is_qla8022(ha))
		mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
	else
		mailbox_out = &ha->reg->mailbox[0];
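	/* Dispatch on the status class: command completions are copied back
	 * to the waiting mailbox caller, while asynchronous event statuses
	 * (AENs) are logged and handled in the switch below. */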
	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 0; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] = readl(&mailbox_out[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);

			if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
				complete(&ha->mbx_intr_comp);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = readl(&mailbox_out[i]);

		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
					mbox_sts[i];
			ha->aen_log.count++;
		}

		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
			qla4xxx_dump_registers(ha);

			if ((is_qla8022(ha) && ql4xdontresethba) ||
			    ((is_qla8032(ha) || is_qla8042(ha)) &&
			     qla4_83xx_idc_dontreset(ha))) {
				DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
					      ha->host_no, __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			if (is_qla80XX(ha))
				set_bit(DPC_RESET_HA_FW_CONTEXT,
					&ha->dpc_flags);
			else
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			set_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
					      sizeof(mbox_sts),
					      (uint8_t *) mbox_sts);

			if ((is_qla8032(ha) || is_qla8042(ha)) &&
			    ha->notify_link_up_comp)
				complete(&ha->link_up_comp);
			break;

		case MBOX_ASTS_LINK_DOWN:
			clear_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags)) {
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
				qla4xxx_wake_dpc(ha);
			}

			ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
					      sizeof(mbox_sts),
					      (uint8_t *) mbox_sts);
			break;

		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
							   * mode only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
		case MBOX_ASTS_DUPLICATE_IP:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
			       "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
			       mbox_sts[2], mbox_sts[3]);

			qla4xxx_update_ipaddr_state(ha, mbox_sts[5],
						    mbox_sts[3]);
			/* mbox_sts[2] = Old ACB state
			 * mbox_sts[3] = new ACB state */
			if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) &&
			    ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) ||
			     (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) {
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			} else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) &&
				   (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) {
				if (is_qla80XX(ha))
					set_bit(DPC_RESET_HA_FW_CONTEXT,
						&ha->dpc_flags);
				else
					set_bit(DPC_RESET_HA, &ha->dpc_flags);
			} else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) {
				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
					   ha->host_no, __func__);
			} else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) {
				complete(&ha->disable_acb_comp);
				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
					   ha->host_no, __func__);
			}
			break;

		case MBOX_ASTS_IPV6_LINK_MTU_CHANGE:
		case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED:
		case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED:
			/* No action */
			DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n",
					  ha->host_no, mbox_status));
			break;
		case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD:
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld: AEN %04x, IPv6 ERROR, "
					  "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
					  ha->host_no, mbox_sts[0], mbox_sts[1],
					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
					  mbox_sts[5]));
			break;
		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_sts[0],
				      mbox_sts[1], mbox_sts[2]));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_sts[0], mbox_sts[1],
				      mbox_sts[2], mbox_sts[3]));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {

				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
						mbox_sts[i];

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
					      "mb1:0x%x mb2:0x%x mb3:0x%x "
					      "mb4:0x%x mb5:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_sts[0], mbox_sts[1],
					      mbox_sts[2], mbox_sts[3],
					      mbox_sts[4], mbox_sts[5]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed!  AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_sts[0]));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i, mbox_sts[0],
						      mbox_sts[1], mbox_sts[2],
						      mbox_sts[3]));
				}
			}
			break;

		case MBOX_ASTS_TXSCVR_INSERTED:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x Transceiver"
				      " inserted\n", ha->host_no, mbox_sts[0]));
			break;

		case MBOX_ASTS_TXSCVR_REMOVED:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x Transceiver"
				      " removed\n", ha->host_no, mbox_sts[0]));
			break;

		case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
			if (is_qla8032(ha) || is_qla8042(ha)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
						  ha->host_no, mbox_sts[0],
						  mbox_sts[1], mbox_sts[2],
						  mbox_sts[3], mbox_sts[4]));
				opcode = mbox_sts[1] >> 16;
				if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
				    (opcode == MBOX_CMD_PORT_RESET)) {
					set_bit(DPC_POST_IDC_ACK,
						&ha->dpc_flags);
					ha->idc_info.request_desc = mbox_sts[1];
					ha->idc_info.info1 = mbox_sts[2];
					ha->idc_info.info2 = mbox_sts[3];
					ha->idc_info.info3 = mbox_sts[4];
					qla4xxx_wake_dpc(ha);
				}
			}
			break;

		case MBOX_ASTS_IDC_COMPLETE:
			if (is_qla8032(ha) || is_qla8042(ha)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
						  ha->host_no, mbox_sts[0],
						  mbox_sts[1], mbox_sts[2],
						  mbox_sts[3], mbox_sts[4]));
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "scsi:%ld: AEN %04x IDC Complete notification\n",
						  ha->host_no, mbox_sts[0]));

				opcode = mbox_sts[1] >> 16;
				if (ha->notify_idc_comp)
					complete(&ha->idc_comp);

				if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
				    (opcode == MBOX_CMD_PORT_RESET))
					ha->idc_info.info2 = mbox_sts[3];

				if (qla4_83xx_loopback_in_progress(ha)) {
					set_bit(AF_LOOPBACK, &ha->flags);
				} else {
					clear_bit(AF_LOOPBACK, &ha->flags);
					if (ha->saved_acb)
						set_bit(DPC_RESTORE_ACB,
							&ha->dpc_flags);
				}
				qla4xxx_wake_dpc(ha);
			}
			break;

		case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED:
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
					  ha->host_no, mbox_sts[0], mbox_sts[1],
					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
					  mbox_sts[5]));
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
					  ha->host_no, mbox_sts[0]));
			qla4xxx_default_router_changed(ha, mbox_sts);
			break;

		case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
					  ha->host_no, mbox_sts[0], mbox_sts[1],
					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
					  mbox_sts[5]));
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n",
					  ha->host_no, mbox_sts[0]));
			/* new IDC timeout */
			ha->idc_extend_tmo = mbox_sts[1];
			break;

		case MBOX_ASTS_INITIALIZATION_FAILED:
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
					  ha->host_no, mbox_sts[0],
					  mbox_sts[3]));
			break;

		case MBOX_ASTS_SYSTEM_WARNING_EVENT:
			DEBUG2(ql4_printk(KERN_WARNING, ha,
					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
					  ha->host_no, mbox_sts[0], mbox_sts[1],
					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
					  mbox_sts[5]));
			break;

		case MBOX_ASTS_DCBX_CONF_CHANGE:
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
					  ha->host_no, mbox_sts[0], mbox_sts[1],
					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
					  mbox_sts[5]));
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld: AEN %04x Received DCBX configuration changed notification\n",
					  ha->host_no, mbox_sts[0]));
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_sts[0]));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}
void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
					 uint32_t intr_status)
{
	/* Process mailbox/asynch event interrupt.*/
	if (intr_status) {
		qla4xxx_isr_decode_mailbox(ha,
				readl(&ha->qla4_83xx_reg->mailbox_out[0]));
		/* clear the interrupt */
		writel(0, &ha->qla4_83xx_reg->risc_intr);
	} else {
		qla4xxx_process_response_queue(ha);
	}

	/* clear the interrupt */
	writel(0, &ha->qla4_83xx_reg->mb_int_mask);
}
/**
 * qla4_82xx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Local interrupt status/type.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
					 uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if ((intr_status & HSRX_RISC_IOCB_INT) &&
	    test_bit(AF_INIT_DONE, &ha->flags))
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & HSRX_RISC_MB_INT)
		qla4xxx_isr_decode_mailbox(ha,
				readl(&ha->qla4_82xx_reg->mailbox_out[0]));

	/* clear the interrupt */
	writel(0, &ha->qla4_82xx_reg->host_int);
	readl(&ha->qla4_82xx_reg->host_int);
}
/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Local interrupt status/type.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha, readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}
/**
 * qla4_82xx_spurious_interrupt - processes spurious interrupt
 * @ha: pointer to host adapter structure.
 * @reqs_count: Number of requests already serviced for this interrupt.
 **/
static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
					 uint8_t reqs_count)
{
	if (reqs_count)
		return;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
	if (is_qla8022(ha)) {
		writel(0, &ha->qla4_82xx_reg->host_int);
		if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)
			qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
					0xfbff);
	}
	ha->spurious_int_count++;
}
/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status
		 */
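		/* Shadow-register fast path: if the firmware's response
		 * queue in-pointer differs from the driver's out-pointer,
		 * there are completions to reap regardless of what
		 * ctrl_status currently shows. */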
		if (ha->isp_ops->rd_shdw_rsp_q_in(ha) != ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!test_bit(AF_HA_REMOVAL, &ha->flags))
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			ha->isp_ops->interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
/**
 * qla4_82xx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	uint32_t intr_status;
	uint32_t status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	ha->isr_count++;
	status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
	if (!(status & ha->nx_legacy_intr.int_vec_bit))
		return IRQ_NONE;

	status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
	if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
		DEBUG7(ql4_printk(KERN_INFO, ha,
				  "%s legacy Int not triggered\n", __func__));
		return IRQ_NONE;
	}

	/* clear the interrupt */
	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (1) {
		if (!(readl(&ha->qla4_82xx_reg->host_int) &
		      ISRX_82XX_RISC_INT)) {
			qla4_82xx_spurious_interrupt(ha, reqs_count);
			break;
		}

		intr_status = readl(&ha->qla4_82xx_reg->host_status);
		if ((intr_status &
		     (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
			qla4_82xx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		/* Enable Interrupt */
		qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}
#define LEG_INT_PTR_B31		(1 << 31)
#define LEG_INT_PTR_B30		(1 << 30)
#define PF_BITS_MASK		(0xF << 16)

/**
 * qla4_83xx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	uint32_t leg_int_ptr = 0;
	unsigned long flags = 0;

	ha->isr_count++;
	leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);

	/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
	if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
		DEBUG7(ql4_printk(KERN_ERR, ha,
				  "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
				  __func__));
		return IRQ_NONE;
	}

	/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
	if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
		DEBUG7(ql4_printk(KERN_ERR, ha,
				  "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
				  __func__, (leg_int_ptr & PF_BITS_MASK),
				  ha->pf_bit));
		return IRQ_NONE;
	}

	/* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
	 * Control register and poll till Legacy Interrupt Pointer register
	 * bit30 is 0.
	 */
	writel(0, &ha->qla4_83xx_reg->leg_int_trig);
	do {
		leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
		if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit)
			break;
	} while (leg_int_ptr & LEG_INT_PTR_B30);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr);
	ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
irqreturn_t
qla4_8xxx_msi_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: MSI: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	ha->isr_count++;
	/* clear the interrupt */
	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);

	return qla4_8xxx_default_intr_handler(irq, dev_id);
}
static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	uint32_t ival = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ival = readl(&ha->qla4_83xx_reg->risc_intr);
	if (ival == 0) {
		ql4_printk(KERN_INFO, ha,
			   "%s: It is a spurious mailbox interrupt!\n",
			   __func__);
		ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
		ival &= ~INT_MASK_FW_MB;
		writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
		goto exit;
	}

	qla4xxx_isr_decode_mailbox(ha,
			readl(&ha->qla4_83xx_reg->mailbox_out[0]));
	writel(0, &ha->qla4_83xx_reg->risc_intr);
	ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
	ival &= ~INT_MASK_FW_MB;
	writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
	ha->isr_count++;
exit:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}
/**
 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * This interrupt handler is called directly for MSI-X, and
 * called indirectly for MSI.
 **/
irqreturn_t
qla4_8xxx_default_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	uint32_t intr_status;
	uint8_t reqs_count = 0;

	if (is_qla8032(ha) || is_qla8042(ha)) {
		qla4_83xx_mailbox_intr_handler(irq, dev_id);
	} else {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		while (1) {
			if (!(readl(&ha->qla4_82xx_reg->host_int) &
			      ISRX_82XX_RISC_INT)) {
				qla4_82xx_spurious_interrupt(ha, reqs_count);
				break;
			}

			intr_status = readl(&ha->qla4_82xx_reg->host_status);
			if ((intr_status &
			     (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
				qla4_82xx_spurious_interrupt(ha, reqs_count);
				break;
			}

			ha->isp_ops->interrupt_service_routine(ha, intr_status);

			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;
		}
		ha->isr_count++;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	return IRQ_HANDLED;
}
irqreturn_t
qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	int intr_status;
	uint32_t ival = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (is_qla8032(ha) || is_qla8042(ha)) {
		ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
		if (ival == 0) {
			ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
				   __func__);
			goto exit_msix_rsp_q;
		}
		qla4xxx_process_response_queue(ha);
		writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
	} else {
		intr_status = readl(&ha->qla4_82xx_reg->host_status);
		if (intr_status & HSRX_RISC_IOCB_INT) {
			qla4xxx_process_response_queue(ha);
			writel(0, &ha->qla4_82xx_reg->host_int);
		} else {
			ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n",
				   __func__);
			goto exit_msix_rsp_q;
		}
	}
	ha->isr_count++;
exit_msix_rsp_q:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}
/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 * PROCESS_ALL_AENS         0
 * FLUSH_DDB_CHANGED_AENS   1
 * RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;
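		/* Drop hardware_lock while handling this AEN; the queue
		 * indices were already advanced under the lock above, and
		 * the lock is re-taken at the bottom of the loop. */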
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			      " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			      (ha->aen_out ? (ha->aen_out-1) : (MAX_AEN_ENTRIES-1)),
			      mbox_sts[0], mbox_sts[1], mbox_sts[2],
			      mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			switch (process_aen) {
			case FLUSH_DDB_CHANGED_AENS:
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			case PROCESS_ALL_AENS:
			default:
				/* Specific device. */
				if (mbox_sts[1] == 1)
					qla4xxx_process_ddb_changed(ha,
						mbox_sts[2], mbox_sts[3],
						mbox_sts[4]);
				break;
			}
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
	int ret = 0;
	int rval = QLA_ERROR;
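	/* Interrupt setup order: try MSI-X first, then MSI, then legacy
	 * INTx.  ISP40xx adapters use INTx only, ISP8324/ISP8042 do not
	 * support MSI, and ISP8022 does not support legacy INTx. */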
	if (is_qla40XX(ha))
		goto try_intx;

	if (ql4xenablemsix == 2) {
		/* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */
		if (is_qla8032(ha) || is_qla8042(ha)) {
			ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP%04x, falling back to INTx mode\n",
				   __func__, ha->pdev->device);
			goto try_intx;
		}
		goto try_msi;
	}

	if (ql4xenablemsix != 1)
		goto try_intx;

	/* Trying MSI-X */
	ret = qla4_8xxx_enable_msix(ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "MSI-X: Enabled (0x%X).\n", ha->revision_id));
		goto irq_attached;
	} else {
		if (is_qla8032(ha) || is_qla8042(ha)) {
			ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back to INTx mode. ret = %d\n",
				   __func__, ha->pdev->device, ret);
			goto try_intx;
		}
	}

	ql4_printk(KERN_WARNING, ha,
		   "MSI-X: Falling back to MSI mode -- %d.\n", ret);

try_msi:
	/* Trying MSI */
	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (ret > 0) {
		ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
				  0, DRIVER_NAME, ha);
		if (!ret) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
			goto irq_attached;
		} else {
			ql4_printk(KERN_WARNING, ha,
				   "MSI: Failed to reserve interrupt %d "
				   "already in use.\n", ha->pdev->irq);
			pci_free_irq_vectors(ha->pdev);
		}
	}

try_intx:
	if (is_qla8022(ha)) {
		ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n",
			   __func__);
		goto irq_not_attached;
	}

	/* Trying INTx */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
			  IRQF_SHARED, DRIVER_NAME, ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
		goto irq_attached;
	} else {
		ql4_printk(KERN_WARNING, ha,
			   "INTx: Failed to reserve interrupt %d already in"
			   " use.\n", ha->pdev->irq);
		goto irq_not_attached;
	}

irq_attached:
	set_bit(AF_IRQ_ATTACHED, &ha->flags);
	ha->host->irq = ha->pdev->irq;
	ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
		   __func__, ha->pdev->irq);
	rval = QLA_SUCCESS;
irq_not_attached:
	return rval;
}
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
	if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
		return;
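	/* With MSI-X two vectors are in use; vector 1 presumably carries the
	 * response-queue handler (see qla4_8xxx_msix_rsp_q), so free it
	 * before vector 0 and then release the vectors themselves. */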
	if (ha->pdev->msix_enabled)
		free_irq(pci_irq_vector(ha->pdev, 1), ha);
	free_irq(pci_irq_vector(ha->pdev, 0), ha);
	pci_free_irq_vectors(ha->pdev);
}