skd_main.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
  4. * was acquired by Western Digital in 2012.
  5. *
  6. * Copyright 2012 sTec, Inc.
  7. * Copyright (c) 2017 Western Digital Corporation or its affiliates.
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/init.h>
  12. #include <linux/pci.h>
  13. #include <linux/slab.h>
  14. #include <linux/spinlock.h>
  15. #include <linux/blkdev.h>
  16. #include <linux/blk-mq.h>
  17. #include <linux/sched.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/compiler.h>
  20. #include <linux/workqueue.h>
  21. #include <linux/delay.h>
  22. #include <linux/time.h>
  23. #include <linux/hdreg.h>
  24. #include <linux/dma-mapping.h>
  25. #include <linux/completion.h>
  26. #include <linux/scatterlist.h>
  27. #include <linux/err.h>
  28. #include <linux/aer.h>
  29. #include <linux/wait.h>
  30. #include <linux/stringify.h>
  31. #include <scsi/scsi.h>
  32. #include <scsi/sg.h>
  33. #include <linux/io.h>
  34. #include <linux/uaccess.h>
  35. #include <asm/unaligned.h>
  36. #include "skd_s1120.h"
  37. static int skd_dbg_level;
  38. static int skd_isr_comp_limit = 4;
  39. #define SKD_ASSERT(expr) \
  40. do { \
  41. if (unlikely(!(expr))) { \
  42. pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
  43. # expr, __FILE__, __func__, __LINE__); \
  44. } \
  45. } while (0)
  46. #define DRV_NAME "skd"
  47. #define PFX DRV_NAME ": "
  48. MODULE_LICENSE("GPL");
  49. MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");
  50. #define PCI_VENDOR_ID_STEC 0x1B39
  51. #define PCI_DEVICE_ID_S1120 0x0001
  52. #define SKD_FUA_NV (1 << 1)
  53. #define SKD_MINORS_PER_DEVICE 16
  54. #define SKD_MAX_QUEUE_DEPTH 200u
  55. #define SKD_PAUSE_TIMEOUT (5 * 1000)
  56. #define SKD_N_FITMSG_BYTES (512u)
  57. #define SKD_MAX_REQ_PER_MSG 14
  58. #define SKD_N_SPECIAL_FITMSG_BYTES (128u)
  59. /* SG elements are 32 bytes, so we can make this 4096 and still be under the
  60. * 128KB limit. That allows 4096*4K = 16M xfer size
  61. */
  62. #define SKD_N_SG_PER_REQ_DEFAULT 256u
  63. #define SKD_N_COMPLETION_ENTRY 256u
  64. #define SKD_N_READ_CAP_BYTES (8u)
  65. #define SKD_N_INTERNAL_BYTES (512u)
  66. #define SKD_SKCOMP_SIZE \
  67. ((sizeof(struct fit_completion_entry_v1) + \
  68. sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
  69. /* 5 bits of uniqifier, 0xF800 */
  70. #define SKD_ID_TABLE_MASK (3u << 8u)
  71. #define SKD_ID_RW_REQUEST (0u << 8u)
  72. #define SKD_ID_INTERNAL (1u << 8u)
  73. #define SKD_ID_FIT_MSG (3u << 8u)
  74. #define SKD_ID_SLOT_MASK 0x00FFu
  75. #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
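/*
 * A request id thus encodes two fields: bits 9:8 select the table
 * (0 = block-layer r/w request, 1 = driver-internal request, 3 = FIT
 * message context) and bits 7:0 carry the slot, e.g. the blk-mq tag for
 * r/w requests.  SKD_ID_SLOT_AND_TABLE_MASK selects both fields at once.
 */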
  76. #define SKD_N_MAX_SECTORS 2048u
  77. #define SKD_MAX_RETRIES 2u
  78. #define SKD_TIMER_SECONDS(seconds) (seconds)
  79. #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
  80. #define INQ_STD_NBYTES 36
  81. enum skd_drvr_state {
  82. SKD_DRVR_STATE_LOAD,
  83. SKD_DRVR_STATE_IDLE,
  84. SKD_DRVR_STATE_BUSY,
  85. SKD_DRVR_STATE_STARTING,
  86. SKD_DRVR_STATE_ONLINE,
  87. SKD_DRVR_STATE_PAUSING,
  88. SKD_DRVR_STATE_PAUSED,
  89. SKD_DRVR_STATE_RESTARTING,
  90. SKD_DRVR_STATE_RESUMING,
  91. SKD_DRVR_STATE_STOPPING,
  92. SKD_DRVR_STATE_FAULT,
  93. SKD_DRVR_STATE_DISAPPEARED,
  94. SKD_DRVR_STATE_PROTOCOL_MISMATCH,
  95. SKD_DRVR_STATE_BUSY_ERASE,
  96. SKD_DRVR_STATE_BUSY_SANITIZE,
  97. SKD_DRVR_STATE_BUSY_IMMINENT,
  98. SKD_DRVR_STATE_WAIT_BOOT,
  99. SKD_DRVR_STATE_SYNCING,
  100. };
  101. #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
  102. #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
  103. #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
  104. #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
  105. #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
  106. #define SKD_START_WAIT_SECONDS 90u
  107. enum skd_req_state {
  108. SKD_REQ_STATE_IDLE,
  109. SKD_REQ_STATE_SETUP,
  110. SKD_REQ_STATE_BUSY,
  111. SKD_REQ_STATE_COMPLETED,
  112. SKD_REQ_STATE_TIMEOUT,
  113. };
  114. enum skd_check_status_action {
  115. SKD_CHECK_STATUS_REPORT_GOOD,
  116. SKD_CHECK_STATUS_REPORT_SMART_ALERT,
  117. SKD_CHECK_STATUS_REQUEUE_REQUEST,
  118. SKD_CHECK_STATUS_REPORT_ERROR,
  119. SKD_CHECK_STATUS_BUSY_IMMINENT,
  120. };
  121. struct skd_msg_buf {
  122. struct fit_msg_hdr fmh;
  123. struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
  124. };
  125. struct skd_fitmsg_context {
  126. u32 id;
  127. u32 length;
  128. struct skd_msg_buf *msg_buf;
  129. dma_addr_t mb_dma_address;
  130. };
  131. struct skd_request_context {
  132. enum skd_req_state state;
  133. u16 id;
  134. u32 fitmsg_id;
  135. u8 flush_cmd;
  136. enum dma_data_direction data_dir;
  137. struct scatterlist *sg;
  138. u32 n_sg;
  139. u32 sg_byte_count;
  140. struct fit_sg_descriptor *sksg_list;
  141. dma_addr_t sksg_dma_address;
  142. struct fit_completion_entry_v1 completion;
  143. struct fit_comp_error_info err_info;
  144. int retries;
  145. blk_status_t status;
  146. };
  147. struct skd_special_context {
  148. struct skd_request_context req;
  149. void *data_buf;
  150. dma_addr_t db_dma_address;
  151. struct skd_msg_buf *msg_buf;
  152. dma_addr_t mb_dma_address;
  153. };
  154. typedef enum skd_irq_type {
  155. SKD_IRQ_LEGACY,
  156. SKD_IRQ_MSI,
  157. SKD_IRQ_MSIX
  158. } skd_irq_type_t;
  159. #define SKD_MAX_BARS 2
  160. struct skd_device {
  161. void __iomem *mem_map[SKD_MAX_BARS];
  162. resource_size_t mem_phys[SKD_MAX_BARS];
  163. u32 mem_size[SKD_MAX_BARS];
  164. struct skd_msix_entry *msix_entries;
  165. struct pci_dev *pdev;
  166. int pcie_error_reporting_is_enabled;
  167. spinlock_t lock;
  168. struct gendisk *disk;
  169. struct blk_mq_tag_set tag_set;
  170. struct request_queue *queue;
  171. struct skd_fitmsg_context *skmsg;
  172. struct device *class_dev;
  173. int gendisk_on;
  174. int sync_done;
  175. u32 devno;
  176. u32 major;
  177. char isr_name[30];
  178. enum skd_drvr_state state;
  179. u32 drive_state;
  180. u32 cur_max_queue_depth;
  181. u32 queue_low_water_mark;
  182. u32 dev_max_queue_depth;
  183. u32 num_fitmsg_context;
  184. u32 num_req_context;
  185. struct skd_fitmsg_context *skmsg_table;
  186. struct skd_special_context internal_skspcl;
  187. u32 read_cap_blocksize;
  188. u32 read_cap_last_lba;
  189. int read_cap_is_valid;
  190. int inquiry_is_valid;
  191. u8 inq_serial_num[13]; /*12 chars plus null term */
  192. u8 skcomp_cycle;
  193. u32 skcomp_ix;
  194. struct kmem_cache *msgbuf_cache;
  195. struct kmem_cache *sglist_cache;
  196. struct kmem_cache *databuf_cache;
  197. struct fit_completion_entry_v1 *skcomp_table;
  198. struct fit_comp_error_info *skerr_table;
  199. dma_addr_t cq_dma_address;
  200. wait_queue_head_t waitq;
  201. struct timer_list timer;
  202. u32 timer_countdown;
  203. u32 timer_substate;
  204. int sgs_per_request;
  205. u32 last_mtd;
  206. u32 proto_ver;
  207. int dbg_level;
  208. u32 connect_time_stamp;
  209. int connect_retries;
  210. #define SKD_MAX_CONNECT_RETRIES 16
  211. u32 drive_jiffies;
  212. u32 timo_slot;
  213. struct work_struct start_queue;
  214. struct work_struct completion_worker;
  215. };
  216. #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
  217. #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
  218. #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
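/*
 * Thin accessors for the BAR 1 register window (mem_map[1]).  Each access
 * is echoed through dev_dbg() when the per-device dbg_level is 2 or more.
 */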
  219. static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
  220. {
  221. u32 val = readl(skdev->mem_map[1] + offset);
  222. if (unlikely(skdev->dbg_level >= 2))
  223. dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
  224. return val;
  225. }
  226. static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
  227. u32 offset)
  228. {
  229. writel(val, skdev->mem_map[1] + offset);
  230. if (unlikely(skdev->dbg_level >= 2))
  231. dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
  232. }
  233. static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
  234. u32 offset)
  235. {
  236. writeq(val, skdev->mem_map[1] + offset);
  237. if (unlikely(skdev->dbg_level >= 2))
  238. dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
  239. val);
  240. }
  241. #define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
  242. static int skd_isr_type = SKD_IRQ_DEFAULT;
  243. module_param(skd_isr_type, int, 0444);
  244. MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
  245. " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
  246. #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
  247. static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
  248. module_param(skd_max_req_per_msg, int, 0444);
  249. MODULE_PARM_DESC(skd_max_req_per_msg,
  250. "Maximum SCSI requests packed in a single message."
  251. " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");
  252. #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
  253. #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
  254. static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
  255. module_param(skd_max_queue_depth, int, 0444);
  256. MODULE_PARM_DESC(skd_max_queue_depth,
  257. "Maximum SCSI requests issued to s1120."
  258. " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
  259. static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
  260. module_param(skd_sgs_per_request, int, 0444);
  261. MODULE_PARM_DESC(skd_sgs_per_request,
  262. "Maximum SG elements per block request."
  263. " (1-4096, default==256)");
  264. static int skd_max_pass_thru = 1;
  265. module_param(skd_max_pass_thru, int, 0444);
  266. MODULE_PARM_DESC(skd_max_pass_thru,
  267. "Maximum SCSI pass-thru at a time. IGNORED");
  268. module_param(skd_dbg_level, int, 0444);
  269. MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
  270. module_param(skd_isr_comp_limit, int, 0444);
  271. MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
  272. /* Major device number dynamically assigned. */
  273. static u32 skd_major;
  274. static void skd_destruct(struct skd_device *skdev);
  275. static const struct block_device_operations skd_blockdev_ops;
  276. static void skd_send_fitmsg(struct skd_device *skdev,
  277. struct skd_fitmsg_context *skmsg);
  278. static void skd_send_special_fitmsg(struct skd_device *skdev,
  279. struct skd_special_context *skspcl);
  280. static bool skd_preop_sg_list(struct skd_device *skdev,
  281. struct skd_request_context *skreq);
  282. static void skd_postop_sg_list(struct skd_device *skdev,
  283. struct skd_request_context *skreq);
  284. static void skd_restart_device(struct skd_device *skdev);
  285. static int skd_quiesce_dev(struct skd_device *skdev);
  286. static int skd_unquiesce_dev(struct skd_device *skdev);
  287. static void skd_disable_interrupts(struct skd_device *skdev);
  288. static void skd_isr_fwstate(struct skd_device *skdev);
  289. static void skd_recover_requests(struct skd_device *skdev);
  290. static void skd_soft_reset(struct skd_device *skdev);
  291. const char *skd_drive_state_to_str(int state);
  292. const char *skd_skdev_state_to_str(enum skd_drvr_state state);
  293. static void skd_log_skdev(struct skd_device *skdev, const char *event);
  294. static void skd_log_skreq(struct skd_device *skdev,
  295. struct skd_request_context *skreq, const char *event);
  296. /*
  297. *****************************************************************************
  298. * READ/WRITE REQUESTS
  299. *****************************************************************************
  300. */
  301. static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved)
  302. {
  303. int *count = data;
  304. (*count)++;
  305. return true;
  306. }
  307. static int skd_in_flight(struct skd_device *skdev)
  308. {
  309. int count = 0;
  310. blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
  311. return count;
  312. }
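/*
 * Build a READ(10)/WRITE(10) CDB: big-endian LBA in bytes 2-5 and a
 * big-endian transfer length in bytes 7-8.
 */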
  313. static void
  314. skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
  315. int data_dir, unsigned lba,
  316. unsigned count)
  317. {
  318. if (data_dir == READ)
  319. scsi_req->cdb[0] = READ_10;
  320. else
  321. scsi_req->cdb[0] = WRITE_10;
  322. scsi_req->cdb[1] = 0;
  323. scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
  324. scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
  325. scsi_req->cdb[4] = (lba & 0xff00) >> 8;
  326. scsi_req->cdb[5] = (lba & 0xff);
  327. scsi_req->cdb[6] = 0;
  328. scsi_req->cdb[7] = (count & 0xff00) >> 8;
  329. scsi_req->cdb[8] = count & 0xff;
  330. scsi_req->cdb[9] = 0;
  331. }
  332. static void
  333. skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
  334. struct skd_request_context *skreq)
  335. {
  336. skreq->flush_cmd = 1;
  337. scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
  338. scsi_req->cdb[1] = 0;
  339. scsi_req->cdb[2] = 0;
  340. scsi_req->cdb[3] = 0;
  341. scsi_req->cdb[4] = 0;
  342. scsi_req->cdb[5] = 0;
  343. scsi_req->cdb[6] = 0;
  344. scsi_req->cdb[7] = 0;
  345. scsi_req->cdb[8] = 0;
  346. scsi_req->cdb[9] = 0;
  347. }
  348. /*
  349. * Return true if and only if all pending requests should be failed.
  350. */
  351. static bool skd_fail_all(struct request_queue *q)
  352. {
  353. struct skd_device *skdev = q->queuedata;
  354. SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
  355. skd_log_skdev(skdev, "req_not_online");
  356. switch (skdev->state) {
  357. case SKD_DRVR_STATE_PAUSING:
  358. case SKD_DRVR_STATE_PAUSED:
  359. case SKD_DRVR_STATE_STARTING:
  360. case SKD_DRVR_STATE_RESTARTING:
  361. case SKD_DRVR_STATE_WAIT_BOOT:
  362. /* In case of starting, we haven't started the queue,
  363. * so we can't get here... but requests are
  364. * possibly hanging out waiting for us because we
  365. * reported the dev/skd0 already. They'll wait
  366. * forever if connect doesn't complete.
  367. * What to do??? delay dev/skd0 ??
  368. */
  369. case SKD_DRVR_STATE_BUSY:
  370. case SKD_DRVR_STATE_BUSY_IMMINENT:
  371. case SKD_DRVR_STATE_BUSY_ERASE:
  372. return false;
  373. case SKD_DRVR_STATE_BUSY_SANITIZE:
  374. case SKD_DRVR_STATE_STOPPING:
  375. case SKD_DRVR_STATE_SYNCING:
  376. case SKD_DRVR_STATE_FAULT:
  377. case SKD_DRVR_STATE_DISAPPEARED:
  378. default:
  379. return true;
  380. }
  381. }
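/*
 * blk-mq .queue_rq handler: map the request for DMA, append a SCSI request
 * to the current FIT message (coalescing up to skd_max_req_per_msg commands
 * per message) and send the message once it is full or the batch ends
 * (mqd->last).
 */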
  382. static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
  383. const struct blk_mq_queue_data *mqd)
  384. {
  385. struct request *const req = mqd->rq;
  386. struct request_queue *const q = req->q;
  387. struct skd_device *skdev = q->queuedata;
  388. struct skd_fitmsg_context *skmsg;
  389. struct fit_msg_hdr *fmh;
  390. const u32 tag = blk_mq_unique_tag(req);
  391. struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
  392. struct skd_scsi_request *scsi_req;
  393. unsigned long flags = 0;
  394. const u32 lba = blk_rq_pos(req);
  395. const u32 count = blk_rq_sectors(req);
  396. const int data_dir = rq_data_dir(req);
  397. if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
  398. return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
  399. if (!(req->rq_flags & RQF_DONTPREP)) {
  400. skreq->retries = 0;
  401. req->rq_flags |= RQF_DONTPREP;
  402. }
  403. blk_mq_start_request(req);
  404. WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
  405. tag, skd_max_queue_depth, q->nr_requests);
  406. SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
  407. dev_dbg(&skdev->pdev->dev,
  408. "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
  409. lba, count, count, data_dir);
  410. skreq->id = tag + SKD_ID_RW_REQUEST;
  411. skreq->flush_cmd = 0;
  412. skreq->n_sg = 0;
  413. skreq->sg_byte_count = 0;
  414. skreq->fitmsg_id = 0;
  415. skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  416. if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
  417. dev_dbg(&skdev->pdev->dev, "error Out\n");
  418. skreq->status = BLK_STS_RESOURCE;
  419. blk_mq_complete_request(req);
  420. return BLK_STS_OK;
  421. }
  422. dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
  423. skreq->n_sg *
  424. sizeof(struct fit_sg_descriptor),
  425. DMA_TO_DEVICE);
  426. /* Either a FIT msg is in progress or we have to start one. */
  427. if (skd_max_req_per_msg == 1) {
  428. skmsg = NULL;
  429. } else {
  430. spin_lock_irqsave(&skdev->lock, flags);
  431. skmsg = skdev->skmsg;
  432. }
  433. if (!skmsg) {
  434. skmsg = &skdev->skmsg_table[tag];
  435. skdev->skmsg = skmsg;
  436. /* Initialize the FIT msg header */
  437. fmh = &skmsg->msg_buf->fmh;
  438. memset(fmh, 0, sizeof(*fmh));
  439. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  440. skmsg->length = sizeof(*fmh);
  441. } else {
  442. fmh = &skmsg->msg_buf->fmh;
  443. }
  444. skreq->fitmsg_id = skmsg->id;
  445. scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
  446. memset(scsi_req, 0, sizeof(*scsi_req));
  447. scsi_req->hdr.tag = skreq->id;
  448. scsi_req->hdr.sg_list_dma_address =
  449. cpu_to_be64(skreq->sksg_dma_address);
  450. if (req_op(req) == REQ_OP_FLUSH) {
  451. skd_prep_zerosize_flush_cdb(scsi_req, skreq);
  452. SKD_ASSERT(skreq->flush_cmd == 1);
  453. } else {
  454. skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
  455. }
  456. if (req->cmd_flags & REQ_FUA)
  457. scsi_req->cdb[1] |= SKD_FUA_NV;
  458. scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);
  459. /* Complete resource allocations. */
  460. skreq->state = SKD_REQ_STATE_BUSY;
  461. skmsg->length += sizeof(struct skd_scsi_request);
  462. fmh->num_protocol_cmds_coalesced++;
  463. dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
  464. skd_in_flight(skdev));
  465. /*
  466. * If the FIT msg buffer is full send it.
  467. */
  468. if (skd_max_req_per_msg == 1) {
  469. skd_send_fitmsg(skdev, skmsg);
  470. } else {
  471. if (mqd->last ||
  472. fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
  473. skd_send_fitmsg(skdev, skmsg);
  474. skdev->skmsg = NULL;
  475. }
  476. spin_unlock_irqrestore(&skdev->lock, flags);
  477. }
  478. return BLK_STS_OK;
  479. }
  480. static enum blk_eh_timer_return skd_timed_out(struct request *req,
  481. bool reserved)
  482. {
  483. struct skd_device *skdev = req->q->queuedata;
  484. dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
  485. blk_mq_unique_tag(req));
  486. return BLK_EH_RESET_TIMER;
  487. }
  488. static void skd_complete_rq(struct request *req)
  489. {
  490. struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
  491. blk_mq_end_request(req, skreq->status);
  492. }
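/*
 * Map the request's data for DMA and translate the resulting scatterlist
 * into the FIT SG descriptor list the device walks.  Returns false on
 * mapping failure, in which case the caller fails the request with
 * BLK_STS_RESOURCE.
 */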
  493. static bool skd_preop_sg_list(struct skd_device *skdev,
  494. struct skd_request_context *skreq)
  495. {
  496. struct request *req = blk_mq_rq_from_pdu(skreq);
  497. struct scatterlist *sgl = &skreq->sg[0], *sg;
  498. int n_sg;
  499. int i;
  500. skreq->sg_byte_count = 0;
  501. WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
  502. skreq->data_dir != DMA_FROM_DEVICE);
  503. n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
  504. if (n_sg <= 0)
  505. return false;
  506. /*
  507. * Map scatterlist to PCI bus addresses.
  508. * Note PCI might change the number of entries.
  509. */
  510. n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
  511. if (n_sg <= 0)
  512. return false;
  513. SKD_ASSERT(n_sg <= skdev->sgs_per_request);
  514. skreq->n_sg = n_sg;
  515. for_each_sg(sgl, sg, n_sg, i) {
  516. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  517. u32 cnt = sg_dma_len(sg);
  518. uint64_t dma_addr = sg_dma_address(sg);
  519. sgd->control = FIT_SGD_CONTROL_NOT_LAST;
  520. sgd->byte_count = cnt;
  521. skreq->sg_byte_count += cnt;
  522. sgd->host_side_addr = dma_addr;
  523. sgd->dev_side_addr = 0;
  524. }
  525. skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
  526. skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
  527. if (unlikely(skdev->dbg_level > 1)) {
  528. dev_dbg(&skdev->pdev->dev,
  529. "skreq=%x sksg_list=%p sksg_dma=%pad\n",
  530. skreq->id, skreq->sksg_list, &skreq->sksg_dma_address);
  531. for (i = 0; i < n_sg; i++) {
  532. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  533. dev_dbg(&skdev->pdev->dev,
  534. " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
  535. i, sgd->byte_count, sgd->control,
  536. sgd->host_side_addr, sgd->next_desc_ptr);
  537. }
  538. }
  539. return true;
  540. }
  541. static void skd_postop_sg_list(struct skd_device *skdev,
  542. struct skd_request_context *skreq)
  543. {
  544. /*
  545. * restore the next ptr for next IO request so we
  546. * don't have to set it every time.
  547. */
  548. skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
  549. skreq->sksg_dma_address +
  550. ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
  551. dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
  552. skreq->data_dir);
  553. }
  554. /*
  555. *****************************************************************************
  556. * TIMER
  557. *****************************************************************************
  558. */
  559. static void skd_timer_tick_not_online(struct skd_device *skdev);
  560. static void skd_start_queue(struct work_struct *work)
  561. {
  562. struct skd_device *skdev = container_of(work, typeof(*skdev),
  563. start_queue);
  564. /*
  565. * Although it is safe to call blk_start_queue() from interrupt
  566. * context, blk_mq_start_hw_queues() must not be called from
  567. * interrupt context.
  568. */
  569. blk_mq_start_hw_queues(skdev->queue);
  570. }
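/*
 * One-second housekeeping timer, re-armed with jiffies + HZ.  It re-reads
 * FIT_STATUS, lets skd_isr_fwstate() react to firmware state changes and
 * runs the not-online recovery logic below.
 */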
  571. static void skd_timer_tick(struct timer_list *t)
  572. {
  573. struct skd_device *skdev = from_timer(skdev, t, timer);
  574. unsigned long reqflags;
  575. u32 state;
  576. if (skdev->state == SKD_DRVR_STATE_FAULT)
  577. /* The driver has declared fault, and we want it to
  578. * stay that way until driver is reloaded.
  579. */
  580. return;
  581. spin_lock_irqsave(&skdev->lock, reqflags);
  582. state = SKD_READL(skdev, FIT_STATUS);
  583. state &= FIT_SR_DRIVE_STATE_MASK;
  584. if (state != skdev->drive_state)
  585. skd_isr_fwstate(skdev);
  586. if (skdev->state != SKD_DRVR_STATE_ONLINE)
  587. skd_timer_tick_not_online(skdev);
  588. mod_timer(&skdev->timer, (jiffies + HZ));
  589. spin_unlock_irqrestore(&skdev->lock, reqflags);
  590. }
  591. static void skd_timer_tick_not_online(struct skd_device *skdev)
  592. {
  593. switch (skdev->state) {
  594. case SKD_DRVR_STATE_IDLE:
  595. case SKD_DRVR_STATE_LOAD:
  596. break;
  597. case SKD_DRVR_STATE_BUSY_SANITIZE:
  598. dev_dbg(&skdev->pdev->dev,
  599. "drive busy sanitize[%x], driver[%x]\n",
  600. skdev->drive_state, skdev->state);
  601. /* If we've been in sanitize for 3 seconds, we figure we're not
  602. * going to get anymore completions, so recover requests now
  603. */
  604. if (skdev->timer_countdown > 0) {
  605. skdev->timer_countdown--;
  606. return;
  607. }
  608. skd_recover_requests(skdev);
  609. break;
  610. case SKD_DRVR_STATE_BUSY:
  611. case SKD_DRVR_STATE_BUSY_IMMINENT:
  612. case SKD_DRVR_STATE_BUSY_ERASE:
  613. dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
  614. skdev->state, skdev->timer_countdown);
  615. if (skdev->timer_countdown > 0) {
  616. skdev->timer_countdown--;
  617. return;
  618. }
  619. dev_dbg(&skdev->pdev->dev,
  620. "busy[%x], timedout=%d, restarting device.",
  621. skdev->state, skdev->timer_countdown);
  622. skd_restart_device(skdev);
  623. break;
  624. case SKD_DRVR_STATE_WAIT_BOOT:
  625. case SKD_DRVR_STATE_STARTING:
  626. if (skdev->timer_countdown > 0) {
  627. skdev->timer_countdown--;
  628. return;
  629. }
  630. /* For now, we fault the drive. Could attempt resets to
  631. * recover at some point. */
  632. skdev->state = SKD_DRVR_STATE_FAULT;
  633. dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
  634. skdev->drive_state);
  635. /*start the queue so we can respond with error to requests */
  636. /* wakeup anyone waiting for startup complete */
  637. schedule_work(&skdev->start_queue);
  638. skdev->gendisk_on = -1;
  639. wake_up_interruptible(&skdev->waitq);
  640. break;
  641. case SKD_DRVR_STATE_ONLINE:
  642. /* shouldn't get here. */
  643. break;
  644. case SKD_DRVR_STATE_PAUSING:
  645. case SKD_DRVR_STATE_PAUSED:
  646. break;
  647. case SKD_DRVR_STATE_RESTARTING:
  648. if (skdev->timer_countdown > 0) {
  649. skdev->timer_countdown--;
  650. return;
  651. }
  652. /* For now, we fault the drive. Could attempt resets to
  653. * recover at some point. */
  654. skdev->state = SKD_DRVR_STATE_FAULT;
  655. dev_err(&skdev->pdev->dev,
  656. "DriveFault Reconnect Timeout (%x)\n",
  657. skdev->drive_state);
  658. /*
  659. * Recovering does two things:
  660. * 1. completes IO with error
  661. * 2. reclaims dma resources
  662. * When is it safe to recover requests?
  663. * - if the drive state is faulted
  664. * - if the state is still soft reset after our timeout
  665. * - if the drive registers are dead (state = FF)
  666. * If it is "unsafe", we still need to recover, so we will
  667. * disable pci bus mastering and disable our interrupts.
  668. */
  669. if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
  670. (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
  671. (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
  672. /* It never came out of soft reset. Try to
  673. * recover the requests and then let them
  674. * fail. This is to mitigate hung processes. */
  675. skd_recover_requests(skdev);
  676. else {
  677. dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
  678. skdev->drive_state);
  679. pci_disable_device(skdev->pdev);
  680. skd_disable_interrupts(skdev);
  681. skd_recover_requests(skdev);
  682. }
  683. /*start the queue so we can respond with error to requests */
  684. /* wakeup anyone waiting for startup complete */
  685. schedule_work(&skdev->start_queue);
  686. skdev->gendisk_on = -1;
  687. wake_up_interruptible(&skdev->waitq);
  688. break;
  689. case SKD_DRVR_STATE_RESUMING:
  690. case SKD_DRVR_STATE_STOPPING:
  691. case SKD_DRVR_STATE_SYNCING:
  692. case SKD_DRVR_STATE_FAULT:
  693. case SKD_DRVR_STATE_DISAPPEARED:
  694. default:
  695. break;
  696. }
  697. }
  698. static int skd_start_timer(struct skd_device *skdev)
  699. {
  700. int rc;
  701. timer_setup(&skdev->timer, skd_timer_tick, 0);
  702. rc = mod_timer(&skdev->timer, (jiffies + HZ));
  703. if (rc)
  704. dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
  705. return rc;
  706. }
  707. static void skd_kill_timer(struct skd_device *skdev)
  708. {
  709. del_timer_sync(&skdev->timer);
  710. }
  711. /*
  712. *****************************************************************************
  713. * INTERNAL REQUESTS -- generated by driver itself
  714. *****************************************************************************
  715. */
  716. static int skd_format_internal_skspcl(struct skd_device *skdev)
  717. {
  718. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  719. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  720. struct fit_msg_hdr *fmh;
  721. uint64_t dma_address;
  722. struct skd_scsi_request *scsi;
  723. fmh = &skspcl->msg_buf->fmh;
  724. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  725. fmh->num_protocol_cmds_coalesced = 1;
  726. scsi = &skspcl->msg_buf->scsi[0];
  727. memset(scsi, 0, sizeof(*scsi));
  728. dma_address = skspcl->req.sksg_dma_address;
  729. scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
  730. skspcl->req.n_sg = 1;
  731. sgd->control = FIT_SGD_CONTROL_LAST;
  732. sgd->byte_count = 0;
  733. sgd->host_side_addr = skspcl->db_dma_address;
  734. sgd->dev_side_addr = 0;
  735. sgd->next_desc_ptr = 0LL;
  736. return 1;
  737. }
  738. #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
  739. static void skd_send_internal_skspcl(struct skd_device *skdev,
  740. struct skd_special_context *skspcl,
  741. u8 opcode)
  742. {
  743. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  744. struct skd_scsi_request *scsi;
  745. unsigned char *buf = skspcl->data_buf;
  746. int i;
  747. if (skspcl->req.state != SKD_REQ_STATE_IDLE)
  748. /*
  749. * A refresh is already in progress.
  750. * Just wait for it to finish.
  751. */
  752. return;
  753. skspcl->req.state = SKD_REQ_STATE_BUSY;
  754. scsi = &skspcl->msg_buf->scsi[0];
  755. scsi->hdr.tag = skspcl->req.id;
  756. memset(scsi->cdb, 0, sizeof(scsi->cdb));
  757. switch (opcode) {
  758. case TEST_UNIT_READY:
  759. scsi->cdb[0] = TEST_UNIT_READY;
  760. sgd->byte_count = 0;
  761. scsi->hdr.sg_list_len_bytes = 0;
  762. break;
  763. case READ_CAPACITY:
  764. scsi->cdb[0] = READ_CAPACITY;
  765. sgd->byte_count = SKD_N_READ_CAP_BYTES;
  766. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  767. break;
  768. case INQUIRY:
  769. scsi->cdb[0] = INQUIRY;
  770. scsi->cdb[1] = 0x01; /* evpd */
  771. scsi->cdb[2] = 0x80; /* serial number page */
  772. scsi->cdb[4] = 0x10;
  773. sgd->byte_count = 16;
  774. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  775. break;
  776. case SYNCHRONIZE_CACHE:
  777. scsi->cdb[0] = SYNCHRONIZE_CACHE;
  778. sgd->byte_count = 0;
  779. scsi->hdr.sg_list_len_bytes = 0;
  780. break;
  781. case WRITE_BUFFER:
  782. scsi->cdb[0] = WRITE_BUFFER;
  783. scsi->cdb[1] = 0x02;
  784. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  785. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  786. sgd->byte_count = WR_BUF_SIZE;
  787. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  788. /* fill incrementing byte pattern */
  789. for (i = 0; i < sgd->byte_count; i++)
  790. buf[i] = i & 0xFF;
  791. break;
  792. case READ_BUFFER:
  793. scsi->cdb[0] = READ_BUFFER;
  794. scsi->cdb[1] = 0x02;
  795. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  796. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  797. sgd->byte_count = WR_BUF_SIZE;
  798. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  799. memset(skspcl->data_buf, 0, sgd->byte_count);
  800. break;
  801. default:
  802. SKD_ASSERT("Don't know what to send");
  803. return;
  804. }
  805. skd_send_special_fitmsg(skdev, skspcl);
  806. }
  807. static void skd_refresh_device_data(struct skd_device *skdev)
  808. {
  809. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  810. skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
  811. }
  812. static int skd_chk_read_buf(struct skd_device *skdev,
  813. struct skd_special_context *skspcl)
  814. {
  815. unsigned char *buf = skspcl->data_buf;
  816. int i;
  817. /* check for incrementing byte pattern */
  818. for (i = 0; i < WR_BUF_SIZE; i++)
  819. if (buf[i] != (i & 0xFF))
  820. return 1;
  821. return 0;
  822. }
  823. static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
  824. u8 code, u8 qual, u8 fruc)
  825. {
  826. /* If the check condition is of special interest, log a message */
  827. if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
  828. && (code == 0x04) && (qual == 0x06)) {
  829. dev_err(&skdev->pdev->dev,
  830. "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
  831. key, code, qual, fruc);
  832. }
  833. }
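/*
 * Completion handler for the driver-internal bring-up sequence:
 * TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern check) ->
 * READ_CAPACITY -> INQUIRY (serial number page), after which the device is
 * unquiesced.  Most failures retry from TEST_UNIT_READY; a write/read
 * buffer mismatch triggers a soft reset, up to SKD_MAX_CONNECT_RETRIES.
 */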
  834. static void skd_complete_internal(struct skd_device *skdev,
  835. struct fit_completion_entry_v1 *skcomp,
  836. struct fit_comp_error_info *skerr,
  837. struct skd_special_context *skspcl)
  838. {
  839. u8 *buf = skspcl->data_buf;
  840. u8 status;
  841. int i;
  842. struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];
  843. lockdep_assert_held(&skdev->lock);
  844. SKD_ASSERT(skspcl == &skdev->internal_skspcl);
  845. dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
  846. dma_sync_single_for_cpu(&skdev->pdev->dev,
  847. skspcl->db_dma_address,
  848. skspcl->req.sksg_list[0].byte_count,
  849. DMA_BIDIRECTIONAL);
  850. skspcl->req.completion = *skcomp;
  851. skspcl->req.state = SKD_REQ_STATE_IDLE;
  852. status = skspcl->req.completion.status;
  853. skd_log_check_status(skdev, status, skerr->key, skerr->code,
  854. skerr->qual, skerr->fruc);
  855. switch (scsi->cdb[0]) {
  856. case TEST_UNIT_READY:
  857. if (status == SAM_STAT_GOOD)
  858. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  859. else if ((status == SAM_STAT_CHECK_CONDITION) &&
  860. (skerr->key == MEDIUM_ERROR))
  861. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  862. else {
  863. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  864. dev_dbg(&skdev->pdev->dev,
  865. "TUR failed, don't send anymore state 0x%x\n",
  866. skdev->state);
  867. return;
  868. }
  869. dev_dbg(&skdev->pdev->dev,
  870. "**** TUR failed, retry skerr\n");
  871. skd_send_internal_skspcl(skdev, skspcl,
  872. TEST_UNIT_READY);
  873. }
  874. break;
  875. case WRITE_BUFFER:
  876. if (status == SAM_STAT_GOOD)
  877. skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
  878. else {
  879. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  880. dev_dbg(&skdev->pdev->dev,
  881. "write buffer failed, don't send anymore state 0x%x\n",
  882. skdev->state);
  883. return;
  884. }
  885. dev_dbg(&skdev->pdev->dev,
  886. "**** write buffer failed, retry skerr\n");
  887. skd_send_internal_skspcl(skdev, skspcl,
  888. TEST_UNIT_READY);
  889. }
  890. break;
  891. case READ_BUFFER:
  892. if (status == SAM_STAT_GOOD) {
  893. if (skd_chk_read_buf(skdev, skspcl) == 0)
  894. skd_send_internal_skspcl(skdev, skspcl,
  895. READ_CAPACITY);
  896. else {
  897. dev_err(&skdev->pdev->dev,
  898. "*** W/R Buffer mismatch %d ***\n",
  899. skdev->connect_retries);
  900. if (skdev->connect_retries <
  901. SKD_MAX_CONNECT_RETRIES) {
  902. skdev->connect_retries++;
  903. skd_soft_reset(skdev);
  904. } else {
  905. dev_err(&skdev->pdev->dev,
  906. "W/R Buffer Connect Error\n");
  907. return;
  908. }
  909. }
  910. } else {
  911. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  912. dev_dbg(&skdev->pdev->dev,
  913. "read buffer failed, don't send anymore state 0x%x\n",
  914. skdev->state);
  915. return;
  916. }
  917. dev_dbg(&skdev->pdev->dev,
  918. "**** read buffer failed, retry skerr\n");
  919. skd_send_internal_skspcl(skdev, skspcl,
  920. TEST_UNIT_READY);
  921. }
  922. break;
  923. case READ_CAPACITY:
  924. skdev->read_cap_is_valid = 0;
  925. if (status == SAM_STAT_GOOD) {
  926. skdev->read_cap_last_lba =
  927. (buf[0] << 24) | (buf[1] << 16) |
  928. (buf[2] << 8) | buf[3];
  929. skdev->read_cap_blocksize =
  930. (buf[4] << 24) | (buf[5] << 16) |
  931. (buf[6] << 8) | buf[7];
  932. dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
  933. skdev->read_cap_last_lba,
  934. skdev->read_cap_blocksize);
  935. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  936. skdev->read_cap_is_valid = 1;
  937. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  938. } else if ((status == SAM_STAT_CHECK_CONDITION) &&
  939. (skerr->key == MEDIUM_ERROR)) {
  940. skdev->read_cap_last_lba = ~0;
  941. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  942. dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
  943. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  944. } else {
  945. dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
  946. skd_send_internal_skspcl(skdev, skspcl,
  947. TEST_UNIT_READY);
  948. }
  949. break;
  950. case INQUIRY:
  951. skdev->inquiry_is_valid = 0;
  952. if (status == SAM_STAT_GOOD) {
  953. skdev->inquiry_is_valid = 1;
  954. for (i = 0; i < 12; i++)
  955. skdev->inq_serial_num[i] = buf[i + 4];
  956. skdev->inq_serial_num[12] = 0;
  957. }
  958. if (skd_unquiesce_dev(skdev) < 0)
  959. dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
  960. /* connection is complete */
  961. skdev->connect_retries = 0;
  962. break;
  963. case SYNCHRONIZE_CACHE:
  964. if (status == SAM_STAT_GOOD)
  965. skdev->sync_done = 1;
  966. else
  967. skdev->sync_done = -1;
  968. wake_up_interruptible(&skdev->waitq);
  969. break;
  970. default:
  971. SKD_ASSERT("we didn't send this");
  972. }
  973. }
  974. /*
  975. *****************************************************************************
  976. * FIT MESSAGES
  977. *****************************************************************************
  978. */
  979. static void skd_send_fitmsg(struct skd_device *skdev,
  980. struct skd_fitmsg_context *skmsg)
  981. {
  982. u64 qcmd;
  983. dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n",
  984. &skmsg->mb_dma_address, skd_in_flight(skdev));
  985. dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
  986. qcmd = skmsg->mb_dma_address;
  987. qcmd |= FIT_QCMD_QID_NORMAL;
  988. if (unlikely(skdev->dbg_level > 1)) {
  989. u8 *bp = (u8 *)skmsg->msg_buf;
  990. int i;
  991. for (i = 0; i < skmsg->length; i += 8) {
  992. dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
  993. &bp[i]);
  994. if (i == 0)
  995. i = 64 - 8;
  996. }
  997. }
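/*
 * Encode a message-size hint in the queue command: skmsg->length is mapped
 * to the smallest bucket that holds it (up to 64 bytes -> MSGSIZE_64,
 * 65-128 -> MSGSIZE_128, 129-256 -> MSGSIZE_256, larger -> MSGSIZE_512).
 */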
  998. if (skmsg->length > 256)
  999. qcmd |= FIT_QCMD_MSGSIZE_512;
  1000. else if (skmsg->length > 128)
  1001. qcmd |= FIT_QCMD_MSGSIZE_256;
  1002. else if (skmsg->length > 64)
  1003. qcmd |= FIT_QCMD_MSGSIZE_128;
  1004. else
  1005. /*
  1006. * This makes no sense because the FIT msg header is
  1007. * 64 bytes. If the msg is only 64 bytes long it has
  1008. * no payload.
  1009. */
  1010. qcmd |= FIT_QCMD_MSGSIZE_64;
  1011. dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
  1012. skmsg->length, DMA_TO_DEVICE);
  1013. /* Make sure skd_msg_buf is written before the doorbell is triggered. */
  1014. smp_wmb();
  1015. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1016. }
  1017. static void skd_send_special_fitmsg(struct skd_device *skdev,
  1018. struct skd_special_context *skspcl)
  1019. {
  1020. u64 qcmd;
  1021. WARN_ON_ONCE(skspcl->req.n_sg != 1);
  1022. if (unlikely(skdev->dbg_level > 1)) {
  1023. u8 *bp = (u8 *)skspcl->msg_buf;
  1024. int i;
  1025. for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
  1026. dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
  1027. &bp[i]);
  1028. if (i == 0)
  1029. i = 64 - 8;
  1030. }
  1031. dev_dbg(&skdev->pdev->dev,
  1032. "skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n",
  1033. skspcl, skspcl->req.id, skspcl->req.sksg_list,
  1034. &skspcl->req.sksg_dma_address);
  1035. for (i = 0; i < skspcl->req.n_sg; i++) {
  1036. struct fit_sg_descriptor *sgd =
  1037. &skspcl->req.sksg_list[i];
  1038. dev_dbg(&skdev->pdev->dev,
  1039. " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
  1040. i, sgd->byte_count, sgd->control,
  1041. sgd->host_side_addr, sgd->next_desc_ptr);
  1042. }
  1043. }
  1044. /*
  1045. * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
  1046. * and one 64-byte SSDI command.
  1047. */
  1048. qcmd = skspcl->mb_dma_address;
  1049. qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
  1050. dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
  1051. SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
  1052. dma_sync_single_for_device(&skdev->pdev->dev,
  1053. skspcl->req.sksg_dma_address,
  1054. 1 * sizeof(struct fit_sg_descriptor),
  1055. DMA_TO_DEVICE);
  1056. dma_sync_single_for_device(&skdev->pdev->dev,
  1057. skspcl->db_dma_address,
  1058. skspcl->req.sksg_list[0].byte_count,
  1059. DMA_BIDIRECTIONAL);
  1060. /* Make sure skd_msg_buf is written before the doorbell is triggered. */
  1061. smp_wmb();
  1062. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1063. }
  1064. /*
  1065. *****************************************************************************
  1066. * COMPLETION QUEUE
  1067. *****************************************************************************
  1068. */
  1069. static void skd_complete_other(struct skd_device *skdev,
  1070. struct fit_completion_entry_v1 *skcomp,
  1071. struct fit_comp_error_info *skerr);
  1072. struct sns_info {
  1073. u8 type;
  1074. u8 stat;
  1075. u8 key;
  1076. u8 asc;
  1077. u8 ascq;
  1078. u8 mask;
  1079. enum skd_check_status_action action;
  1080. };
  1081. static struct sns_info skd_chkstat_table[] = {
  1082. /* Good */
  1083. { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
  1084. SKD_CHECK_STATUS_REPORT_GOOD },
  1085. /* Smart alerts */
  1086. { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
  1087. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1088. { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
  1089. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1090. { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
  1091. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1092. /* Retry (with limits) */
  1093. { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
  1094. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1095. { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
  1096. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1097. { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
  1098. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1099. { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
  1100. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1101. /* Busy (or about to be) */
  1102. { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
  1103. SKD_CHECK_STATUS_BUSY_IMMINENT },
  1104. };
  1105. /*
  1106. * Look up status and sense data to decide how to handle the error
  1107. * from the device.
  1108. * mask says which fields must match e.g., mask=0x18 means check
  1109. * type and stat, ignore key, asc, ascq.
  1110. */
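/*
 * For example, mask=0x1c (used by the first entry and the DMA-error entry)
 * requires type, stat and key to match and ignores asc/ascq.
 */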
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, struct fit_comp_error_info *skerr)
{
	int i;

	dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
		skerr->key, skerr->code, skerr->qual, skerr->fruc);

	dev_dbg(&skdev->pdev->dev,
		"stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
		skerr->fruc);

	/* Does the info match an entry in the good category? */
	for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;
		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;
		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;
		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;
		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			dev_err(&skdev->pdev->dev,
				"SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
				skerr->key, skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		dev_dbg(&skdev->pdev->dev, "status check: error\n");
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	dev_dbg(&skdev->pdev->dev, "status check good default\n");
	return SKD_CHECK_STATUS_REPORT_GOOD;
}
  1157. static void skd_resolve_req_exception(struct skd_device *skdev,
  1158. struct skd_request_context *skreq,
  1159. struct request *req)
  1160. {
  1161. u8 cmp_status = skreq->completion.status;
  1162. switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
  1163. case SKD_CHECK_STATUS_REPORT_GOOD:
  1164. case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
  1165. skreq->status = BLK_STS_OK;
  1166. if (likely(!blk_should_fake_timeout(req->q)))
  1167. blk_mq_complete_request(req);
  1168. break;
  1169. case SKD_CHECK_STATUS_BUSY_IMMINENT:
  1170. skd_log_skreq(skdev, skreq, "retry(busy)");
  1171. blk_mq_requeue_request(req, true);
  1172. dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
  1173. skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
  1174. skdev->timer_countdown = SKD_TIMER_MINUTES(20);
  1175. skd_quiesce_dev(skdev);
  1176. break;
  1177. case SKD_CHECK_STATUS_REQUEUE_REQUEST:
  1178. if (++skreq->retries < SKD_MAX_RETRIES) {
  1179. skd_log_skreq(skdev, skreq, "retry");
  1180. blk_mq_requeue_request(req, true);
  1181. break;
  1182. }
  1183. fallthrough;
  1184. case SKD_CHECK_STATUS_REPORT_ERROR:
  1185. default:
  1186. skreq->status = BLK_STS_IOERR;
  1187. if (likely(!blk_should_fake_timeout(req->q)))
  1188. blk_mq_complete_request(req);
  1189. break;
  1190. }
  1191. }
  1192. static void skd_release_skreq(struct skd_device *skdev,
  1193. struct skd_request_context *skreq)
  1194. {
  1195. /*
  1196. * Reclaim the skd_request_context
  1197. */
  1198. skreq->state = SKD_REQ_STATE_IDLE;
  1199. }
  1200. static int skd_isr_completion_posted(struct skd_device *skdev,
  1201. int limit, int *enqueued)
  1202. {
  1203. struct fit_completion_entry_v1 *skcmp;
  1204. struct fit_comp_error_info *skerr;
  1205. u16 req_id;
  1206. u32 tag;
  1207. u16 hwq = 0;
  1208. struct request *rq;
  1209. struct skd_request_context *skreq;
  1210. u16 cmp_cntxt;
  1211. u8 cmp_status;
  1212. u8 cmp_cycle;
  1213. u32 cmp_bytes;
  1214. int rc = 0;
  1215. int processed = 0;
  1216. lockdep_assert_held(&skdev->lock);
  1217. for (;; ) {
  1218. SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
  1219. skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
  1220. cmp_cycle = skcmp->cycle;
  1221. cmp_cntxt = skcmp->tag;
  1222. cmp_status = skcmp->status;
  1223. cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
  1224. skerr = &skdev->skerr_table[skdev->skcomp_ix];
  1225. dev_dbg(&skdev->pdev->dev,
  1226. "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
  1227. skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
  1228. cmp_cntxt, cmp_status, skd_in_flight(skdev),
  1229. cmp_bytes, skdev->proto_ver);
  1230. if (cmp_cycle != skdev->skcomp_cycle) {
  1231. dev_dbg(&skdev->pdev->dev, "end of completions\n");
  1232. break;
  1233. }
  1234. /*
  1235. * Update the completion queue head index and possibly
  1236. * the completion cycle count. 8-bit wrap-around.
  1237. */
  1238. skdev->skcomp_ix++;
  1239. if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
  1240. skdev->skcomp_ix = 0;
  1241. skdev->skcomp_cycle++;
  1242. }
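		/*
		 * Bumping skcomp_cycle on wrap means entries left over from
		 * the previous pass no longer match and are treated as not
		 * yet posted by the check at the top of this loop.
		 */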
  1243. /*
  1244. * The command context is a unique 32-bit ID. The low order
  1245. * bits help locate the request. The request is usually a
  1246. * r/w request (see skd_start() above) or a special request.
  1247. */
  1248. req_id = cmp_cntxt;
  1249. tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
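		/*
		 * The low bits of the context double as the blk-mq tag for
		 * r/w requests.
		 */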
  1250. /* Is this other than a r/w request? */
  1251. if (tag >= skdev->num_req_context) {
  1252. /*
  1253. * This is not a completion for a r/w request.
  1254. */
  1255. WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
  1256. tag));
  1257. skd_complete_other(skdev, skcmp, skerr);
  1258. continue;
  1259. }
  1260. rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
  1261. if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
  1262. tag))
  1263. continue;
  1264. skreq = blk_mq_rq_to_pdu(rq);
  1265. /*
  1266. * Make sure the request ID for the slot matches.
  1267. */
  1268. if (skreq->id != req_id) {
  1269. dev_err(&skdev->pdev->dev,
  1270. "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
  1271. req_id, skreq->id, cmp_cntxt);
  1272. continue;
  1273. }
  1274. SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
  1275. skreq->completion = *skcmp;
  1276. if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
  1277. skreq->err_info = *skerr;
  1278. skd_log_check_status(skdev, cmp_status, skerr->key,
  1279. skerr->code, skerr->qual,
  1280. skerr->fruc);
  1281. }
  1282. /* Release DMA resources for the request. */
  1283. if (skreq->n_sg > 0)
  1284. skd_postop_sg_list(skdev, skreq);
  1285. skd_release_skreq(skdev, skreq);
  1286. /*
  1287. * Capture the outcome and post it back to the native request.
  1288. */
  1289. if (likely(cmp_status == SAM_STAT_GOOD)) {
  1290. skreq->status = BLK_STS_OK;
  1291. if (likely(!blk_should_fake_timeout(rq->q)))
  1292. blk_mq_complete_request(rq);
  1293. } else {
  1294. skd_resolve_req_exception(skdev, skreq, rq);
  1295. }
		/* skd_isr_comp_limit equal to zero means no limit */
  1297. if (limit) {
  1298. if (++processed >= limit) {
  1299. rc = 1;
  1300. break;
  1301. }
  1302. }
  1303. }
  1304. if (skdev->state == SKD_DRVR_STATE_PAUSING &&
  1305. skd_in_flight(skdev) == 0) {
  1306. skdev->state = SKD_DRVR_STATE_PAUSED;
  1307. wake_up_interruptible(&skdev->waitq);
  1308. }
  1309. return rc;
  1310. }
  1311. static void skd_complete_other(struct skd_device *skdev,
  1312. struct fit_completion_entry_v1 *skcomp,
  1313. struct fit_comp_error_info *skerr)
  1314. {
  1315. u32 req_id = 0;
  1316. u32 req_table;
  1317. u32 req_slot;
  1318. struct skd_special_context *skspcl;
  1319. lockdep_assert_held(&skdev->lock);
  1320. req_id = skcomp->tag;
  1321. req_table = req_id & SKD_ID_TABLE_MASK;
  1322. req_slot = req_id & SKD_ID_SLOT_MASK;
  1323. dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
  1324. req_id, req_slot);
	/*
	 * Based on the request id, determine how to dispatch this completion.
	 * This switch/case finds the good cases and forwards the completion
	 * entry. Errors are reported below the switch.
	 */
  1330. switch (req_table) {
  1331. case SKD_ID_RW_REQUEST:
  1332. /*
  1333. * The caller, skd_isr_completion_posted() above,
  1334. * handles r/w requests. The only way we get here
  1335. * is if the req_slot is out of bounds.
  1336. */
  1337. break;
  1338. case SKD_ID_INTERNAL:
  1339. if (req_slot == 0) {
  1340. skspcl = &skdev->internal_skspcl;
  1341. if (skspcl->req.id == req_id &&
  1342. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  1343. skd_complete_internal(skdev,
  1344. skcomp, skerr, skspcl);
  1345. return;
  1346. }
  1347. }
  1348. break;
  1349. case SKD_ID_FIT_MSG:
		/*
		 * These IDs should never appear in a completion record.
		 */
  1353. break;
  1354. default:
		/*
		 * These IDs should never appear anywhere.
		 */
  1358. break;
  1359. }
  1360. /*
  1361. * If we get here it is a bad or stale id.
  1362. */
  1363. }
  1364. static void skd_reset_skcomp(struct skd_device *skdev)
  1365. {
  1366. memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
  1367. skdev->skcomp_ix = 0;
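	/*
	 * Expect cycle 1 next: the table was just zeroed, so any entry still
	 * carrying cycle 0 reads as "not yet posted" to the completion loop.
	 */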
  1368. skdev->skcomp_cycle = 1;
  1369. }
  1370. /*
  1371. *****************************************************************************
  1372. * INTERRUPTS
  1373. *****************************************************************************
  1374. */
  1375. static void skd_completion_worker(struct work_struct *work)
  1376. {
  1377. struct skd_device *skdev =
  1378. container_of(work, struct skd_device, completion_worker);
  1379. unsigned long flags;
  1380. int flush_enqueued = 0;
  1381. spin_lock_irqsave(&skdev->lock, flags);
	/*
	 * Pass in limit=0, which means no limit:
	 * process everything in the completion queue.
	 */
  1386. skd_isr_completion_posted(skdev, 0, &flush_enqueued);
  1387. schedule_work(&skdev->start_queue);
  1388. spin_unlock_irqrestore(&skdev->lock, flags);
  1389. }
  1390. static void skd_isr_msg_from_dev(struct skd_device *skdev);
  1391. static irqreturn_t
  1392. skd_isr(int irq, void *ptr)
  1393. {
  1394. struct skd_device *skdev = ptr;
  1395. u32 intstat;
  1396. u32 ack;
  1397. int rc = 0;
  1398. int deferred = 0;
  1399. int flush_enqueued = 0;
  1400. spin_lock(&skdev->lock);
  1401. for (;; ) {
  1402. intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  1403. ack = FIT_INT_DEF_MASK;
  1404. ack &= intstat;
  1405. dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
  1406. ack);
		/*
		 * As long as there is an interrupt pending on the device,
		 * keep running the loop. When none remain, get out; if we
		 * never did any processing, defer to the completion worker.
		 */
		if (ack == 0) {
			/*
			 * No interrupts pending on the device, but run the
			 * completion processor anyway if the drive is online.
			 */
			if (rc == 0)
				if (likely(skdev->state ==
					   SKD_DRVR_STATE_ONLINE))
					deferred = 1;
			break;
		}
  1421. rc = IRQ_HANDLED;
  1422. SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
  1423. if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
  1424. (skdev->state != SKD_DRVR_STATE_STOPPING))) {
  1425. if (intstat & FIT_ISH_COMPLETION_POSTED) {
  1426. /*
  1427. * If we have already deferred completion
  1428. * processing, don't bother running it again
  1429. */
  1430. if (deferred == 0)
  1431. deferred =
  1432. skd_isr_completion_posted(skdev,
  1433. skd_isr_comp_limit, &flush_enqueued);
  1434. }
  1435. if (intstat & FIT_ISH_FW_STATE_CHANGE) {
  1436. skd_isr_fwstate(skdev);
  1437. if (skdev->state == SKD_DRVR_STATE_FAULT ||
  1438. skdev->state ==
  1439. SKD_DRVR_STATE_DISAPPEARED) {
  1440. spin_unlock(&skdev->lock);
  1441. return rc;
  1442. }
  1443. }
  1444. if (intstat & FIT_ISH_MSG_FROM_DEV)
  1445. skd_isr_msg_from_dev(skdev);
  1446. }
  1447. }
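	/*
	 * Restart the submission queue if requests were flushed/enqueued.
	 * If completion processing was deferred, hand it to the worker
	 * (which restarts the queue itself); otherwise restart it here.
	 */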
  1448. if (unlikely(flush_enqueued))
  1449. schedule_work(&skdev->start_queue);
  1450. if (deferred)
  1451. schedule_work(&skdev->completion_worker);
  1452. else if (!flush_enqueued)
  1453. schedule_work(&skdev->start_queue);
  1454. spin_unlock(&skdev->lock);
  1455. return rc;
  1456. }
  1457. static void skd_drive_fault(struct skd_device *skdev)
  1458. {
  1459. skdev->state = SKD_DRVR_STATE_FAULT;
  1460. dev_err(&skdev->pdev->dev, "Drive FAULT\n");
  1461. }
  1462. static void skd_drive_disappeared(struct skd_device *skdev)
  1463. {
  1464. skdev->state = SKD_DRVR_STATE_DISAPPEARED;
  1465. dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
  1466. }
  1467. static void skd_isr_fwstate(struct skd_device *skdev)
  1468. {
  1469. u32 sense;
  1470. u32 state;
  1471. u32 mtd;
  1472. int prev_driver_state = skdev->state;
  1473. sense = SKD_READL(skdev, FIT_STATUS);
  1474. state = sense & FIT_SR_DRIVE_STATE_MASK;
  1475. dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
  1476. skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
  1477. skd_drive_state_to_str(state), state);
  1478. skdev->drive_state = state;
  1479. switch (skdev->drive_state) {
  1480. case FIT_SR_DRIVE_INIT:
  1481. if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
  1482. skd_disable_interrupts(skdev);
  1483. break;
  1484. }
  1485. if (skdev->state == SKD_DRVR_STATE_RESTARTING)
  1486. skd_recover_requests(skdev);
  1487. if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
  1488. skdev->timer_countdown = SKD_STARTING_TIMO;
  1489. skdev->state = SKD_DRVR_STATE_STARTING;
  1490. skd_soft_reset(skdev);
  1491. break;
  1492. }
  1493. mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
  1494. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1495. skdev->last_mtd = mtd;
  1496. break;
  1497. case FIT_SR_DRIVE_ONLINE:
  1498. skdev->cur_max_queue_depth = skd_max_queue_depth;
  1499. if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
  1500. skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
  1501. skdev->queue_low_water_mark =
  1502. skdev->cur_max_queue_depth * 2 / 3 + 1;
  1503. if (skdev->queue_low_water_mark < 1)
  1504. skdev->queue_low_water_mark = 1;
  1505. dev_info(&skdev->pdev->dev,
  1506. "Queue depth limit=%d dev=%d lowat=%d\n",
  1507. skdev->cur_max_queue_depth,
  1508. skdev->dev_max_queue_depth,
  1509. skdev->queue_low_water_mark);
  1510. skd_refresh_device_data(skdev);
  1511. break;
  1512. case FIT_SR_DRIVE_BUSY:
  1513. skdev->state = SKD_DRVR_STATE_BUSY;
  1514. skdev->timer_countdown = SKD_BUSY_TIMO;
  1515. skd_quiesce_dev(skdev);
  1516. break;
  1517. case FIT_SR_DRIVE_BUSY_SANITIZE:
  1518. /* set timer for 3 seconds, we'll abort any unfinished
  1519. * commands after that expires
  1520. */
  1521. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  1522. skdev->timer_countdown = SKD_TIMER_SECONDS(3);
  1523. schedule_work(&skdev->start_queue);
  1524. break;
  1525. case FIT_SR_DRIVE_BUSY_ERASE:
  1526. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  1527. skdev->timer_countdown = SKD_BUSY_TIMO;
  1528. break;
  1529. case FIT_SR_DRIVE_OFFLINE:
  1530. skdev->state = SKD_DRVR_STATE_IDLE;
  1531. break;
  1532. case FIT_SR_DRIVE_SOFT_RESET:
  1533. switch (skdev->state) {
  1534. case SKD_DRVR_STATE_STARTING:
  1535. case SKD_DRVR_STATE_RESTARTING:
  1536. /* Expected by a caller of skd_soft_reset() */
  1537. break;
  1538. default:
  1539. skdev->state = SKD_DRVR_STATE_RESTARTING;
  1540. break;
  1541. }
  1542. break;
  1543. case FIT_SR_DRIVE_FW_BOOTING:
  1544. dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
  1545. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  1546. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  1547. break;
  1548. case FIT_SR_DRIVE_DEGRADED:
  1549. case FIT_SR_PCIE_LINK_DOWN:
  1550. case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
  1551. break;
  1552. case FIT_SR_DRIVE_FAULT:
  1553. skd_drive_fault(skdev);
  1554. skd_recover_requests(skdev);
  1555. schedule_work(&skdev->start_queue);
  1556. break;
  1557. /* PCIe bus returned all Fs? */
  1558. case 0xFF:
  1559. dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
  1560. sense);
  1561. skd_drive_disappeared(skdev);
  1562. skd_recover_requests(skdev);
  1563. schedule_work(&skdev->start_queue);
  1564. break;
  1565. default:
		/*
		 * Unknown FW state; wait for a state we recognize.
		 */
  1569. break;
  1570. }
  1571. dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
  1572. skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
  1573. skd_skdev_state_to_str(skdev->state), skdev->state);
  1574. }
  1575. static bool skd_recover_request(struct request *req, void *data, bool reserved)
  1576. {
  1577. struct skd_device *const skdev = data;
  1578. struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
  1579. if (skreq->state != SKD_REQ_STATE_BUSY)
  1580. return true;
  1581. skd_log_skreq(skdev, skreq, "recover");
  1582. /* Release DMA resources for the request. */
  1583. if (skreq->n_sg > 0)
  1584. skd_postop_sg_list(skdev, skreq);
  1585. skreq->state = SKD_REQ_STATE_IDLE;
  1586. skreq->status = BLK_STS_IOERR;
  1587. blk_mq_complete_request(req);
  1588. return true;
  1589. }
  1590. static void skd_recover_requests(struct skd_device *skdev)
  1591. {
  1592. blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
  1593. }
  1594. static void skd_isr_msg_from_dev(struct skd_device *skdev)
  1595. {
  1596. u32 mfd;
  1597. u32 mtd;
  1598. u32 data;
  1599. mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  1600. dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
  1601. skdev->last_mtd);
  1602. /* ignore any mtd that is an ack for something we didn't send */
  1603. if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
  1604. return;
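	/*
	 * Each acknowledged message advances the init handshake:
	 * FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR
	 * -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO/HI -> ARM_QUEUE.
	 */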
  1605. switch (FIT_MXD_TYPE(mfd)) {
  1606. case FIT_MTD_FITFW_INIT:
  1607. skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
  1608. if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
  1609. dev_err(&skdev->pdev->dev, "protocol mismatch\n");
  1610. dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
  1611. skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
  1612. dev_err(&skdev->pdev->dev, " please upgrade driver\n");
  1613. skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
  1614. skd_soft_reset(skdev);
  1615. break;
  1616. }
  1617. mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
  1618. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1619. skdev->last_mtd = mtd;
  1620. break;
  1621. case FIT_MTD_GET_CMDQ_DEPTH:
  1622. skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
  1623. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
  1624. SKD_N_COMPLETION_ENTRY);
  1625. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1626. skdev->last_mtd = mtd;
  1627. break;
  1628. case FIT_MTD_SET_COMPQ_DEPTH:
  1629. SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
  1630. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
  1631. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1632. skdev->last_mtd = mtd;
  1633. break;
  1634. case FIT_MTD_SET_COMPQ_ADDR:
  1635. skd_reset_skcomp(skdev);
  1636. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
  1637. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1638. skdev->last_mtd = mtd;
  1639. break;
  1640. case FIT_MTD_CMD_LOG_HOST_ID:
  1641. /* hardware interface overflows in y2106 */
  1642. skdev->connect_time_stamp = (u32)ktime_get_real_seconds();
  1643. data = skdev->connect_time_stamp & 0xFFFF;
  1644. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
  1645. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1646. skdev->last_mtd = mtd;
  1647. break;
  1648. case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
  1649. skdev->drive_jiffies = FIT_MXD_DATA(mfd);
  1650. data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
  1651. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
  1652. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1653. skdev->last_mtd = mtd;
  1654. break;
  1655. case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
  1656. skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
  1657. mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
  1658. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1659. skdev->last_mtd = mtd;
  1660. dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
  1661. skdev->connect_time_stamp, skdev->drive_jiffies);
  1662. break;
  1663. case FIT_MTD_ARM_QUEUE:
  1664. skdev->last_mtd = 0;
  1665. /*
  1666. * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
  1667. */
  1668. break;
  1669. default:
  1670. break;
  1671. }
  1672. }
  1673. static void skd_disable_interrupts(struct skd_device *skdev)
  1674. {
  1675. u32 sense;
  1676. sense = SKD_READL(skdev, FIT_CONTROL);
  1677. sense &= ~FIT_CR_ENABLE_INTERRUPTS;
  1678. SKD_WRITEL(skdev, sense, FIT_CONTROL);
  1679. dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
	/* Note that all 1s are written: a 1 bit disables an interrupt
	 * source, a 0 bit enables it.
	 */
  1683. SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
  1684. }
  1685. static void skd_enable_interrupts(struct skd_device *skdev)
  1686. {
  1687. u32 val;
  1688. /* unmask interrupts first */
  1689. val = FIT_ISH_FW_STATE_CHANGE +
  1690. FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
	/* Note that the complement of the mask is written. A 1 bit means
	 * disable, a 0 bit means enable. */
  1693. SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
  1694. dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
  1695. val = SKD_READL(skdev, FIT_CONTROL);
  1696. val |= FIT_CR_ENABLE_INTERRUPTS;
  1697. dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
  1698. SKD_WRITEL(skdev, val, FIT_CONTROL);
  1699. }
  1700. /*
  1701. *****************************************************************************
  1702. * START, STOP, RESTART, QUIESCE, UNQUIESCE
  1703. *****************************************************************************
  1704. */
  1705. static void skd_soft_reset(struct skd_device *skdev)
  1706. {
  1707. u32 val;
  1708. val = SKD_READL(skdev, FIT_CONTROL);
  1709. val |= (FIT_CR_SOFT_RESET);
  1710. dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
  1711. SKD_WRITEL(skdev, val, FIT_CONTROL);
  1712. }
  1713. static void skd_start_device(struct skd_device *skdev)
  1714. {
  1715. unsigned long flags;
  1716. u32 sense;
  1717. u32 state;
  1718. spin_lock_irqsave(&skdev->lock, flags);
  1719. /* ack all ghost interrupts */
  1720. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  1721. sense = SKD_READL(skdev, FIT_STATUS);
  1722. dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
  1723. state = sense & FIT_SR_DRIVE_STATE_MASK;
  1724. skdev->drive_state = state;
  1725. skdev->last_mtd = 0;
  1726. skdev->state = SKD_DRVR_STATE_STARTING;
  1727. skdev->timer_countdown = SKD_STARTING_TIMO;
  1728. skd_enable_interrupts(skdev);
  1729. switch (skdev->drive_state) {
  1730. case FIT_SR_DRIVE_OFFLINE:
  1731. dev_err(&skdev->pdev->dev, "Drive offline...\n");
  1732. break;
  1733. case FIT_SR_DRIVE_FW_BOOTING:
  1734. dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
  1735. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  1736. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  1737. break;
  1738. case FIT_SR_DRIVE_BUSY_SANITIZE:
  1739. dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
  1740. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  1741. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  1742. break;
  1743. case FIT_SR_DRIVE_BUSY_ERASE:
  1744. dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
  1745. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  1746. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  1747. break;
  1748. case FIT_SR_DRIVE_INIT:
  1749. case FIT_SR_DRIVE_ONLINE:
  1750. skd_soft_reset(skdev);
  1751. break;
  1752. case FIT_SR_DRIVE_BUSY:
  1753. dev_err(&skdev->pdev->dev, "Drive Busy...\n");
  1754. skdev->state = SKD_DRVR_STATE_BUSY;
  1755. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  1756. break;
  1757. case FIT_SR_DRIVE_SOFT_RESET:
  1758. dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
  1759. break;
  1760. case FIT_SR_DRIVE_FAULT:
		/* The fault state is bad and a soft reset won't clear it.
		 * A hard reset might, but it is unclear whether the device
		 * supports one. For now, just fault so the system doesn't hang.
		 */
  1765. skd_drive_fault(skdev);
		/* start the queue so we can fail pending requests with an error */
  1767. dev_dbg(&skdev->pdev->dev, "starting queue\n");
  1768. schedule_work(&skdev->start_queue);
  1769. skdev->gendisk_on = -1;
  1770. wake_up_interruptible(&skdev->waitq);
  1771. break;
  1772. case 0xFF:
  1773. /* Most likely the device isn't there or isn't responding
  1774. * to the BAR1 addresses. */
  1775. skd_drive_disappeared(skdev);
		/* start the queue so we can fail pending requests with an error */
  1777. dev_dbg(&skdev->pdev->dev,
  1778. "starting queue to error-out reqs\n");
  1779. schedule_work(&skdev->start_queue);
  1780. skdev->gendisk_on = -1;
  1781. wake_up_interruptible(&skdev->waitq);
  1782. break;
  1783. default:
  1784. dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
  1785. skdev->drive_state);
  1786. break;
  1787. }
  1788. state = SKD_READL(skdev, FIT_CONTROL);
  1789. dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
  1790. state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  1791. dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
  1792. state = SKD_READL(skdev, FIT_INT_MASK_HOST);
  1793. dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
  1794. state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  1795. dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
  1796. state = SKD_READL(skdev, FIT_HW_VERSION);
  1797. dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
  1798. spin_unlock_irqrestore(&skdev->lock, flags);
  1799. }
  1800. static void skd_stop_device(struct skd_device *skdev)
  1801. {
  1802. unsigned long flags;
  1803. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1804. u32 dev_state;
  1805. int i;
  1806. spin_lock_irqsave(&skdev->lock, flags);
  1807. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  1808. dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
  1809. goto stop_out;
  1810. }
  1811. if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
  1812. dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
  1813. goto stop_out;
  1814. }
  1815. skdev->state = SKD_DRVR_STATE_SYNCING;
  1816. skdev->sync_done = 0;
  1817. skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
  1818. spin_unlock_irqrestore(&skdev->lock, flags);
  1819. wait_event_interruptible_timeout(skdev->waitq,
  1820. (skdev->sync_done), (10 * HZ));
  1821. spin_lock_irqsave(&skdev->lock, flags);
  1822. switch (skdev->sync_done) {
  1823. case 0:
  1824. dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
  1825. break;
  1826. case 1:
  1827. dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
  1828. break;
  1829. default:
  1830. dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
  1831. }
  1832. stop_out:
  1833. skdev->state = SKD_DRVR_STATE_STOPPING;
  1834. spin_unlock_irqrestore(&skdev->lock, flags);
  1835. skd_kill_timer(skdev);
  1836. spin_lock_irqsave(&skdev->lock, flags);
  1837. skd_disable_interrupts(skdev);
  1838. /* ensure all ints on device are cleared */
  1839. /* soft reset the device to unload with a clean slate */
  1840. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  1841. SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
  1842. spin_unlock_irqrestore(&skdev->lock, flags);
  1843. /* poll every 100ms, 1 second timeout */
  1844. for (i = 0; i < 10; i++) {
  1845. dev_state =
  1846. SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
  1847. if (dev_state == FIT_SR_DRIVE_INIT)
  1848. break;
  1849. set_current_state(TASK_INTERRUPTIBLE);
  1850. schedule_timeout(msecs_to_jiffies(100));
  1851. }
  1852. if (dev_state != FIT_SR_DRIVE_INIT)
  1853. dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
  1854. dev_state);
  1855. }
  1856. /* assume spinlock is held */
  1857. static void skd_restart_device(struct skd_device *skdev)
  1858. {
  1859. u32 state;
  1860. /* ack all ghost interrupts */
  1861. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  1862. state = SKD_READL(skdev, FIT_STATUS);
  1863. dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
  1864. state &= FIT_SR_DRIVE_STATE_MASK;
  1865. skdev->drive_state = state;
  1866. skdev->last_mtd = 0;
  1867. skdev->state = SKD_DRVR_STATE_RESTARTING;
  1868. skdev->timer_countdown = SKD_RESTARTING_TIMO;
  1869. skd_soft_reset(skdev);
  1870. }
  1871. /* assume spinlock is held */
  1872. static int skd_quiesce_dev(struct skd_device *skdev)
  1873. {
  1874. int rc = 0;
  1875. switch (skdev->state) {
  1876. case SKD_DRVR_STATE_BUSY:
  1877. case SKD_DRVR_STATE_BUSY_IMMINENT:
  1878. dev_dbg(&skdev->pdev->dev, "stopping queue\n");
  1879. blk_mq_stop_hw_queues(skdev->queue);
  1880. break;
  1881. case SKD_DRVR_STATE_ONLINE:
  1882. case SKD_DRVR_STATE_STOPPING:
  1883. case SKD_DRVR_STATE_SYNCING:
  1884. case SKD_DRVR_STATE_PAUSING:
  1885. case SKD_DRVR_STATE_PAUSED:
  1886. case SKD_DRVR_STATE_STARTING:
  1887. case SKD_DRVR_STATE_RESTARTING:
  1888. case SKD_DRVR_STATE_RESUMING:
  1889. default:
  1890. rc = -EINVAL;
  1891. dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
  1892. skdev->state);
  1893. }
  1894. return rc;
  1895. }
  1896. /* assume spinlock is held */
  1897. static int skd_unquiesce_dev(struct skd_device *skdev)
  1898. {
  1899. int prev_driver_state = skdev->state;
  1900. skd_log_skdev(skdev, "unquiesce");
  1901. if (skdev->state == SKD_DRVR_STATE_ONLINE) {
  1902. dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
  1903. return 0;
  1904. }
  1905. if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
		/*
		 * If there has been a state change to something other than
		 * ONLINE, we will rely on the controller state change
		 * to come back online and restart the queue.
		 * The BUSY state means that the driver is ready to
		 * continue normal processing but is waiting for the
		 * controller to become available.
		 */
  1914. skdev->state = SKD_DRVR_STATE_BUSY;
  1915. dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
  1916. return 0;
  1917. }
	/*
	 * The drive has just come online. The driver is either in startup,
	 * paused performing a task, or busy waiting for hardware.
	 */
  1922. switch (skdev->state) {
  1923. case SKD_DRVR_STATE_PAUSED:
  1924. case SKD_DRVR_STATE_BUSY:
  1925. case SKD_DRVR_STATE_BUSY_IMMINENT:
  1926. case SKD_DRVR_STATE_BUSY_ERASE:
  1927. case SKD_DRVR_STATE_STARTING:
  1928. case SKD_DRVR_STATE_RESTARTING:
  1929. case SKD_DRVR_STATE_FAULT:
  1930. case SKD_DRVR_STATE_IDLE:
  1931. case SKD_DRVR_STATE_LOAD:
  1932. skdev->state = SKD_DRVR_STATE_ONLINE;
  1933. dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
  1934. skd_skdev_state_to_str(prev_driver_state),
  1935. prev_driver_state, skd_skdev_state_to_str(skdev->state),
  1936. skdev->state);
  1937. dev_dbg(&skdev->pdev->dev,
  1938. "**** device ONLINE...starting block queue\n");
  1939. dev_dbg(&skdev->pdev->dev, "starting queue\n");
  1940. dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
  1941. schedule_work(&skdev->start_queue);
  1942. skdev->gendisk_on = 1;
  1943. wake_up_interruptible(&skdev->waitq);
  1944. break;
  1945. case SKD_DRVR_STATE_DISAPPEARED:
  1946. default:
  1947. dev_dbg(&skdev->pdev->dev,
  1948. "**** driver state %d, not implemented\n",
  1949. skdev->state);
  1950. return -EBUSY;
  1951. }
  1952. return 0;
  1953. }
  1954. /*
  1955. *****************************************************************************
  1956. * PCIe MSI/MSI-X INTERRUPT HANDLERS
  1957. *****************************************************************************
  1958. */
  1959. static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
  1960. {
  1961. struct skd_device *skdev = skd_host_data;
  1962. unsigned long flags;
  1963. spin_lock_irqsave(&skdev->lock, flags);
  1964. dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
  1965. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  1966. dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
  1967. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  1968. SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
  1969. spin_unlock_irqrestore(&skdev->lock, flags);
  1970. return IRQ_HANDLED;
  1971. }
  1972. static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
  1973. {
  1974. struct skd_device *skdev = skd_host_data;
  1975. unsigned long flags;
  1976. spin_lock_irqsave(&skdev->lock, flags);
  1977. dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
  1978. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  1979. SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
  1980. skd_isr_fwstate(skdev);
  1981. spin_unlock_irqrestore(&skdev->lock, flags);
  1982. return IRQ_HANDLED;
  1983. }
  1984. static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
  1985. {
  1986. struct skd_device *skdev = skd_host_data;
  1987. unsigned long flags;
  1988. int flush_enqueued = 0;
  1989. int deferred;
  1990. spin_lock_irqsave(&skdev->lock, flags);
  1991. dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
  1992. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  1993. SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
  1994. deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
  1995. &flush_enqueued);
  1996. if (flush_enqueued)
  1997. schedule_work(&skdev->start_queue);
  1998. if (deferred)
  1999. schedule_work(&skdev->completion_worker);
  2000. else if (!flush_enqueued)
  2001. schedule_work(&skdev->start_queue);
  2002. spin_unlock_irqrestore(&skdev->lock, flags);
  2003. return IRQ_HANDLED;
  2004. }
  2005. static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
  2006. {
  2007. struct skd_device *skdev = skd_host_data;
  2008. unsigned long flags;
  2009. spin_lock_irqsave(&skdev->lock, flags);
  2010. dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
  2011. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  2012. SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
  2013. skd_isr_msg_from_dev(skdev);
  2014. spin_unlock_irqrestore(&skdev->lock, flags);
  2015. return IRQ_HANDLED;
  2016. }
  2017. static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
  2018. {
  2019. struct skd_device *skdev = skd_host_data;
  2020. unsigned long flags;
  2021. spin_lock_irqsave(&skdev->lock, flags);
  2022. dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
  2023. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  2024. SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
  2025. spin_unlock_irqrestore(&skdev->lock, flags);
  2026. return IRQ_HANDLED;
  2027. }
  2028. /*
  2029. *****************************************************************************
  2030. * PCIe MSI/MSI-X SETUP
  2031. *****************************************************************************
  2032. */
  2033. struct skd_msix_entry {
  2034. char isr_name[30];
  2035. };
  2036. struct skd_init_msix_entry {
  2037. const char *name;
  2038. irq_handler_t handler;
  2039. };
  2040. #define SKD_MAX_MSIX_COUNT 13
  2041. #define SKD_MIN_MSIX_COUNT 7
  2042. #define SKD_BASE_MSIX_IRQ 4
  2043. static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
  2044. { "(DMA 0)", skd_reserved_isr },
  2045. { "(DMA 1)", skd_reserved_isr },
  2046. { "(DMA 2)", skd_reserved_isr },
  2047. { "(DMA 3)", skd_reserved_isr },
  2048. { "(State Change)", skd_statec_isr },
  2049. { "(COMPL_Q)", skd_comp_q },
  2050. { "(MSG)", skd_msg_isr },
  2051. { "(Reserved)", skd_reserved_isr },
  2052. { "(Reserved)", skd_reserved_isr },
  2053. { "(Queue Full 0)", skd_qfull_isr },
  2054. { "(Queue Full 1)", skd_qfull_isr },
  2055. { "(Queue Full 2)", skd_qfull_isr },
  2056. { "(Queue Full 3)", skd_qfull_isr },
  2057. };
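/*
 * The index into msix_entries is also the MSI-X vector number handed to
 * pci_irq_vector() in skd_acquire_msix() below.
 */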
  2058. static int skd_acquire_msix(struct skd_device *skdev)
  2059. {
  2060. int i, rc;
  2061. struct pci_dev *pdev = skdev->pdev;
  2062. rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
  2063. PCI_IRQ_MSIX);
  2064. if (rc < 0) {
  2065. dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
  2066. goto out;
  2067. }
  2068. skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
  2069. sizeof(struct skd_msix_entry), GFP_KERNEL);
  2070. if (!skdev->msix_entries) {
  2071. rc = -ENOMEM;
  2072. dev_err(&skdev->pdev->dev, "msix table allocation error\n");
  2073. goto out;
  2074. }
  2075. /* Enable MSI-X vectors for the base queue */
  2076. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
  2077. struct skd_msix_entry *qentry = &skdev->msix_entries[i];
  2078. snprintf(qentry->isr_name, sizeof(qentry->isr_name),
  2079. "%s%d-msix %s", DRV_NAME, skdev->devno,
  2080. msix_entries[i].name);
  2081. rc = devm_request_irq(&skdev->pdev->dev,
  2082. pci_irq_vector(skdev->pdev, i),
  2083. msix_entries[i].handler, 0,
  2084. qentry->isr_name, skdev);
  2085. if (rc) {
  2086. dev_err(&skdev->pdev->dev,
  2087. "Unable to register(%d) MSI-X handler %d: %s\n",
  2088. rc, i, qentry->isr_name);
  2089. goto msix_out;
  2090. }
  2091. }
  2092. dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
  2093. SKD_MAX_MSIX_COUNT);
  2094. return 0;
  2095. msix_out:
  2096. while (--i >= 0)
  2097. devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
  2098. out:
  2099. kfree(skdev->msix_entries);
  2100. skdev->msix_entries = NULL;
  2101. return rc;
  2102. }
  2103. static int skd_acquire_irq(struct skd_device *skdev)
  2104. {
  2105. struct pci_dev *pdev = skdev->pdev;
  2106. unsigned int irq_flag = PCI_IRQ_LEGACY;
  2107. int rc;
  2108. if (skd_isr_type == SKD_IRQ_MSIX) {
  2109. rc = skd_acquire_msix(skdev);
  2110. if (!rc)
  2111. return 0;
  2112. dev_err(&skdev->pdev->dev,
  2113. "failed to enable MSI-X, re-trying with MSI %d\n", rc);
  2114. }
  2115. snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
  2116. skdev->devno);
  2117. if (skd_isr_type != SKD_IRQ_LEGACY)
  2118. irq_flag |= PCI_IRQ_MSI;
  2119. rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
  2120. if (rc < 0) {
  2121. dev_err(&skdev->pdev->dev,
  2122. "failed to allocate the MSI interrupt %d\n", rc);
  2123. return rc;
  2124. }
  2125. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
  2126. pdev->msi_enabled ? 0 : IRQF_SHARED,
  2127. skdev->isr_name, skdev);
  2128. if (rc) {
  2129. pci_free_irq_vectors(pdev);
  2130. dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
  2131. rc);
  2132. return rc;
  2133. }
  2134. return 0;
  2135. }
  2136. static void skd_release_irq(struct skd_device *skdev)
  2137. {
  2138. struct pci_dev *pdev = skdev->pdev;
  2139. if (skdev->msix_entries) {
  2140. int i;
  2141. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
  2142. devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
  2143. skdev);
  2144. }
  2145. kfree(skdev->msix_entries);
  2146. skdev->msix_entries = NULL;
  2147. } else {
  2148. devm_free_irq(&pdev->dev, pdev->irq, skdev);
  2149. }
  2150. pci_free_irq_vectors(pdev);
  2151. }
  2152. /*
  2153. *****************************************************************************
  2154. * CONSTRUCT
  2155. *****************************************************************************
  2156. */
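/*
 * Allocate a buffer from cache @s and map it for DMA; callers must release
 * it with skd_free_dma() using the same cache and direction.
 */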
  2157. static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
  2158. dma_addr_t *dma_handle, gfp_t gfp,
  2159. enum dma_data_direction dir)
  2160. {
  2161. struct device *dev = &skdev->pdev->dev;
  2162. void *buf;
  2163. buf = kmem_cache_alloc(s, gfp);
  2164. if (!buf)
  2165. return NULL;
  2166. *dma_handle = dma_map_single(dev, buf,
  2167. kmem_cache_size(s), dir);
  2168. if (dma_mapping_error(dev, *dma_handle)) {
  2169. kmem_cache_free(s, buf);
  2170. buf = NULL;
  2171. }
  2172. return buf;
  2173. }
  2174. static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
  2175. void *vaddr, dma_addr_t dma_handle,
  2176. enum dma_data_direction dir)
  2177. {
  2178. if (!vaddr)
  2179. return;
  2180. dma_unmap_single(&skdev->pdev->dev, dma_handle,
  2181. kmem_cache_size(s), dir);
  2182. kmem_cache_free(s, vaddr);
  2183. }
  2184. static int skd_cons_skcomp(struct skd_device *skdev)
  2185. {
  2186. int rc = 0;
  2187. struct fit_completion_entry_v1 *skcomp;
  2188. dev_dbg(&skdev->pdev->dev,
  2189. "comp pci_alloc, total bytes %zd entries %d\n",
  2190. SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
  2191. skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
  2192. &skdev->cq_dma_address, GFP_KERNEL);
  2193. if (skcomp == NULL) {
  2194. rc = -ENOMEM;
  2195. goto err_out;
  2196. }
  2197. skdev->skcomp_table = skcomp;
  2198. skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
  2199. sizeof(*skcomp) *
  2200. SKD_N_COMPLETION_ENTRY);
  2201. err_out:
  2202. return rc;
  2203. }
  2204. static int skd_cons_skmsg(struct skd_device *skdev)
  2205. {
  2206. int rc = 0;
  2207. u32 i;
  2208. dev_dbg(&skdev->pdev->dev,
  2209. "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
  2210. sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
  2211. sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
  2212. skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
  2213. sizeof(struct skd_fitmsg_context),
  2214. GFP_KERNEL);
  2215. if (skdev->skmsg_table == NULL) {
  2216. rc = -ENOMEM;
  2217. goto err_out;
  2218. }
  2219. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  2220. struct skd_fitmsg_context *skmsg;
  2221. skmsg = &skdev->skmsg_table[i];
  2222. skmsg->id = i + SKD_ID_FIT_MSG;
  2223. skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
  2224. SKD_N_FITMSG_BYTES,
  2225. &skmsg->mb_dma_address,
  2226. GFP_KERNEL);
  2227. if (skmsg->msg_buf == NULL) {
  2228. rc = -ENOMEM;
  2229. goto err_out;
  2230. }
  2231. WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
  2232. (FIT_QCMD_ALIGN - 1),
  2233. "not aligned: msg_buf %p mb_dma_address %pad\n",
  2234. skmsg->msg_buf, &skmsg->mb_dma_address);
  2235. }
  2236. err_out:
  2237. return rc;
  2238. }
  2239. static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
  2240. u32 n_sg,
  2241. dma_addr_t *ret_dma_addr)
  2242. {
  2243. struct fit_sg_descriptor *sg_list;
  2244. sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
  2245. GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
  2246. if (sg_list != NULL) {
  2247. uint64_t dma_address = *ret_dma_addr;
  2248. u32 i;
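		/*
		 * Chain the descriptors: each entry's next_desc_ptr holds the
		 * bus address of the following descriptor, and the last entry
		 * is terminated with 0.
		 */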
  2249. for (i = 0; i < n_sg - 1; i++) {
  2250. uint64_t ndp_off;
  2251. ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
  2252. sg_list[i].next_desc_ptr = dma_address + ndp_off;
  2253. }
  2254. sg_list[i].next_desc_ptr = 0LL;
  2255. }
  2256. return sg_list;
  2257. }
  2258. static void skd_free_sg_list(struct skd_device *skdev,
  2259. struct fit_sg_descriptor *sg_list,
  2260. dma_addr_t dma_addr)
  2261. {
  2262. if (WARN_ON_ONCE(!sg_list))
  2263. return;
  2264. skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
  2265. DMA_TO_DEVICE);
  2266. }
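/*
 * blk-mq invokes these callbacks when the tag set is allocated and freed,
 * so each request's FIT SG list is set up once here rather than in the
 * I/O path.
 */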
  2267. static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
  2268. unsigned int hctx_idx, unsigned int numa_node)
  2269. {
  2270. struct skd_device *skdev = set->driver_data;
  2271. struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
  2272. skreq->state = SKD_REQ_STATE_IDLE;
  2273. skreq->sg = (void *)(skreq + 1);
  2274. sg_init_table(skreq->sg, skd_sgs_per_request);
  2275. skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
  2276. &skreq->sksg_dma_address);
  2277. return skreq->sksg_list ? 0 : -ENOMEM;
  2278. }
  2279. static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
  2280. unsigned int hctx_idx)
  2281. {
  2282. struct skd_device *skdev = set->driver_data;
  2283. struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
  2284. skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
  2285. }
  2286. static int skd_cons_sksb(struct skd_device *skdev)
  2287. {
  2288. int rc = 0;
  2289. struct skd_special_context *skspcl;
  2290. skspcl = &skdev->internal_skspcl;
  2291. skspcl->req.id = 0 + SKD_ID_INTERNAL;
  2292. skspcl->req.state = SKD_REQ_STATE_IDLE;
  2293. skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
  2294. &skspcl->db_dma_address,
  2295. GFP_DMA | __GFP_ZERO,
  2296. DMA_BIDIRECTIONAL);
  2297. if (skspcl->data_buf == NULL) {
  2298. rc = -ENOMEM;
  2299. goto err_out;
  2300. }
  2301. skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
  2302. &skspcl->mb_dma_address,
  2303. GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
  2304. if (skspcl->msg_buf == NULL) {
  2305. rc = -ENOMEM;
  2306. goto err_out;
  2307. }
  2308. skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
  2309. &skspcl->req.sksg_dma_address);
  2310. if (skspcl->req.sksg_list == NULL) {
  2311. rc = -ENOMEM;
  2312. goto err_out;
  2313. }
  2314. if (!skd_format_internal_skspcl(skdev)) {
  2315. rc = -EINVAL;
  2316. goto err_out;
  2317. }
  2318. err_out:
  2319. return rc;
  2320. }
  2321. static const struct blk_mq_ops skd_mq_ops = {
  2322. .queue_rq = skd_mq_queue_rq,
  2323. .complete = skd_complete_rq,
  2324. .timeout = skd_timed_out,
  2325. .init_request = skd_init_request,
  2326. .exit_request = skd_exit_request,
  2327. };
  2328. static int skd_cons_disk(struct skd_device *skdev)
  2329. {
  2330. int rc = 0;
  2331. struct gendisk *disk;
  2332. struct request_queue *q;
  2333. unsigned long flags;
  2334. disk = alloc_disk(SKD_MINORS_PER_DEVICE);
  2335. if (!disk) {
  2336. rc = -ENOMEM;
  2337. goto err_out;
  2338. }
  2339. skdev->disk = disk;
  2340. sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
  2341. disk->major = skdev->major;
  2342. disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
  2343. disk->fops = &skd_blockdev_ops;
  2344. disk->private_data = skdev;
  2345. memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
  2346. skdev->tag_set.ops = &skd_mq_ops;
  2347. skdev->tag_set.nr_hw_queues = 1;
  2348. skdev->tag_set.queue_depth = skd_max_queue_depth;
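	/*
	 * cmd_size reserves room for the scatterlist array immediately behind
	 * each skd_request_context; skd_init_request() points skreq->sg at it.
	 */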
  2349. skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
  2350. skdev->sgs_per_request * sizeof(struct scatterlist);
  2351. skdev->tag_set.numa_node = NUMA_NO_NODE;
  2352. skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
  2353. BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
  2354. skdev->tag_set.driver_data = skdev;
  2355. rc = blk_mq_alloc_tag_set(&skdev->tag_set);
  2356. if (rc)
  2357. goto err_out;
  2358. q = blk_mq_init_queue(&skdev->tag_set);
  2359. if (IS_ERR(q)) {
  2360. blk_mq_free_tag_set(&skdev->tag_set);
  2361. rc = PTR_ERR(q);
  2362. goto err_out;
  2363. }
  2364. q->queuedata = skdev;
  2365. skdev->queue = q;
  2366. disk->queue = q;
  2367. blk_queue_write_cache(q, true, true);
  2368. blk_queue_max_segments(q, skdev->sgs_per_request);
  2369. blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
  2370. /* set optimal I/O size to 8KB */
  2371. blk_queue_io_opt(q, 8192);
  2372. blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
  2373. blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
  2374. blk_queue_rq_timeout(q, 8 * HZ);
  2375. spin_lock_irqsave(&skdev->lock, flags);
  2376. dev_dbg(&skdev->pdev->dev, "stopping queue\n");
  2377. blk_mq_stop_hw_queues(skdev->queue);
  2378. spin_unlock_irqrestore(&skdev->lock, flags);
  2379. err_out:
  2380. return rc;
  2381. }
  2382. #define SKD_N_DEV_TABLE 16u
  2383. static u32 skd_next_devno;
  2384. static struct skd_device *skd_construct(struct pci_dev *pdev)
  2385. {
  2386. struct skd_device *skdev;
  2387. int blk_major = skd_major;
  2388. size_t size;
  2389. int rc;
  2390. skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
  2391. if (!skdev) {
  2392. dev_err(&pdev->dev, "memory alloc failure\n");
  2393. return NULL;
  2394. }
  2395. skdev->state = SKD_DRVR_STATE_LOAD;
  2396. skdev->pdev = pdev;
  2397. skdev->devno = skd_next_devno++;
  2398. skdev->major = blk_major;
  2399. skdev->dev_max_queue_depth = 0;
  2400. skdev->num_req_context = skd_max_queue_depth;
  2401. skdev->num_fitmsg_context = skd_max_queue_depth;
  2402. skdev->cur_max_queue_depth = 1;
  2403. skdev->queue_low_water_mark = 1;
  2404. skdev->proto_ver = 99;
  2405. skdev->sgs_per_request = skd_sgs_per_request;
  2406. skdev->dbg_level = skd_dbg_level;
  2407. spin_lock_init(&skdev->lock);
  2408. INIT_WORK(&skdev->start_queue, skd_start_queue);
  2409. INIT_WORK(&skdev->completion_worker, skd_completion_worker);
  2410. size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
  2411. skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
  2412. SLAB_HWCACHE_ALIGN, NULL);
  2413. if (!skdev->msgbuf_cache)
  2414. goto err_out;
  2415. WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
  2416. "skd-msgbuf: %d < %zd\n",
  2417. kmem_cache_size(skdev->msgbuf_cache), size);
  2418. size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
  2419. skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
  2420. SLAB_HWCACHE_ALIGN, NULL);
  2421. if (!skdev->sglist_cache)
  2422. goto err_out;
  2423. WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
  2424. "skd-sglist: %d < %zd\n",
  2425. kmem_cache_size(skdev->sglist_cache), size);
  2426. size = SKD_N_INTERNAL_BYTES;
  2427. skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
  2428. SLAB_HWCACHE_ALIGN, NULL);
  2429. if (!skdev->databuf_cache)
  2430. goto err_out;
  2431. WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
  2432. "skd-databuf: %d < %zd\n",
  2433. kmem_cache_size(skdev->databuf_cache), size);
  2434. dev_dbg(&skdev->pdev->dev, "skcomp\n");
  2435. rc = skd_cons_skcomp(skdev);
  2436. if (rc < 0)
  2437. goto err_out;
  2438. dev_dbg(&skdev->pdev->dev, "skmsg\n");
  2439. rc = skd_cons_skmsg(skdev);
  2440. if (rc < 0)
  2441. goto err_out;
  2442. dev_dbg(&skdev->pdev->dev, "sksb\n");
  2443. rc = skd_cons_sksb(skdev);
  2444. if (rc < 0)
  2445. goto err_out;
  2446. dev_dbg(&skdev->pdev->dev, "disk\n");
  2447. rc = skd_cons_disk(skdev);
  2448. if (rc < 0)
  2449. goto err_out;
  2450. dev_dbg(&skdev->pdev->dev, "VICTORY\n");
  2451. return skdev;
  2452. err_out:
  2453. dev_dbg(&skdev->pdev->dev, "construct failed\n");
  2454. skd_destruct(skdev);
  2455. return NULL;
  2456. }
  2457. /*
  2458. *****************************************************************************
  2459. * DESTRUCT (FREE)
  2460. *****************************************************************************
  2461. */
  2462. static void skd_free_skcomp(struct skd_device *skdev)
  2463. {
  2464. if (skdev->skcomp_table)
  2465. dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
  2466. skdev->skcomp_table, skdev->cq_dma_address);
  2467. skdev->skcomp_table = NULL;
  2468. skdev->cq_dma_address = 0;
  2469. }
  2470. static void skd_free_skmsg(struct skd_device *skdev)
  2471. {
  2472. u32 i;
  2473. if (skdev->skmsg_table == NULL)
  2474. return;
  2475. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  2476. struct skd_fitmsg_context *skmsg;
  2477. skmsg = &skdev->skmsg_table[i];
  2478. if (skmsg->msg_buf != NULL) {
  2479. dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
  2480. skmsg->msg_buf,
  2481. skmsg->mb_dma_address);
  2482. }
  2483. skmsg->msg_buf = NULL;
  2484. skmsg->mb_dma_address = 0;
  2485. }
  2486. kfree(skdev->skmsg_table);
  2487. skdev->skmsg_table = NULL;
  2488. }
  2489. static void skd_free_sksb(struct skd_device *skdev)
  2490. {
  2491. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  2492. skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
  2493. skspcl->db_dma_address, DMA_BIDIRECTIONAL);
  2494. skspcl->data_buf = NULL;
  2495. skspcl->db_dma_address = 0;
  2496. skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
  2497. skspcl->mb_dma_address, DMA_TO_DEVICE);
  2498. skspcl->msg_buf = NULL;
  2499. skspcl->mb_dma_address = 0;
  2500. skd_free_sg_list(skdev, skspcl->req.sksg_list,
  2501. skspcl->req.sksg_dma_address);
  2502. skspcl->req.sksg_list = NULL;
  2503. skspcl->req.sksg_dma_address = 0;
  2504. }
  2505. static void skd_free_disk(struct skd_device *skdev)
  2506. {
  2507. struct gendisk *disk = skdev->disk;
  2508. if (disk && (disk->flags & GENHD_FL_UP))
  2509. del_gendisk(disk);
  2510. if (skdev->queue) {
  2511. blk_cleanup_queue(skdev->queue);
  2512. skdev->queue = NULL;
  2513. if (disk)
  2514. disk->queue = NULL;
  2515. }
  2516. if (skdev->tag_set.tags)
  2517. blk_mq_free_tag_set(&skdev->tag_set);
  2518. put_disk(disk);
  2519. skdev->disk = NULL;
  2520. }
  2521. static void skd_destruct(struct skd_device *skdev)
  2522. {
  2523. if (skdev == NULL)
  2524. return;
  2525. cancel_work_sync(&skdev->start_queue);
  2526. dev_dbg(&skdev->pdev->dev, "disk\n");
  2527. skd_free_disk(skdev);
  2528. dev_dbg(&skdev->pdev->dev, "sksb\n");
  2529. skd_free_sksb(skdev);
  2530. dev_dbg(&skdev->pdev->dev, "skmsg\n");
  2531. skd_free_skmsg(skdev);
  2532. dev_dbg(&skdev->pdev->dev, "skcomp\n");
  2533. skd_free_skcomp(skdev);
  2534. kmem_cache_destroy(skdev->databuf_cache);
  2535. kmem_cache_destroy(skdev->sglist_cache);
  2536. kmem_cache_destroy(skdev->msgbuf_cache);
  2537. dev_dbg(&skdev->pdev->dev, "skdev\n");
  2538. kfree(skdev);
  2539. }
  2540. /*
  2541. *****************************************************************************
  2542. * BLOCK DEVICE (BDEV) GLUE
  2543. *****************************************************************************
  2544. */
  2545. static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  2546. {
  2547. struct skd_device *skdev;
  2548. u64 capacity;
  2549. skdev = bdev->bd_disk->private_data;
  2550. dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
  2551. bdev->bd_disk->disk_name, current->comm);
  2552. if (skdev->read_cap_is_valid) {
  2553. capacity = get_capacity(skdev->disk);
  2554. geo->heads = 64;
  2555. geo->sectors = 255;
  2556. geo->cylinders = (capacity) / (255 * 64);
  2557. return 0;
  2558. }
  2559. return -EIO;
  2560. }
  2561. static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
  2562. {
  2563. dev_dbg(&skdev->pdev->dev, "add_disk\n");
  2564. device_add_disk(parent, skdev->disk, NULL);
  2565. return 0;
  2566. }
  2567. static const struct block_device_operations skd_blockdev_ops = {
  2568. .owner = THIS_MODULE,
  2569. .getgeo = skd_bdev_getgeo,
  2570. };
  2571. /*
  2572. *****************************************************************************
  2573. * PCIe DRIVER GLUE
  2574. *****************************************************************************
  2575. */
  2576. static const struct pci_device_id skd_pci_tbl[] = {
  2577. { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
  2578. PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
  2579. { 0 } /* terminate list */
  2580. };
  2581. MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
  2582. static char *skd_pci_info(struct skd_device *skdev, char *str)
  2583. {
  2584. int pcie_reg;
  2585. strcpy(str, "PCIe (");
  2586. pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
  2587. if (pcie_reg) {
  2588. char lwstr[6];
  2589. uint16_t pcie_lstat, lspeed, lwidth;
  2590. pcie_reg += 0x12;
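		/* 0x12 is the Link Status register offset within the PCIe capability */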
  2591. pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
  2592. lspeed = pcie_lstat & (0xF);
  2593. lwidth = (pcie_lstat & 0x3F0) >> 4;
  2594. if (lspeed == 1)
  2595. strcat(str, "2.5GT/s ");
  2596. else if (lspeed == 2)
  2597. strcat(str, "5.0GT/s ");
  2598. else
  2599. strcat(str, "<unknown> ");
  2600. snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
  2601. strcat(str, lwstr);
  2602. }
  2603. return str;
  2604. }
  2605. static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  2606. {
  2607. int i;
  2608. int rc = 0;
  2609. char pci_str[32];
  2610. struct skd_device *skdev;
  2611. dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
  2612. pdev->device);
  2613. rc = pci_enable_device(pdev);
  2614. if (rc)
  2615. return rc;
  2616. rc = pci_request_regions(pdev, DRV_NAME);
  2617. if (rc)
  2618. goto err_out;
  2619. rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  2620. if (rc)
  2621. rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  2622. if (rc) {
  2623. dev_err(&pdev->dev, "DMA mask error %d\n", rc);
  2624. goto err_out_regions;
  2625. }
  2626. if (!skd_major) {
  2627. rc = register_blkdev(0, DRV_NAME);
  2628. if (rc < 0)
  2629. goto err_out_regions;
  2630. BUG_ON(!rc);
  2631. skd_major = rc;
  2632. }
  2633. skdev = skd_construct(pdev);
  2634. if (skdev == NULL) {
  2635. rc = -ENOMEM;
  2636. goto err_out_regions;
  2637. }
  2638. skd_pci_info(skdev, pci_str);
  2639. dev_info(&pdev->dev, "%s 64bit\n", pci_str);
  2640. pci_set_master(pdev);
  2641. rc = pci_enable_pcie_error_reporting(pdev);
  2642. if (rc) {
  2643. dev_err(&pdev->dev,
  2644. "bad enable of PCIe error reporting rc=%d\n", rc);
  2645. skdev->pcie_error_reporting_is_enabled = 0;
  2646. } else
  2647. skdev->pcie_error_reporting_is_enabled = 1;
  2648. pci_set_drvdata(pdev, skdev);
  2649. for (i = 0; i < SKD_MAX_BARS; i++) {
  2650. skdev->mem_phys[i] = pci_resource_start(pdev, i);
  2651. skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
  2652. skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
  2653. skdev->mem_size[i]);
  2654. if (!skdev->mem_map[i]) {
  2655. dev_err(&pdev->dev,
  2656. "Unable to map adapter memory!\n");
  2657. rc = -ENODEV;
  2658. goto err_out_iounmap;
  2659. }
  2660. dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
  2661. skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
  2662. skdev->mem_size[i]);
  2663. }
  2664. rc = skd_acquire_irq(skdev);
  2665. if (rc) {
  2666. dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
  2667. goto err_out_iounmap;
  2668. }
  2669. rc = skd_start_timer(skdev);
  2670. if (rc)
  2671. goto err_out_timer;
  2672. init_waitqueue_head(&skdev->waitq);
  2673. skd_start_device(skdev);
  2674. rc = wait_event_interruptible_timeout(skdev->waitq,
  2675. (skdev->gendisk_on),
  2676. (SKD_START_WAIT_SECONDS * HZ));
  2677. if (skdev->gendisk_on > 0) {
  2678. /* device came on-line after reset */
  2679. skd_bdev_attach(&pdev->dev, skdev);
  2680. rc = 0;
  2681. } else {
		/* We timed out; something is wrong with the device,
		 * so don't add the disk structure.
		 */
		dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
			rc);
		/* if no other error was recorded, report the timeout as -ENXIO */
		if (!rc)
  2688. rc = -ENXIO;
  2689. goto err_out_timer;
  2690. }
  2691. return rc;
  2692. err_out_timer:
  2693. skd_stop_device(skdev);
  2694. skd_release_irq(skdev);
  2695. err_out_iounmap:
  2696. for (i = 0; i < SKD_MAX_BARS; i++)
  2697. if (skdev->mem_map[i])
  2698. iounmap(skdev->mem_map[i]);
  2699. if (skdev->pcie_error_reporting_is_enabled)
  2700. pci_disable_pcie_error_reporting(pdev);
  2701. skd_destruct(skdev);
  2702. err_out_regions:
  2703. pci_release_regions(pdev);
  2704. err_out:
  2705. pci_disable_device(pdev);
  2706. pci_set_drvdata(pdev, NULL);
  2707. return rc;
  2708. }
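
/*
 * Remove: undo everything probe set up - stop the device, release the IRQ,
 * unmap the BARs, disable AER, destroy the skd_device and release the PCI
 * resources.
 */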
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
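
/*
 * Legacy PCI power management: suspend quiesces the device, frees the
 * per-device resources and powers the function down; resume re-acquires
 * them and restarts the device.
 */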
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
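
/*
 * Resume remaps the BARs, re-acquires the IRQ and restarts the device; the
 * gendisk already exists, so unlike probe it does not wait for the drive to
 * report online.
 */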
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev, "DMA mask error %d\n", rc);
		goto err_out_regions;
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev, "Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
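
/*
 * Shutdown: quiesce the device so no DMA is left in flight across a reboot
 * or kexec.
 */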
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	dev_err(&pdev->dev, "%s called\n", __func__);

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}

	dev_err(&pdev->dev, "calling stop\n");
	skd_stop_device(skdev);
}
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}
const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	default:
		return "???";
	}
}
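
/*
 * Debug helpers: dump driver-wide state and per-request state via dev_dbg()
 * when a noteworthy event occurs.
 */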
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
	dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
	dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
		skd_in_flight(skdev), skdev->cur_max_queue_depth,
		skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n",
		skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	u32 lba = blk_rq_pos(req);
	u32 count = blk_rq_sectors(req);

	dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
	dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
		skreq->fitmsg_id);
	dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n",
		skreq->data_dir, skreq->n_sg);
	dev_dbg(&skdev->pdev->dev,
		"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
		count, count, (int)rq_data_dir(req));
}
/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
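
/*
 * Validate the module parameters (falling back to the defaults for any
 * out-of-range value) and register the PCI driver.  The BUILD_BUG_ON()
 * checks pin the sizes and layouts of the structures shared with the
 * device firmware at compile time.
 */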
static int __init skd_init(void)
{
	BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
	BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
	BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
	BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
	BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
	BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 ||
	    skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	return pci_register_driver(&skd_driver);
}
static void __exit skd_exit(void)
{
	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);