/*
 *  sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *  Author/maintainer:  Jeff Garzik <jgarzik@pobox.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file "COPYING" in the main directory of this archive
 *  for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <linux/uaccess.h>

#if 0
#define CARM_DEBUG
#define CARM_VERBOSE_DEBUG
#else
#undef CARM_DEBUG
#undef CARM_VERBOSE_DEBUG
#endif
#undef CARM_NDEBUG

#define DRV_NAME "sx8"
#define DRV_VERSION "1.0"
#define PFX DRV_NAME ": "

MODULE_AUTHOR("Jeff Garzik");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Promise SATA SX8 block driver");
MODULE_VERSION(DRV_VERSION);

/*
 * SX8 hardware has a single message queue for all ATA ports.
 * When this driver was written, the hardware (firmware?) would
 * corrupt data eventually, if more than one request was outstanding.
 * As one can imagine, having 8 ports bottlenecking on a single
 * command hurts performance.
 *
 * Based on user reports, later versions of the hardware (firmware?)
 * seem to be able to survive with more than one command queued.
 *
 * Therefore, we default to the safe option -- 1 command -- but
 * allow the user to increase this.
 *
 * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ),
 * but problems seem to occur when you exceed ~30, even on newer hardware.
 */
static int max_queue = 1;
module_param(max_queue, int, 0444);
MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)");
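
/*
 * Illustrative usage (not from the original source): max_queue is
 * read-only at runtime (perms 0444), so a deeper queue has to be
 * requested at module load time, e.g. "modprobe sx8 max_queue=8",
 * or as "sx8.max_queue=8" on the kernel command line when built in.
 */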

#define NEXT_RESP(idx)	((idx + 1) % RMSG_Q_LEN)

/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
#define TAG_ENCODE(tag)	(((tag) << 16) | 0xf)
#define TAG_DECODE(tag)	(((tag) >> 16) & 0x1f)
#define TAG_VALID(tag)	((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))
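
/*
 * Worked example (added for illustration): for tag 5,
 * TAG_ENCODE(5) == (5 << 16) | 0xf == 0x5000f.  TAG_DECODE(0x5000f)
 * == (0x5000f >> 16) & 0x1f == 5 recovers the tag, and
 * TAG_VALID(0x5000f) holds: the low nibble is 0xf and the decoded
 * tag is below 32.
 */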

/* note: prints function name for you */
#ifdef CARM_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#ifdef CARM_VERBOSE_DEBUG
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#else
#define VPRINTK(fmt, args...)
#endif	/* CARM_VERBOSE_DEBUG */
#else
#define DPRINTK(fmt, args...)
#define VPRINTK(fmt, args...)
#endif	/* CARM_DEBUG */

#ifdef CARM_NDEBUG
#define assert(expr)
#else
#define assert(expr) \
	if(unlikely(!(expr))) {					\
		printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n",	\
		       #expr, __FILE__, __func__, __LINE__);	\
	}
#endif

/* defines only for the constants which don't work well as enums */
struct carm_host;

enum {
	/* adapter-wide limits */
	CARM_MAX_PORTS		= 8,
	CARM_SHM_SIZE		= (4096 << 7),
	CARM_MINORS_PER_MAJOR	= 256 / CARM_MAX_PORTS,
	CARM_MAX_WAIT_Q		= CARM_MAX_PORTS + 1,

	/* command message queue limits */
	CARM_MAX_REQ		= 64,	/* max command msgs per host */
	CARM_MSG_LOW_WATER	= (CARM_MAX_REQ / 4),	/* refill mark */

	/* S/G limits, host-wide and per-request */
	CARM_MAX_REQ_SG		= 32,	/* max s/g entries per request */
	CARM_MAX_HOST_SG	= 600,	/* max s/g entries per host */
	CARM_SG_LOW_WATER	= (CARM_MAX_HOST_SG / 4),	/* re-fill mark */

	/* hardware registers */
	CARM_IHQP		= 0x1c,
	CARM_INT_STAT		= 0x10, /* interrupt status */
	CARM_INT_MASK		= 0x14, /* interrupt mask */
	CARM_HMUC		= 0x18, /* host message unit control */
	RBUF_ADDR_LO		= 0x20, /* response msg DMA buf low 32 bits */
	RBUF_ADDR_HI		= 0x24, /* response msg DMA buf high 32 bits */
	RBUF_BYTE_SZ		= 0x28,
	CARM_RESP_IDX		= 0x2c,
	CARM_CMS0		= 0x30, /* command message size reg 0 */
	CARM_LMUC		= 0x48,
	CARM_HMPHA		= 0x6c,
	CARM_INITC		= 0xb5,

	/* bits in CARM_INT_{STAT,MASK} */
	INT_RESERVED		= 0xfffffff0,
	INT_WATCHDOG		= (1 << 3),	/* watchdog timer */
	INT_Q_OVERFLOW		= (1 << 2),	/* cmd msg q overflow */
	INT_Q_AVAILABLE		= (1 << 1),	/* cmd msg q has free space */
	INT_RESPONSE		= (1 << 0),	/* response msg available */
	INT_ACK_MASK		= INT_WATCHDOG | INT_Q_OVERFLOW,
	INT_DEF_MASK		= INT_RESERVED | INT_Q_OVERFLOW |
				  INT_RESPONSE,

	/* command messages, and related register bits */
	CARM_HAVE_RESP		= 0x01,
	CARM_MSG_READ		= 1,
	CARM_MSG_WRITE		= 2,
	CARM_MSG_VERIFY		= 3,
	CARM_MSG_GET_CAPACITY	= 4,
	CARM_MSG_FLUSH		= 5,
	CARM_MSG_IOCTL		= 6,
	CARM_MSG_ARRAY		= 8,
	CARM_MSG_MISC		= 9,
	CARM_CME		= (1 << 2),
	CARM_RME		= (1 << 1),
	CARM_WZBC		= (1 << 0),
	CARM_RMI		= (1 << 0),
	CARM_Q_FULL		= (1 << 3),
	CARM_MSG_SIZE		= 288,
	CARM_Q_LEN		= 48,

	/* CARM_MSG_IOCTL messages */
	CARM_IOC_SCAN_CHAN	= 5,	/* scan channels for devices */
	CARM_IOC_GET_TCQ	= 13,	/* get tcq/ncq depth */
	CARM_IOC_SET_TCQ	= 14,	/* set tcq/ncq depth */

	IOC_SCAN_CHAN_NODEV	= 0x1f,
	IOC_SCAN_CHAN_OFFSET	= 0x40,

	/* CARM_MSG_ARRAY messages */
	CARM_ARRAY_INFO		= 0,

	ARRAY_NO_EXIST		= (1 << 31),

	/* response messages */
	RMSG_SZ			= 8,	/* sizeof(struct carm_response) */
	RMSG_Q_LEN		= 48,	/* resp. msg list length */
	RMSG_OK			= 1,	/* bit indicating msg was successful */
	/* length of entire resp. msg buffer */
	RBUF_LEN		= RMSG_SZ * RMSG_Q_LEN,

	PDC_SHM_SIZE		= (4096 << 7),	/* length of entire h/w buffer */

	/* CARM_MSG_MISC messages */
	MISC_GET_FW_VER		= 2,
	MISC_ALLOC_MEM		= 3,
	MISC_SET_TIME		= 5,

	/* MISC_GET_FW_VER feature bits */
	FW_VER_4PORT		= (1 << 2),	/* 1=4 ports, 0=8 ports */
	FW_VER_NON_RAID		= (1 << 1),	/* 1=non-RAID firmware, 0=RAID */
	FW_VER_ZCR		= (1 << 0),	/* zero channel RAID (whatever that is) */

	/* carm_host flags */
	FL_NON_RAID		= FW_VER_NON_RAID,
	FL_4PORT		= FW_VER_4PORT,
	FL_FW_VER_MASK		= (FW_VER_NON_RAID | FW_VER_4PORT),
	FL_DYN_MAJOR		= (1 << 17),
};

enum {
	CARM_SG_BOUNDARY	= 0xffffUL,	/* s/g segment boundary */
};

enum scatter_gather_types {
	SGT_32BIT		= 0,
	SGT_64BIT		= 1,
};

enum host_states {
	HST_INVALID,		/* invalid state; never used */
	HST_ALLOC_BUF,		/* setting up master SHM area */
	HST_ERROR,		/* we never leave here */
	HST_PORT_SCAN,		/* start dev scan */
	HST_DEV_SCAN_START,	/* start per-device probe */
	HST_DEV_SCAN,		/* continue per-device probe */
	HST_DEV_ACTIVATE,	/* activate devices we found */
	HST_PROBE_FINISHED,	/* probe is complete */
	HST_PROBE_START,	/* initiate probe */
	HST_SYNC_TIME,		/* tell firmware what time it is */
	HST_GET_FW_VER,		/* get firmware version, adapter port cnt */
};

#ifdef CARM_DEBUG
static const char *state_name[] = {
	"HST_INVALID",
	"HST_ALLOC_BUF",
	"HST_ERROR",
	"HST_PORT_SCAN",
	"HST_DEV_SCAN_START",
	"HST_DEV_SCAN",
	"HST_DEV_ACTIVATE",
	"HST_PROBE_FINISHED",
	"HST_PROBE_START",
	"HST_SYNC_TIME",
	"HST_GET_FW_VER",
};
#endif

struct carm_port {
	unsigned int			port_no;
	struct gendisk			*disk;
	struct carm_host		*host;

	/* attached device characteristics */
	u64				capacity;
	char				name[41];
	u16				dev_geom_head;
	u16				dev_geom_sect;
	u16				dev_geom_cyl;
};

struct carm_request {
	int				n_elem;
	unsigned int			msg_type;
	unsigned int			msg_subtype;
	unsigned int			msg_bucket;
	struct scatterlist		sg[CARM_MAX_REQ_SG];
};

struct carm_host {
	unsigned long			flags;
	void				__iomem *mmio;
	void				*shm;
	dma_addr_t			shm_dma;

	int				major;
	int				id;
	char				name[32];

	spinlock_t			lock;
	struct pci_dev			*pdev;
	unsigned int			state;
	u32				fw_ver;

	struct blk_mq_tag_set		tag_set;
	struct request_queue		*oob_q;
	unsigned int			n_oob;

	unsigned int			hw_sg_used;

	unsigned int			resp_idx;

	unsigned int			wait_q_prod;
	unsigned int			wait_q_cons;
	struct request_queue		*wait_q[CARM_MAX_WAIT_Q];

	void				*msg_base;
	dma_addr_t			msg_dma;

	int				cur_scan_dev;
	unsigned long			dev_active;
	unsigned long			dev_present;
	struct carm_port		port[CARM_MAX_PORTS];

	struct work_struct		fsm_task;

	struct completion		probe_comp;
};

struct carm_response {
	__le32 ret_handle;
	__le32 status;
} __attribute__((packed));

struct carm_msg_sg {
	__le32 start;
	__le32 len;
} __attribute__((packed));

struct carm_msg_rw {
	u8 type;
	u8 id;
	u8 sg_count;
	u8 sg_type;
	__le32 handle;
	__le32 lba;
	__le16 lba_count;
	__le16 lba_high;
	struct carm_msg_sg sg[32];
} __attribute__((packed));

struct carm_msg_allocbuf {
	u8 type;
	u8 subtype;
	u8 n_sg;
	u8 sg_type;
	__le32 handle;
	__le32 addr;
	__le32 len;
	__le32 evt_pool;
	__le32 n_evt;
	__le32 rbuf_pool;
	__le32 n_rbuf;
	__le32 msg_pool;
	__le32 n_msg;
	struct carm_msg_sg sg[8];
} __attribute__((packed));

struct carm_msg_ioctl {
	u8 type;
	u8 subtype;
	u8 array_id;
	u8 reserved1;
	__le32 handle;
	__le32 data_addr;
	u32 reserved2;
} __attribute__((packed));

struct carm_msg_sync_time {
	u8 type;
	u8 subtype;
	u16 reserved1;
	__le32 handle;
	u32 reserved2;
	__le32 timestamp;
} __attribute__((packed));

struct carm_msg_get_fw_ver {
	u8 type;
	u8 subtype;
	u16 reserved1;
	__le32 handle;
	__le32 data_addr;
	u32 reserved2;
} __attribute__((packed));

struct carm_fw_ver {
	__le32 version;
	u8 features;
	u8 reserved1;
	u16 reserved2;
} __attribute__((packed));

struct carm_array_info {
	__le32 size;
	__le16 size_hi;
	__le16 stripe_size;
	__le32 mode;
	__le16 stripe_blk_sz;
	__le16 reserved1;
	__le16 cyl;
	__le16 head;
	__le16 sect;
	u8 array_id;
	u8 reserved2;
	char name[40];
	__le32 array_status;
	/* device list continues beyond this point? */
} __attribute__((packed));

static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void carm_remove_one (struct pci_dev *pdev);
static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static const struct pci_device_id carm_pci_tbl[] = {
	{ PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, carm_pci_tbl);

static struct pci_driver carm_driver = {
	.name		= DRV_NAME,
	.id_table	= carm_pci_tbl,
	.probe		= carm_init_one,
	.remove		= carm_remove_one,
};

static const struct block_device_operations carm_bd_ops = {
	.owner		= THIS_MODULE,
	.getgeo		= carm_bdev_getgeo,
};

static unsigned int carm_host_id;
static unsigned long carm_major_alloc;

static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct carm_port *port = bdev->bd_disk->private_data;

	geo->heads = (u8) port->dev_geom_head;
	geo->sectors = (u8) port->dev_geom_sect;
	geo->cylinders = port->dev_geom_cyl;
	return 0;
}

static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };

static inline int carm_lookup_bucket(u32 msg_size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
		if (msg_size <= msg_sizes[i])
			return i;

	return -ENOENT;
}
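
/*
 * Worked example (added for illustration): a 100-byte message does not
 * fit the 32- or 64-byte buckets but fits the 128-byte one, so
 * carm_lookup_bucket(100) returns bucket index 2.  Anything larger
 * than CARM_MSG_SIZE (288) yields -ENOENT.
 */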

static void carm_init_buckets(void __iomem *mmio)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
		writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
}

static inline void *carm_ref_msg(struct carm_host *host,
				 unsigned int msg_idx)
{
	return host->msg_base + (msg_idx * CARM_MSG_SIZE);
}

static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
					  unsigned int msg_idx)
{
	return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
}

static int carm_send_msg(struct carm_host *host,
			 struct carm_request *crq, unsigned tag)
{
	void __iomem *mmio = host->mmio;
	u32 msg = (u32) carm_ref_msg_dma(host, tag);
	u32 cm_bucket = crq->msg_bucket;
	u32 tmp;
	int rc = 0;

	VPRINTK("ENTER\n");

	tmp = readl(mmio + CARM_HMUC);
	if (tmp & CARM_Q_FULL) {
#if 0
		tmp = readl(mmio + CARM_INT_MASK);
		tmp |= INT_Q_AVAILABLE;
		writel(tmp, mmio + CARM_INT_MASK);
		readl(mmio + CARM_INT_MASK);	/* flush */
#endif
		DPRINTK("host msg queue full\n");
		rc = -EBUSY;
	} else {
		writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
		readl(mmio + CARM_IHQP);	/* flush */
	}

	return rc;
}

static int carm_array_info (struct carm_host *host, unsigned int array_idx)
{
	struct carm_msg_ioctl *ioc;
	u32 msg_data;
	dma_addr_t msg_dma;
	struct carm_request *crq;
	struct request *rq;
	int rc;

	rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq)) {
		rc = -ENOMEM;
		goto err_out;
	}
	crq = blk_mq_rq_to_pdu(rq);

	ioc = carm_ref_msg(host, rq->tag);
	msg_dma = carm_ref_msg_dma(host, rq->tag);
	msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));

	crq->msg_type = CARM_MSG_ARRAY;
	crq->msg_subtype = CARM_ARRAY_INFO;
	rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
				sizeof(struct carm_array_info));
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_ARRAY;
	ioc->subtype	= CARM_ARRAY_INFO;
	ioc->array_id	= (u8) array_idx;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(rq->tag));
	ioc->data_addr	= cpu_to_le32(msg_data);

	spin_lock_irq(&host->lock);
	assert(host->state == HST_DEV_SCAN_START ||
	       host->state == HST_DEV_SCAN);
	spin_unlock_irq(&host->lock);

	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
	blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);

	return 0;

err_out:
	spin_lock_irq(&host->lock);
	host->state = HST_ERROR;
	spin_unlock_irq(&host->lock);
	return rc;
}

typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);

static int carm_send_special (struct carm_host *host, carm_sspc_t func)
{
	struct request *rq;
	struct carm_request *crq;
	struct carm_msg_ioctl *ioc;
	void *mem;
	unsigned int msg_size;
	int rc;

	rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return -ENOMEM;
	crq = blk_mq_rq_to_pdu(rq);

	mem = carm_ref_msg(host, rq->tag);

	msg_size = func(host, rq->tag, mem);

	ioc = mem;
	crq->msg_type = ioc->type;
	crq->msg_subtype = ioc->subtype;
	rc = carm_lookup_bucket(msg_size);
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
	blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);

	return 0;
}
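
/*
 * Each carm_fill_*() helper below formats one out-of-band command into
 * the message slot selected by @idx and returns the message length in
 * bytes for bucket lookup.  Note that the sync-time message carries
 * only the low 32 bits of the time64_t seconds value, since
 * cpu_to_le32() truncates it.
 */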
static unsigned int carm_fill_sync_time(struct carm_host *host,
					unsigned int idx, void *mem)
{
	struct carm_msg_sync_time *st = mem;

	time64_t tv = ktime_get_real_seconds();

	memset(st, 0, sizeof(*st));
	st->type	= CARM_MSG_MISC;
	st->subtype	= MISC_SET_TIME;
	st->handle	= cpu_to_le32(TAG_ENCODE(idx));
	st->timestamp	= cpu_to_le32(tv);

	return sizeof(struct carm_msg_sync_time);
}

static unsigned int carm_fill_alloc_buf(struct carm_host *host,
					unsigned int idx, void *mem)
{
	struct carm_msg_allocbuf *ab = mem;

	memset(ab, 0, sizeof(*ab));
	ab->type	= CARM_MSG_MISC;
	ab->subtype	= MISC_ALLOC_MEM;
	ab->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ab->n_sg	= 1;
	ab->sg_type	= SGT_32BIT;
	ab->addr	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
	ab->len		= cpu_to_le32(PDC_SHM_SIZE >> 1);
	ab->evt_pool	= cpu_to_le32(host->shm_dma + (16 * 1024));
	ab->n_evt	= cpu_to_le32(1024);
	ab->rbuf_pool	= cpu_to_le32(host->shm_dma);
	ab->n_rbuf	= cpu_to_le32(RMSG_Q_LEN);
	ab->msg_pool	= cpu_to_le32(host->shm_dma + RBUF_LEN);
	ab->n_msg	= cpu_to_le32(CARM_Q_LEN);
	ab->sg[0].start	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
	ab->sg[0].len	= cpu_to_le32(65536);

	return sizeof(struct carm_msg_allocbuf);
}

static unsigned int carm_fill_scan_channels(struct carm_host *host,
					    unsigned int idx, void *mem)
{
	struct carm_msg_ioctl *ioc = mem;
	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
			      IOC_SCAN_CHAN_OFFSET);

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_IOCTL;
	ioc->subtype	= CARM_IOC_SCAN_CHAN;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	/* fill output data area with "no device" default values */
	mem += IOC_SCAN_CHAN_OFFSET;
	memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);

	return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
}

static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
					 unsigned int idx, void *mem)
{
	struct carm_msg_get_fw_ver *ioc = mem;
	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_MISC;
	ioc->subtype	= MISC_GET_FW_VER;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	return sizeof(struct carm_msg_get_fw_ver) +
	       sizeof(struct carm_fw_ver);
}
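
/*
 * The wait queue below is a small ring of stopped request queues.
 * wait_q_prod and wait_q_cons are free-running counters; the active
 * slot is the counter modulo CARM_MAX_WAIT_Q.  With CARM_MAX_WAIT_Q
 * == CARM_MAX_PORTS + 1, the ring can hold every per-port queue at
 * once without wrapping, which is what the BUG_ON() overrun check in
 * carm_push_q() relies on.
 */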
static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
{
	unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;

	blk_mq_stop_hw_queues(q);
	VPRINTK("STOPPED QUEUE %p\n", q);

	host->wait_q[idx] = q;
	host->wait_q_prod++;
	BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
}

static inline struct request_queue *carm_pop_q(struct carm_host *host)
{
	unsigned int idx;

	if (host->wait_q_prod == host->wait_q_cons)
		return NULL;

	idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
	host->wait_q_cons++;

	return host->wait_q[idx];
}

static inline void carm_round_robin(struct carm_host *host)
{
	struct request_queue *q = carm_pop_q(host);
	if (q) {
		blk_mq_start_hw_queues(q);
		VPRINTK("STARTED QUEUE %p\n", q);
	}
}

static inline enum dma_data_direction carm_rq_dir(struct request *rq)
{
	return op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct request *rq = bd->rq;
	struct carm_port *port = q->queuedata;
	struct carm_host *host = port->host;
	struct carm_request *crq = blk_mq_rq_to_pdu(rq);
	struct carm_msg_rw *msg;
	struct scatterlist *sg;
	int i, n_elem = 0, rc;
	unsigned int msg_size;
	u32 tmp;

	crq->n_elem = 0;
	sg_init_table(crq->sg, CARM_MAX_REQ_SG);

	blk_mq_start_request(rq);

	spin_lock_irq(&host->lock);
	if (req_op(rq) == REQ_OP_DRV_OUT)
		goto send_msg;

	/* get scatterlist from block layer */
	sg = &crq->sg[0];
	n_elem = blk_rq_map_sg(q, rq, sg);
	if (n_elem <= 0)
		goto out_ioerr;

	/* map scatterlist to PCI bus addresses */
	n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq));
	if (n_elem <= 0)
		goto out_ioerr;

	/* obey global hardware limit on S/G entries */
	if (host->hw_sg_used >= CARM_MAX_HOST_SG - n_elem)
		goto out_resource;

	crq->n_elem = n_elem;
	host->hw_sg_used += n_elem;

	/*
	 * build read/write message
	 */

	VPRINTK("build msg\n");
	msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag);

	if (rq_data_dir(rq) == WRITE) {
		msg->type = CARM_MSG_WRITE;
		crq->msg_type = CARM_MSG_WRITE;
	} else {
		msg->type = CARM_MSG_READ;
		crq->msg_type = CARM_MSG_READ;
	}

	msg->id		= port->port_no;
	msg->sg_count	= n_elem;
	msg->sg_type	= SGT_32BIT;
	msg->handle	= cpu_to_le32(TAG_ENCODE(rq->tag));
	msg->lba	= cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
	tmp		= (blk_rq_pos(rq) >> 16) >> 16;
	msg->lba_high	= cpu_to_le16( (u16) tmp );
	msg->lba_count	= cpu_to_le16(blk_rq_sectors(rq));

	msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
	for (i = 0; i < n_elem; i++) {
		struct carm_msg_sg *carm_sg = &msg->sg[i];
		carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
		carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
		msg_size += sizeof(struct carm_msg_sg);
	}

	rc = carm_lookup_bucket(msg_size);
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;
send_msg:
	/*
	 * queue read/write message to hardware
	 */
	VPRINTK("send msg, tag == %u\n", rq->tag);
	rc = carm_send_msg(host, crq, rq->tag);
	if (rc) {
		host->hw_sg_used -= n_elem;
		goto out_resource;
	}

	spin_unlock_irq(&host->lock);
	return BLK_STS_OK;
out_resource:
	dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq));
	carm_push_q(host, q);
	spin_unlock_irq(&host->lock);
	return BLK_STS_DEV_RESOURCE;
out_ioerr:
	carm_round_robin(host);
	spin_unlock_irq(&host->lock);
	return BLK_STS_IOERR;
}
static void carm_handle_array_info(struct carm_host *host,
				   struct carm_request *crq, u8 *mem,
				   blk_status_t error)
{
	struct carm_port *port;
	u8 *msg_data = mem + sizeof(struct carm_array_info);
	struct carm_array_info *desc = (struct carm_array_info *) msg_data;
	u64 lo, hi;
	int cur_port;
	size_t slen;

	DPRINTK("ENTER\n");

	if (error)
		goto out;
	if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
		goto out;

	cur_port = host->cur_scan_dev;

	/* should never occur */
	if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
		printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
		       cur_port, (int) desc->array_id);
		goto out;
	}

	port = &host->port[cur_port];

	lo = (u64) le32_to_cpu(desc->size);
	hi = (u64) le16_to_cpu(desc->size_hi);

	port->capacity = lo | (hi << 32);
	port->dev_geom_head = le16_to_cpu(desc->head);
	port->dev_geom_sect = le16_to_cpu(desc->sect);
	port->dev_geom_cyl = le16_to_cpu(desc->cyl);

	host->dev_active |= (1 << cur_port);

	strncpy(port->name, desc->name, sizeof(port->name));
	port->name[sizeof(port->name) - 1] = 0;
	slen = strlen(port->name);
	while (slen && (port->name[slen - 1] == ' ')) {
		port->name[slen - 1] = 0;
		slen--;
	}

	printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
	       pci_name(host->pdev), port->port_no,
	       (unsigned long long) port->capacity);
	printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
	       pci_name(host->pdev), port->port_no, port->name);

out:
	assert(host->state == HST_DEV_SCAN);
	schedule_work(&host->fsm_task);
}

static void carm_handle_scan_chan(struct carm_host *host,
				  struct carm_request *crq, u8 *mem,
				  blk_status_t error)
{
	u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
	unsigned int i, dev_count = 0;
	int new_state = HST_DEV_SCAN_START;

	DPRINTK("ENTER\n");

	if (error) {
		new_state = HST_ERROR;
		goto out;
	}

	/* TODO: scan and support non-disk devices */
	for (i = 0; i < 8; i++)
		if (msg_data[i] == 0) {	/* direct-access device (disk) */
			host->dev_present |= (1 << i);
			dev_count++;
		}

	printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
	       pci_name(host->pdev), dev_count);

out:
	assert(host->state == HST_PORT_SCAN);
	host->state = new_state;
	schedule_work(&host->fsm_task);
}

static void carm_handle_generic(struct carm_host *host,
				struct carm_request *crq, blk_status_t error,
				int cur_state, int next_state)
{
	DPRINTK("ENTER\n");

	assert(host->state == cur_state);
	if (error)
		host->state = HST_ERROR;
	else
		host->state = next_state;
	schedule_work(&host->fsm_task);
}

static inline void carm_handle_resp(struct carm_host *host,
				    __le32 ret_handle_le, u32 status)
{
	u32 handle = le32_to_cpu(ret_handle_le);
	unsigned int msg_idx;
	struct request *rq;
	struct carm_request *crq;
	blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
	u8 *mem;

	VPRINTK("ENTER, handle == 0x%x\n", handle);

	if (unlikely(!TAG_VALID(handle))) {
		printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
		       pci_name(host->pdev), handle);
		return;
	}

	msg_idx = TAG_DECODE(handle);
	VPRINTK("tag == %u\n", msg_idx);

	rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);
	crq = blk_mq_rq_to_pdu(rq);

	/* fast path */
	if (likely(crq->msg_type == CARM_MSG_READ ||
		   crq->msg_type == CARM_MSG_WRITE)) {
		dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem,
			     carm_rq_dir(rq));
		goto done;
	}

	mem = carm_ref_msg(host, msg_idx);

	switch (crq->msg_type) {
	case CARM_MSG_IOCTL: {
		switch (crq->msg_subtype) {
		case CARM_IOC_SCAN_CHAN:
			carm_handle_scan_chan(host, crq, mem, error);
			goto done;
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	case CARM_MSG_MISC: {
		switch (crq->msg_subtype) {
		case MISC_ALLOC_MEM:
			carm_handle_generic(host, crq, error,
					    HST_ALLOC_BUF, HST_SYNC_TIME);
			goto done;
		case MISC_SET_TIME:
			carm_handle_generic(host, crq, error,
					    HST_SYNC_TIME, HST_GET_FW_VER);
			goto done;
		case MISC_GET_FW_VER: {
			struct carm_fw_ver *ver = (struct carm_fw_ver *)
				(mem + sizeof(struct carm_msg_get_fw_ver));
			if (!error) {
				host->fw_ver = le32_to_cpu(ver->version);
				host->flags |= (ver->features & FL_FW_VER_MASK);
			}
			carm_handle_generic(host, crq, error,
					    HST_GET_FW_VER, HST_PORT_SCAN);
			goto done;
		}
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	case CARM_MSG_ARRAY: {
		switch (crq->msg_subtype) {
		case CARM_ARRAY_INFO:
			carm_handle_array_info(host, crq, mem, error);
			break;
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	default:
		/* unknown / invalid response */
		goto err_out;
	}

	return;

err_out:
	printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
	       pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
	error = BLK_STS_IOERR;
done:
	host->hw_sg_used -= crq->n_elem;
	blk_mq_end_request(blk_mq_rq_from_pdu(crq), error);

	if (host->hw_sg_used <= CARM_SG_LOW_WATER)
		carm_round_robin(host);
}
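
/*
 * Response ring conventions (summarized from the code below): a status
 * word of 0xffffffff marks an empty slot and ends the scan; a status
 * with bit 31 clear is a completion for a message we sent; a status
 * whose top byte is 0x80 is an asynchronous hardware event, which this
 * driver only logs.  The value written back to CARM_RESP_IDX appears
 * to be the byte offset of the slot (idx << 3, RMSG_SZ being 8).
 */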
static inline void carm_handle_responses(struct carm_host *host)
{
	void __iomem *mmio = host->mmio;
	struct carm_response *resp = (struct carm_response *) host->shm;
	unsigned int work = 0;
	unsigned int idx = host->resp_idx % RMSG_Q_LEN;

	while (1) {
		u32 status = le32_to_cpu(resp[idx].status);
		if (status == 0xffffffff) {
			VPRINTK("ending response on index %u\n", idx);
			writel(idx << 3, mmio + CARM_RESP_IDX);
			break;
		}

		/* response to a message we sent */
		else if ((status & (1 << 31)) == 0) {
			VPRINTK("handling msg response on index %u\n", idx);
			carm_handle_resp(host, resp[idx].ret_handle, status);
			resp[idx].status = cpu_to_le32(0xffffffff);
		}

		/* asynchronous events the hardware throws our way */
		else if ((status & 0xff000000) == (1 << 31)) {
			u8 *evt_type_ptr = (u8 *) &resp[idx];
			u8 evt_type = *evt_type_ptr;
			printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
			       pci_name(host->pdev), (int) evt_type);
			resp[idx].status = cpu_to_le32(0xffffffff);
		}

		idx = NEXT_RESP(idx);
		work++;
	}

	VPRINTK("EXIT, work==%u\n", work);
	host->resp_idx += work;
}

static irqreturn_t carm_interrupt(int irq, void *__host)
{
	struct carm_host *host = __host;
	void __iomem *mmio;
	u32 mask;
	int handled = 0;
	unsigned long flags;

	if (!host) {
		VPRINTK("no host\n");
		return IRQ_NONE;
	}

	spin_lock_irqsave(&host->lock, flags);

	mmio = host->mmio;

	/* reading should also clear interrupts */
	mask = readl(mmio + CARM_INT_STAT);

	if (mask == 0 || mask == 0xffffffff) {
		VPRINTK("no work, mask == 0x%x\n", mask);
		goto out;
	}

	if (mask & INT_ACK_MASK)
		writel(mask, mmio + CARM_INT_STAT);

	if (unlikely(host->state == HST_INVALID)) {
		VPRINTK("not initialized yet, mask = 0x%x\n", mask);
		goto out;
	}

	if (mask & CARM_HAVE_RESP) {
		handled = 1;
		carm_handle_responses(host);
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	VPRINTK("EXIT\n");
	return IRQ_RETVAL(handled);
}

static void carm_fsm_task (struct work_struct *work)
{
	struct carm_host *host =
		container_of(work, struct carm_host, fsm_task);
	unsigned long flags;
	unsigned int state;
	int rc, i, next_dev;
	int reschedule = 0;
	int new_state = HST_INVALID;

	spin_lock_irqsave(&host->lock, flags);
	state = host->state;
	spin_unlock_irqrestore(&host->lock, flags);

	DPRINTK("ENTER, state == %s\n", state_name[state]);

	switch (state) {
	case HST_PROBE_START:
		new_state = HST_ALLOC_BUF;
		reschedule = 1;
		break;

	case HST_ALLOC_BUF:
		rc = carm_send_special(host, carm_fill_alloc_buf);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_SYNC_TIME:
		rc = carm_send_special(host, carm_fill_sync_time);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_GET_FW_VER:
		rc = carm_send_special(host, carm_fill_get_fw_ver);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_PORT_SCAN:
		rc = carm_send_special(host, carm_fill_scan_channels);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_DEV_SCAN_START:
		host->cur_scan_dev = -1;
		new_state = HST_DEV_SCAN;
		reschedule = 1;
		break;

	case HST_DEV_SCAN:
		next_dev = -1;
		for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
			if (host->dev_present & (1 << i)) {
				next_dev = i;
				break;
			}

		if (next_dev >= 0) {
			host->cur_scan_dev = next_dev;
			rc = carm_array_info(host, next_dev);
			if (rc) {
				new_state = HST_ERROR;
				reschedule = 1;
			}
		} else {
			new_state = HST_DEV_ACTIVATE;
			reschedule = 1;
		}
		break;

	case HST_DEV_ACTIVATE: {
		int activated = 0;
		for (i = 0; i < CARM_MAX_PORTS; i++)
			if (host->dev_active & (1 << i)) {
				struct carm_port *port = &host->port[i];
				struct gendisk *disk = port->disk;

				set_capacity(disk, port->capacity);
				add_disk(disk);
				activated++;
			}

		printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
		       pci_name(host->pdev), activated);

		new_state = HST_PROBE_FINISHED;
		reschedule = 1;
		break;
	}

	case HST_PROBE_FINISHED:
		complete(&host->probe_comp);
		break;

	case HST_ERROR:
		/* FIXME: TODO */
		break;

	default:
		/* should never occur */
		printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
		assert(0);
		break;
	}

	if (new_state != HST_INVALID) {
		spin_lock_irqsave(&host->lock, flags);
		host->state = new_state;
		spin_unlock_irqrestore(&host->lock, flags);
	}
	if (reschedule)
		schedule_work(&host->fsm_task);
}
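
/*
 * carm_init_wait() polls CARM_LMUC until the given bits are all set
 * (test_bit != 0) or all clear, in 100 usec steps; 50000 iterations
 * bound the wait at roughly five seconds.
 */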
static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
{
	unsigned int i;

	for (i = 0; i < 50000; i++) {
		u32 tmp = readl(mmio + CARM_LMUC);
		udelay(100);

		if (test_bit) {
			if ((tmp & bits) == bits)
				return 0;
		} else {
			if ((tmp & bits) == 0)
				return 0;
		}

		cond_resched();
	}

	printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
	       bits, test_bit ? "yes" : "no");
	return -EBUSY;
}

static void carm_init_responses(struct carm_host *host)
{
	void __iomem *mmio = host->mmio;
	unsigned int i;
	struct carm_response *resp = (struct carm_response *) host->shm;

	for (i = 0; i < RMSG_Q_LEN; i++)
		resp[i].status = cpu_to_le32(0xffffffff);

	writel(0, mmio + CARM_RESP_IDX);
}

static int carm_init_host(struct carm_host *host)
{
	void __iomem *mmio = host->mmio;
	u32 tmp;
	u8 tmp8;
	int rc;

	DPRINTK("ENTER\n");

	writel(0, mmio + CARM_INT_MASK);

	tmp8 = readb(mmio + CARM_INITC);
	if (tmp8 & 0x01) {
		tmp8 &= ~0x01;
		writeb(tmp8, mmio + CARM_INITC);
		readb(mmio + CARM_INITC);	/* flush */

		DPRINTK("snooze...\n");
		msleep(5000);
	}

	tmp = readl(mmio + CARM_HMUC);
	if (tmp & CARM_CME) {
		DPRINTK("CME bit present, waiting\n");
		rc = carm_init_wait(mmio, CARM_CME, 1);
		if (rc) {
			DPRINTK("EXIT, carm_init_wait 1 failed\n");
			return rc;
		}
	}
	if (tmp & CARM_RME) {
		DPRINTK("RME bit present, waiting\n");
		rc = carm_init_wait(mmio, CARM_RME, 1);
		if (rc) {
			DPRINTK("EXIT, carm_init_wait 2 failed\n");
			return rc;
		}
	}

	tmp &= ~(CARM_RME | CARM_CME);
	writel(tmp, mmio + CARM_HMUC);
	readl(mmio + CARM_HMUC);	/* flush */

	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
	if (rc) {
		DPRINTK("EXIT, carm_init_wait 3 failed\n");
		return rc;
	}

	carm_init_buckets(mmio);

	writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
	writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
	writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);

	tmp = readl(mmio + CARM_HMUC);
	tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
	writel(tmp, mmio + CARM_HMUC);
	readl(mmio + CARM_HMUC);	/* flush */

	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
	if (rc) {
		DPRINTK("EXIT, carm_init_wait 4 failed\n");
		return rc;
	}

	writel(0, mmio + CARM_HMPHA);
	writel(INT_DEF_MASK, mmio + CARM_INT_MASK);

	carm_init_responses(host);

	/* start initialization, probing state machine */
	spin_lock_irq(&host->lock);
	assert(host->state == HST_INVALID);
	host->state = HST_PROBE_START;
	spin_unlock_irq(&host->lock);
	schedule_work(&host->fsm_task);

	DPRINTK("EXIT\n");
	return 0;
}

static const struct blk_mq_ops carm_mq_ops = {
	.queue_rq	= carm_queue_rq,
};

static int carm_init_disk(struct carm_host *host, unsigned int port_no)
{
	struct carm_port *port = &host->port[port_no];
	struct gendisk *disk;
	struct request_queue *q;

	port->host = host;
	port->port_no = port_no;

	disk = alloc_disk(CARM_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	port->disk = disk;
	sprintf(disk->disk_name, DRV_NAME "/%u",
		(unsigned int)host->id * CARM_MAX_PORTS + port_no);
	disk->major = host->major;
	disk->first_minor = port_no * CARM_MINORS_PER_MAJOR;
	disk->fops = &carm_bd_ops;
	disk->private_data = port;

	q = blk_mq_init_queue(&host->tag_set);
	if (IS_ERR(q))
		return PTR_ERR(q);

	blk_queue_max_segments(q, CARM_MAX_REQ_SG);
	blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);

	q->queuedata = port;
	disk->queue = q;
	return 0;
}

static void carm_free_disk(struct carm_host *host, unsigned int port_no)
{
	struct carm_port *port = &host->port[port_no];
	struct gendisk *disk = port->disk;

	if (!disk)
		return;

	if (disk->flags & GENHD_FL_UP)
		del_gendisk(disk);
	if (disk->queue)
		blk_cleanup_queue(disk->queue);
	put_disk(disk);
}
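
/*
 * Shared-memory layout, as established below: the response ring
 * occupies the first RBUF_LEN bytes of the coherent buffer and is
 * poisoned with 0xff (the "empty slot" status); the command message
 * slots, one CARM_MSG_SIZE slab per tag, begin at msg_base directly
 * after it.
 */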
static int carm_init_shm(struct carm_host *host)
{
	host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE,
				       &host->shm_dma, GFP_KERNEL);
	if (!host->shm)
		return -ENOMEM;

	host->msg_base = host->shm + RBUF_LEN;
	host->msg_dma = host->shm_dma + RBUF_LEN;

	memset(host->shm, 0xff, RBUF_LEN);
	memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);

	return 0;
}

static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct carm_host *host;
	int rc;
	struct request_queue *q;
	unsigned int i;

	printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
		       pci_name(pdev));
		goto err_out_regions;
	}

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
		       pci_name(pdev));
		rc = -ENOMEM;
		goto err_out_regions;
	}

	host->pdev = pdev;
	spin_lock_init(&host->lock);
	INIT_WORK(&host->fsm_task, carm_fsm_task);
	init_completion(&host->probe_comp);

	host->mmio = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!host->mmio) {
		printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
		       pci_name(pdev));
		rc = -ENOMEM;
		goto err_out_kfree;
	}

	rc = carm_init_shm(host);
	if (rc) {
		printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	memset(&host->tag_set, 0, sizeof(host->tag_set));
	host->tag_set.ops = &carm_mq_ops;
	host->tag_set.cmd_size = sizeof(struct carm_request);
	host->tag_set.nr_hw_queues = 1;
	host->tag_set.nr_maps = 1;
	host->tag_set.queue_depth = max_queue;
	host->tag_set.numa_node = NUMA_NO_NODE;
	host->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	rc = blk_mq_alloc_tag_set(&host->tag_set);
	if (rc)
		goto err_out_dma_free;

	q = blk_mq_init_queue(&host->tag_set);
	if (IS_ERR(q)) {
		rc = PTR_ERR(q);
		blk_mq_free_tag_set(&host->tag_set);
		goto err_out_dma_free;
	}

	host->oob_q = q;
	q->queuedata = host;

	/*
	 * Figure out which major to use: 160, 161, or dynamic
	 */
	if (!test_and_set_bit(0, &carm_major_alloc))
		host->major = 160;
	else if (!test_and_set_bit(1, &carm_major_alloc))
		host->major = 161;
	else
		host->flags |= FL_DYN_MAJOR;

	host->id = carm_host_id;
	sprintf(host->name, DRV_NAME "%d", carm_host_id);

	rc = register_blkdev(host->major, host->name);
	if (rc < 0)
		goto err_out_free_majors;
	if (host->flags & FL_DYN_MAJOR)
		host->major = rc;

	for (i = 0; i < CARM_MAX_PORTS; i++) {
		rc = carm_init_disk(host, i);
		if (rc)
			goto err_out_blkdev_disks;
	}

	pci_set_master(pdev);

	rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host);
	if (rc) {
		printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
		       pci_name(pdev));
		goto err_out_blkdev_disks;
	}

	rc = carm_init_host(host);
	if (rc)
		goto err_out_free_irq;

	DPRINTK("waiting for probe_comp\n");
	wait_for_completion(&host->probe_comp);

	printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
	       host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
	       (unsigned long long)pci_resource_start(pdev, 0),
	       pdev->irq, host->major);

	carm_host_id++;
	pci_set_drvdata(pdev, host);
	return 0;

err_out_free_irq:
	free_irq(pdev->irq, host);
err_out_blkdev_disks:
	for (i = 0; i < CARM_MAX_PORTS; i++)
		carm_free_disk(host, i);
	unregister_blkdev(host->major, host->name);
err_out_free_majors:
	if (host->major == 160)
		clear_bit(0, &carm_major_alloc);
	else if (host->major == 161)
		clear_bit(1, &carm_major_alloc);
	blk_cleanup_queue(host->oob_q);
	blk_mq_free_tag_set(&host->tag_set);
err_out_dma_free:
	dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
err_out_iounmap:
	iounmap(host->mmio);
err_out_kfree:
	kfree(host);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	pci_disable_device(pdev);
	return rc;
}

static void carm_remove_one (struct pci_dev *pdev)
{
	struct carm_host *host = pci_get_drvdata(pdev);
	unsigned int i;

	if (!host) {
		printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
		       pci_name(pdev));
		return;
	}

	free_irq(pdev->irq, host);
	for (i = 0; i < CARM_MAX_PORTS; i++)
		carm_free_disk(host, i);
	unregister_blkdev(host->major, host->name);
	if (host->major == 160)
		clear_bit(0, &carm_major_alloc);
	else if (host->major == 161)
		clear_bit(1, &carm_major_alloc);
	blk_cleanup_queue(host->oob_q);
	blk_mq_free_tag_set(&host->tag_set);
	dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
	iounmap(host->mmio);
	kfree(host);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

module_pci_driver(carm_driver);