umem.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm.c - Micro Memory(tm) PCI memory board block device driver - v2.3
 *
 * (C) 2001 San Mehat <nettwerk@valinux.com>
 * (C) 2001 Johannes Erdfelt <jerdfelt@valinux.com>
 * (C) 2001 NeilBrown <neilb@cse.unsw.edu.au>
 *
 * This driver for the Micro Memory PCI Memory Module with Battery Backup
 * is Copyright Micro Memory Inc 2001-2002.  All rights reserved.
 *
 * This driver provides a standard block device interface for Micro Memory(tm)
 * PCI based RAM boards.
 * 10/05/01: Phap Nguyen - Rebuilt the driver
 * 10/22/01: Phap Nguyen - v2.1 Added disk partitioning
 * 29oct2001:NeilBrown   - Use make_request_fn instead of request_fn
 *                       - use standard disk partitioning (so fdisk works).
 * 08nov2001:NeilBrown   - change driver name from "mm" to "umem"
 *                       - incorporate into main kernel
 * 08apr2002:NeilBrown   - Move some of interrupt handling to tasklet
 *                       - use spin_lock_bh instead of _irq
 *                       - Never block on make_request.  queue
 *                         bh's instead.
 *                       - unregister umem from devfs at mod unload
 *                       - Change version to 2.3
 * 07Nov2001:Phap Nguyen - Select pci read command: 06, 12, 15 (Decimal)
 * 07Jan2002: P. Nguyen  - Used PCI Memory Write & Invalidate for DMA
 * 15May2002:NeilBrown   - convert to bio for 2.5
 * 17May2002:NeilBrown   - remove init_mem initialisation. Instead detect
 *                       - a sequence of writes that cover the card, and
 *                       - set initialised bit then.
 */
#undef DEBUG	/* #define DEBUG if you want debugging info (pr_debug) */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/gfp.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/fcntl.h>	/* O_ACCMODE */
#include <linux/hdreg.h>	/* HDIO_GETGEO */

#include "umem.h"

#include <linux/uaccess.h>
#include <asm/io.h>

#define MM_MAXCARDS 4
#define MM_RAHEAD 2      /* two sectors */
#define MM_BLKSIZE 1024  /* 1k blocks */
#define MM_HARDSECT 512  /* 512-byte hardware sectors */
#define MM_SHIFT 6       /* max 64 partitions on 4 cards  */
/*
 * Version Information
 */

#define DRIVER_NAME	"umem"
#define DRIVER_VERSION	"v2.3"
#define DRIVER_AUTHOR	"San Mehat, Johannes Erdfelt, NeilBrown"
#define DRIVER_DESC	"Micro Memory(tm) PCI memory board block driver"

static int debug;
/* #define HW_TRACE(x)     writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */
#define HW_TRACE(x)

#define DEBUG_LED_ON_TRANSFER	0x01
#define DEBUG_BATTERY_POLLING	0x02

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug bitmask");
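/*
 * Example (assumed module name, matching DRIVER_NAME): loading with
 * "modprobe umem debug=0x03" would set both the DEBUG_LED_ON_TRANSFER and
 * DEBUG_BATTERY_POLLING bits of the debug bitmask.
 */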
static int pci_read_cmd = 0x0C;		/* Read Multiple */
module_param(pci_read_cmd, int, 0);
MODULE_PARM_DESC(pci_read_cmd, "PCI read command");

static int pci_write_cmd = 0x0F;	/* Write and Invalidate */
module_param(pci_write_cmd, int, 0);
MODULE_PARM_DESC(pci_write_cmd, "PCI write command");
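/*
 * pci_cmds packs the selected PCI read command into bits 31:28 and the
 * write command into bits 27:24; it is OR'd into every descriptor's
 * control_bits and into the DMA_STATUS_CTRL "go" word (see add_bio() and
 * mm_start_io()).
 */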
static int pci_cmds;

static int major_nr;

#include <linux/blkdev.h>
#include <linux/blkpg.h>
struct cardinfo {
	struct pci_dev	*dev;

	unsigned char	__iomem *csr_remap;
	unsigned int	mm_size;  /* size in kbytes */

	unsigned int	init_size; /* initial segment, in sectors,
				    * that we know to
				    * have been written
				    */
	struct bio	*bio, *currentbio, **biotail;
	struct bvec_iter current_iter;

	struct request_queue *queue;

	struct mm_page {
		dma_addr_t		page_dma;
		struct mm_dma_desc	*desc;
		int			cnt, headcnt;
		struct bio		*bio, **biotail;
		struct bvec_iter	iter;
	} mm_pages[2];
#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))

	int  Active, Ready;

	struct tasklet_struct	tasklet;
	unsigned int dma_status;

	struct {
		int		good;
		int		warned;
		unsigned long	last_change;
	} battery[2];

	spinlock_t	lock;
	int		check_batteries;

	int		flags;
};

static struct cardinfo cards[MM_MAXCARDS];
static struct timer_list battery_timer;

static int num_cards;

static struct gendisk *mm_gendisk[MM_MAXCARDS];
static void check_batteries(struct cardinfo *card);

static int get_userbit(struct cardinfo *card, int bit)
{
	unsigned char led;

	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
	return led & bit;
}

static int set_userbit(struct cardinfo *card, int bit, unsigned char state)
{
	unsigned char led;

	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
	if (state)
		led |= bit;
	else
		led &= ~bit;
	writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);

	return 0;
}

/*
 * NOTE: For the power LED, use the LED_POWER_* macros since they differ
 */
static void set_led(struct cardinfo *card, int shift, unsigned char state)
{
	unsigned char led;

	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
	if (state == LED_FLIP)
		led ^= (1<<shift);
	else {
		led &= ~(0x03 << shift);
		led |= (state << shift);
	}
	writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);
}
#ifdef MM_DIAG
static void dump_regs(struct cardinfo *card)
{
	unsigned char *p;
	int i, i1;

	p = card->csr_remap;
	for (i = 0; i < 8; i++) {
		printk(KERN_DEBUG "%p   ", p);

		for (i1 = 0; i1 < 16; i1++)
			printk("%02x ", *p++);

		printk("\n");
	}
}
#endif

static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
{
	dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - ");
	if (dmastat & DMASCR_ANY_ERR)
		printk(KERN_CONT "ANY_ERR ");
	if (dmastat & DMASCR_MBE_ERR)
		printk(KERN_CONT "MBE_ERR ");
	if (dmastat & DMASCR_PARITY_ERR_REP)
		printk(KERN_CONT "PARITY_ERR_REP ");
	if (dmastat & DMASCR_PARITY_ERR_DET)
		printk(KERN_CONT "PARITY_ERR_DET ");
	if (dmastat & DMASCR_SYSTEM_ERR_SIG)
		printk(KERN_CONT "SYSTEM_ERR_SIG ");
	if (dmastat & DMASCR_TARGET_ABT)
		printk(KERN_CONT "TARGET_ABT ");
	if (dmastat & DMASCR_MASTER_ABT)
		printk(KERN_CONT "MASTER_ABT ");
	if (dmastat & DMASCR_CHAIN_COMPLETE)
		printk(KERN_CONT "CHAIN_COMPLETE ");
	if (dmastat & DMASCR_DMA_COMPLETE)
		printk(KERN_CONT "DMA_COMPLETE ");
	printk("\n");
}
/*
 * Theory of request handling
 *
 * Each bio is assigned to one mm_dma_desc - which may not be enough FIXME
 * We have two pages of mm_dma_desc, holding about 64 descriptors
 * each.  These are allocated at init time.
 * One page is "Ready" and is either full, or can have requests added.
 * The other page might be "Active", which means that DMA is happening on it.
 *
 * Whenever IO on the active page completes, the Ready page is activated
 * and the ex-Active page is cleaned out and made Ready.
 * Otherwise the Ready page is only activated when it becomes full.
 *
 * If a request arrives while both pages are full, it is queued, and b_rdev is
 * overloaded to record whether it was a read or a write.
 *
 * The interrupt handler only polls the device to clear the interrupt.
 * The processing of the result is done in a tasklet.
 */
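/*
 * mm_start_io() programs the DMA engine: it terminates the descriptor chain
 * at the last queued descriptor on the Active page, points the controller at
 * the first unprocessed descriptor (headcnt), and sets the GO and CHAIN_EN
 * bits in DMA_STATUS_CTRL.  The caller must hold card->lock with no IO
 * currently active.
 */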
static void mm_start_io(struct cardinfo *card)
{
	/* we have the lock, we know there is
	 * no IO active, and we know that card->Active
	 * is set
	 */
	struct mm_dma_desc *desc;
	struct mm_page *page;
	int offset;

	/* make the last descriptor end the chain */
	page = &card->mm_pages[card->Active];
	pr_debug("start_io: %d %d->%d\n",
		 card->Active, page->headcnt, page->cnt - 1);
	desc = &page->desc[page->cnt-1];

	desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN);
	desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN);
	desc->sem_control_bits = desc->control_bits;

	if (debug & DEBUG_LED_ON_TRANSFER)
		set_led(card, LED_REMOVE, LED_ON);

	desc = &page->desc[page->headcnt];
	writel(0, card->csr_remap + DMA_PCI_ADDR);
	writel(0, card->csr_remap + DMA_PCI_ADDR + 4);

	writel(0, card->csr_remap + DMA_LOCAL_ADDR);
	writel(0, card->csr_remap + DMA_LOCAL_ADDR + 4);

	writel(0, card->csr_remap + DMA_TRANSFER_SIZE);
	writel(0, card->csr_remap + DMA_TRANSFER_SIZE + 4);

	writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR);
	writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR + 4);

	offset = ((char *)desc) - ((char *)page->desc);
	writel(cpu_to_le32((page->page_dma+offset) & 0xffffffff),
	       card->csr_remap + DMA_DESCRIPTOR_ADDR);
	/* Force the value to u64 before shifting otherwise >> 32 is undefined C
	 * and on some ports will do nothing ! */
	writel(cpu_to_le32(((u64)page->page_dma)>>32),
	       card->csr_remap + DMA_DESCRIPTOR_ADDR + 4);

	/* Go, go, go */
	writel(cpu_to_le32(DMASCR_GO | DMASCR_CHAIN_EN | pci_cmds),
	       card->csr_remap + DMA_STATUS_CTRL);
}
static int add_bio(struct cardinfo *card);

static void activate(struct cardinfo *card)
{
	/* if No page is Active, and Ready is
	 * not empty, then switch Ready page
	 * to active and start IO.
	 * Then add any bios that are available to Ready
	 */

	do {
		while (add_bio(card))
			;

		if (card->Active == -1 &&
		    card->mm_pages[card->Ready].cnt > 0) {
			card->Active = card->Ready;
			card->Ready = 1-card->Ready;
			mm_start_io(card);
		}

	} while (card->Active == -1 && add_bio(card));
}

static inline void reset_page(struct mm_page *page)
{
	page->cnt = 0;
	page->headcnt = 0;
	page->bio = NULL;
	page->biotail = &page->bio;
}

/*
 * If there is room on the Ready page, take
 * one bio off the list and add it.
 * return 1 if there was room, else 0.
 */
static int add_bio(struct cardinfo *card)
{
	struct mm_page *p;
	struct mm_dma_desc *desc;
	dma_addr_t dma_handle;
	int offset;
	struct bio *bio;
	struct bio_vec vec;

	bio = card->currentbio;
	if (!bio && card->bio) {
		card->currentbio = card->bio;
		card->current_iter = card->bio->bi_iter;
		card->bio = card->bio->bi_next;
		if (card->bio == NULL)
			card->biotail = &card->bio;
		card->currentbio->bi_next = NULL;
		return 1;
	}
	if (!bio)
		return 0;

	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
		return 0;

	vec = bio_iter_iovec(bio, card->current_iter);

	dma_handle = dma_map_page(&card->dev->dev,
				  vec.bv_page,
				  vec.bv_offset,
				  vec.bv_len,
				  bio_op(bio) == REQ_OP_READ ?
				  DMA_FROM_DEVICE : DMA_TO_DEVICE);

	p = &card->mm_pages[card->Ready];
	desc = &p->desc[p->cnt];
	p->cnt++;
	if (p->bio == NULL)
		p->iter = card->current_iter;
	if ((p->biotail) != &bio->bi_next) {
		*(p->biotail) = bio;
		p->biotail = &(bio->bi_next);
		bio->bi_next = NULL;
	}

	desc->data_dma_handle = dma_handle;

	desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
	desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
	desc->transfer_size = cpu_to_le32(vec.bv_len);
	offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
	desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
	desc->zero1 = desc->zero2 = 0;
	offset = (((char *)(desc+1)) - ((char *)p->desc));
	desc->next_desc_addr = cpu_to_le64(p->page_dma+offset);
	desc->control_bits = cpu_to_le32(DMASCR_GO|DMASCR_ERR_INT_EN|
					 DMASCR_PARITY_INT_EN|
					 DMASCR_CHAIN_EN |
					 DMASCR_SEM_EN |
					 pci_cmds);
	if (bio_op(bio) == REQ_OP_WRITE)
		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
	desc->sem_control_bits = desc->control_bits;

	bio_advance_iter(bio, &card->current_iter, vec.bv_len);
	if (!card->current_iter.bi_size)
		card->currentbio = NULL;

	return 1;
}
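/*
 * process_page() is the tasklet body (scheduled from mm_interrupt()).  It
 * walks the Active page's descriptors, unmaps each completed segment, and
 * collects finished bios on a private list so that bio_endio() can be called
 * after card->lock has been dropped.
 */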
static void process_page(unsigned long data)
{
	/* check if any of the requests in the page are DMA_COMPLETE,
	 * and deal with them appropriately.
	 * If we find a descriptor without DMA_COMPLETE in the semaphore, then
	 * dma must have hit an error on that descriptor, so use dma_status
	 * instead and assume that all following descriptors must be re-tried.
	 */
	struct mm_page *page;
	struct bio *return_bio = NULL;
	struct cardinfo *card = (struct cardinfo *)data;
	unsigned int dma_status = card->dma_status;

	spin_lock(&card->lock);
	if (card->Active < 0)
		goto out_unlock;
	page = &card->mm_pages[card->Active];

	while (page->headcnt < page->cnt) {
		struct bio *bio = page->bio;
		struct mm_dma_desc *desc = &page->desc[page->headcnt];
		int control = le32_to_cpu(desc->sem_control_bits);
		int last = 0;
		struct bio_vec vec;

		if (!(control & DMASCR_DMA_COMPLETE)) {
			control = dma_status;
			last = 1;
		}

		page->headcnt++;
		vec = bio_iter_iovec(bio, page->iter);
		bio_advance_iter(bio, &page->iter, vec.bv_len);

		if (!page->iter.bi_size) {
			page->bio = bio->bi_next;
			if (page->bio)
				page->iter = page->bio->bi_iter;
		}

		dma_unmap_page(&card->dev->dev, desc->data_dma_handle,
			       vec.bv_len,
			       (control & DMASCR_TRANSFER_READ) ?
			       DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (control & DMASCR_HARD_ERROR) {
			/* error */
			bio->bi_status = BLK_STS_IOERR;
			dev_printk(KERN_WARNING, &card->dev->dev,
				   "I/O error on sector %d/%d\n",
				   le32_to_cpu(desc->local_addr)>>9,
				   le32_to_cpu(desc->transfer_size));
			dump_dmastat(card, control);
		} else if (op_is_write(bio_op(bio)) &&
			   le32_to_cpu(desc->local_addr) >> 9 ==
				card->init_size) {
			card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
			if (card->init_size >> 1 >= card->mm_size) {
				dev_printk(KERN_INFO, &card->dev->dev,
					   "memory now initialised\n");
				set_userbit(card, MEMORY_INITIALIZED, 1);
			}
		}
		if (bio != page->bio) {
			bio->bi_next = return_bio;
			return_bio = bio;
		}

		if (last)
			break;
	}

	if (debug & DEBUG_LED_ON_TRANSFER)
		set_led(card, LED_REMOVE, LED_OFF);

	if (card->check_batteries) {
		card->check_batteries = 0;
		check_batteries(card);
	}
	if (page->headcnt >= page->cnt) {
		reset_page(page);
		card->Active = -1;
		activate(card);
	} else {
		/* haven't finished with this one yet */
		pr_debug("do some more\n");
		mm_start_io(card);
	}
 out_unlock:
	spin_unlock(&card->lock);

	while (return_bio) {
		struct bio *bio = return_bio;

		return_bio = bio->bi_next;
		bio->bi_next = NULL;
		bio_endio(bio);
	}
}
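/*
 * Incoming bios are appended to card->bio under the lock in mm_submit_bio();
 * activate() runs immediately for sync requests or when plugging is not in
 * effect, otherwise submission to the hardware is deferred to mm_unplug()
 * when the block layer unplugs.
 */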
static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct cardinfo *card = cb->data;

	spin_lock_irq(&card->lock);
	activate(card);
	spin_unlock_irq(&card->lock);
	kfree(cb);
}

static int mm_check_plugged(struct cardinfo *card)
{
	return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
}

static blk_qc_t mm_submit_bio(struct bio *bio)
{
	struct cardinfo *card = bio->bi_disk->private_data;

	pr_debug("mm_make_request %llu %u\n",
		 (unsigned long long)bio->bi_iter.bi_sector,
		 bio->bi_iter.bi_size);

	blk_queue_split(&bio);

	spin_lock_irq(&card->lock);
	*card->biotail = bio;
	bio->bi_next = NULL;
	card->biotail = &bio->bi_next;
	if (op_is_sync(bio->bi_opf) || !mm_check_plugged(card))
		activate(card);
	spin_unlock_irq(&card->lock);

	return BLK_QC_T_NONE;
}
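/*
 * mm_interrupt() checks DMA_STATUS_CTRL to see whether the interrupt is
 * ours, acks the completion bits, logs any error conditions (clearing
 * PCI_STATUS where appropriate), and then defers descriptor processing to
 * the tasklet via card->dma_status.
 */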
static irqreturn_t mm_interrupt(int irq, void *__card)
{
	struct cardinfo *card = (struct cardinfo *) __card;
	unsigned int dma_status;
	unsigned short cfg_status;

	HW_TRACE(0x30);

	dma_status = le32_to_cpu(readl(card->csr_remap + DMA_STATUS_CTRL));

	if (!(dma_status & (DMASCR_ERROR_MASK | DMASCR_CHAIN_COMPLETE))) {
		/* interrupt wasn't for me ... */
		return IRQ_NONE;
	}

	/* clear COMPLETION interrupts */
	if (card->flags & UM_FLAG_NO_BYTE_STATUS)
		writel(cpu_to_le32(DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE),
		       card->csr_remap + DMA_STATUS_CTRL);
	else
		writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16,
		       card->csr_remap + DMA_STATUS_CTRL + 2);

	/* log errors and clear interrupt status */
	if (dma_status & DMASCR_ANY_ERR) {
		unsigned int data_log1, data_log2;
		unsigned int addr_log1, addr_log2;
		unsigned char stat, count, syndrome, check;

		stat = readb(card->csr_remap + MEMCTRLCMD_ERRSTATUS);

		data_log1 = le32_to_cpu(readl(card->csr_remap +
					      ERROR_DATA_LOG));
		data_log2 = le32_to_cpu(readl(card->csr_remap +
					      ERROR_DATA_LOG + 4));
		addr_log1 = le32_to_cpu(readl(card->csr_remap +
					      ERROR_ADDR_LOG));
		addr_log2 = readb(card->csr_remap + ERROR_ADDR_LOG + 4);

		count = readb(card->csr_remap + ERROR_COUNT);
		syndrome = readb(card->csr_remap + ERROR_SYNDROME);
		check = readb(card->csr_remap + ERROR_CHECK);

		dump_dmastat(card, dma_status);

		if (stat & 0x01)
			dev_printk(KERN_ERR, &card->dev->dev,
				"Memory access error detected (err count %d)\n",
				count);
		if (stat & 0x02)
			dev_printk(KERN_ERR, &card->dev->dev,
				"Multi-bit EDC error\n");

		dev_printk(KERN_ERR, &card->dev->dev,
			"Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n",
			addr_log2, addr_log1, data_log2, data_log1);
		dev_printk(KERN_ERR, &card->dev->dev,
			"Fault Check 0x%02x, Fault Syndrome 0x%02x\n",
			check, syndrome);

		writeb(0, card->csr_remap + ERROR_COUNT);
	}

	if (dma_status & DMASCR_PARITY_ERR_REP) {
		dev_printk(KERN_ERR, &card->dev->dev,
			"PARITY ERROR REPORTED\n");
		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
	}

	if (dma_status & DMASCR_PARITY_ERR_DET) {
		dev_printk(KERN_ERR, &card->dev->dev,
			"PARITY ERROR DETECTED\n");
		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
	}

	if (dma_status & DMASCR_SYSTEM_ERR_SIG) {
		dev_printk(KERN_ERR, &card->dev->dev, "SYSTEM ERROR\n");
		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
	}

	if (dma_status & DMASCR_TARGET_ABT) {
		dev_printk(KERN_ERR, &card->dev->dev, "TARGET ABORT\n");
		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
	}

	if (dma_status & DMASCR_MASTER_ABT) {
		dev_printk(KERN_ERR, &card->dev->dev, "MASTER ABORT\n");
		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
	}

	/* and process the DMA descriptors */
	card->dma_status = dma_status;
	tasklet_schedule(&card->tasklet);

	HW_TRACE(0x36);

	return IRQ_HANDLED;
}
/*
 * If both batteries are good, no LED
 * If either battery has been warned, solid LED
 * If both batteries are bad, flash the LED quickly
 * If either battery is bad, flash the LED semi quickly
 */
static void set_fault_to_battery_status(struct cardinfo *card)
{
	if (card->battery[0].good && card->battery[1].good)
		set_led(card, LED_FAULT, LED_OFF);
	else if (card->battery[0].warned || card->battery[1].warned)
		set_led(card, LED_FAULT, LED_ON);
	else if (!card->battery[0].good && !card->battery[1].good)
		set_led(card, LED_FAULT, LED_FLASH_7_0);
	else
		set_led(card, LED_FAULT, LED_FLASH_3_5);
}

static void init_battery_timer(void);

static int check_battery(struct cardinfo *card, int battery, int status)
{
	if (status != card->battery[battery].good) {
		card->battery[battery].good = !card->battery[battery].good;
		card->battery[battery].last_change = jiffies;

		if (card->battery[battery].good) {
			dev_printk(KERN_ERR, &card->dev->dev,
				"Battery %d now good\n", battery + 1);
			card->battery[battery].warned = 0;
		} else
			dev_printk(KERN_ERR, &card->dev->dev,
				"Battery %d now FAILED\n", battery + 1);

		return 1;
	} else if (!card->battery[battery].good &&
		   !card->battery[battery].warned &&
		   time_after_eq(jiffies, card->battery[battery].last_change +
				 (HZ * 60 * 60 * 5))) {
		dev_printk(KERN_ERR, &card->dev->dev,
			"Battery %d still FAILED after 5 hours\n", battery + 1);
		card->battery[battery].warned = 1;

		return 1;
	}

	return 0;
}
static void check_batteries(struct cardinfo *card)
{
	/* NOTE: this must *never* be called while the card
	 * is doing (bus-to-card) DMA, or you will need the
	 * reset switch
	 */
	unsigned char status;
	int ret1, ret2;

	status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
	if (debug & DEBUG_BATTERY_POLLING)
		dev_printk(KERN_DEBUG, &card->dev->dev,
			"checking battery status, 1 = %s, 2 = %s\n",
			(status & BATTERY_1_FAILURE) ? "FAILURE" : "OK",
			(status & BATTERY_2_FAILURE) ? "FAILURE" : "OK");

	ret1 = check_battery(card, 0, !(status & BATTERY_1_FAILURE));
	ret2 = check_battery(card, 1, !(status & BATTERY_2_FAILURE));

	if (ret1 || ret2)
		set_fault_to_battery_status(card);
}
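/*
 * Battery status is polled roughly once a minute by a self-rearming timer
 * (check_all_batteries() calls init_battery_timer() again).  If a card is in
 * the middle of DMA (Active >= 0), the check is deferred via
 * card->check_batteries and performed later from process_page().
 */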
static void check_all_batteries(struct timer_list *unused)
{
	int i;

	for (i = 0; i < num_cards; i++)
		if (!(cards[i].flags & UM_FLAG_NO_BATT)) {
			struct cardinfo *card = &cards[i];
			spin_lock_bh(&card->lock);
			if (card->Active >= 0)
				card->check_batteries = 1;
			else
				check_batteries(card);
			spin_unlock_bh(&card->lock);
		}

	init_battery_timer();
}

static void init_battery_timer(void)
{
	timer_setup(&battery_timer, check_all_batteries, 0);
	battery_timer.expires = jiffies + (HZ * 60);
	add_timer(&battery_timer);
}

static void del_battery_timer(void)
{
	del_timer(&battery_timer);
}
/*
 * Note no locks taken out here.  In a worst case scenario, we could drop
 * a chunk of system memory. But that should never happen, since validation
 * happens at open or mount time, when locks are held.
 *
 * That's crap, since doing that while some partitions are opened
 * or mounted will give you really nasty results.
 */
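/* card->mm_size is in kilobytes; << 1 converts it to 512-byte sectors. */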
static int mm_revalidate(struct gendisk *disk)
{
	struct cardinfo *card = disk->private_data;
	set_capacity(disk, card->mm_size << 1);
	return 0;
}

static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct cardinfo *card = bdev->bd_disk->private_data;
	int size = card->mm_size * (1024 / MM_HARDSECT);

	/*
	 * get geometry: we have to fake one...  trim the size to a
	 * multiple of 2048 (1M): tell we have 32 sectors, 64 heads,
	 * whatever cylinders.
	 */
	geo->heads     = 64;
	geo->sectors   = 32;
	geo->cylinders = size / (geo->heads * geo->sectors);
	return 0;
}

static const struct block_device_operations mm_fops = {
	.owner		 = THIS_MODULE,
	.submit_bio	 = mm_submit_bio,
	.getgeo		 = mm_getgeo,
	.revalidate_disk = mm_revalidate,
};
static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;
	struct cardinfo *card = &cards[num_cards];
	unsigned char	mem_present;
	unsigned char	batt_status;
	unsigned int	saved_bar, data;
	unsigned long	csr_base;
	unsigned long	csr_len;
	int		magic_number;
	static int	printed_version;

	if (!printed_version++)
		printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8);
	pci_set_master(dev);

	card->dev = dev;

	csr_base = pci_resource_start(dev, 0);
	csr_len  = pci_resource_len(dev, 0);
	if (!csr_base || !csr_len)
		return -ENODEV;

	dev_printk(KERN_INFO, &dev->dev,
	  "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");

	if (dma_set_mask(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
		dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
		return -ENOMEM;
	}

	ret = pci_request_regions(dev, DRIVER_NAME);
	if (ret) {
		dev_printk(KERN_ERR, &card->dev->dev,
			"Unable to request memory region\n");
		goto failed_req_csr;
	}

	card->csr_remap = ioremap(csr_base, csr_len);
	if (!card->csr_remap) {
		dev_printk(KERN_ERR, &card->dev->dev,
			"Unable to remap memory region\n");
		ret = -ENOMEM;

		goto failed_remap_csr;
	}

	dev_printk(KERN_INFO, &card->dev->dev,
		"CSR 0x%08lx -> 0x%p (0x%lx)\n",
		csr_base, card->csr_remap, csr_len);
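	/*
	 * The PCI device ID identifies the controller generation, which
	 * determines both the expected magic byte checked below and the
	 * quirk flags (byte-wide status access, battery register, battery
	 * presence).
	 */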
	switch (card->dev->device) {
	case 0x5415:
		card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG;
		magic_number = 0x59;
		break;

	case 0x5425:
		card->flags |= UM_FLAG_NO_BYTE_STATUS;
		magic_number = 0x5C;
		break;

	case 0x6155:
		card->flags |= UM_FLAG_NO_BYTE_STATUS |
				UM_FLAG_NO_BATTREG | UM_FLAG_NO_BATT;
		magic_number = 0x99;
		break;

	default:
		magic_number = 0x100;
		break;
	}

	if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) {
		dev_printk(KERN_ERR, &card->dev->dev, "Magic number invalid\n");
		ret = -ENOMEM;
		goto failed_magic;
	}

	card->mm_pages[0].desc = dma_alloc_coherent(&card->dev->dev,
			PAGE_SIZE * 2, &card->mm_pages[0].page_dma, GFP_KERNEL);
	card->mm_pages[1].desc = dma_alloc_coherent(&card->dev->dev,
			PAGE_SIZE * 2, &card->mm_pages[1].page_dma, GFP_KERNEL);
	if (card->mm_pages[0].desc == NULL ||
	    card->mm_pages[1].desc == NULL) {
		dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
		ret = -ENOMEM;
		goto failed_alloc;
	}
	reset_page(&card->mm_pages[0]);
	reset_page(&card->mm_pages[1]);
	card->Ready = 0;	/* page 0 is ready */
	card->Active = -1;	/* no page is active */
	card->bio = NULL;
	card->biotail = &card->bio;
	spin_lock_init(&card->lock);

	card->queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!card->queue) {
		ret = -ENOMEM;
		goto failed_alloc;
	}

	tasklet_init(&card->tasklet, process_page, (unsigned long)card);

	card->check_batteries = 0;

	mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY);
	switch (mem_present) {
	case MEM_128_MB:
		card->mm_size = 1024 * 128;
		break;
	case MEM_256_MB:
		card->mm_size = 1024 * 256;
		break;
	case MEM_512_MB:
		card->mm_size = 1024 * 512;
		break;
	case MEM_1_GB:
		card->mm_size = 1024 * 1024;
		break;
	case MEM_2_GB:
		card->mm_size = 1024 * 2048;
		break;
	default:
		card->mm_size = 0;
		break;
	}

	/* Clear the LED's we control */
	set_led(card, LED_REMOVE, LED_OFF);
	set_led(card, LED_FAULT, LED_OFF);

	batt_status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);

	card->battery[0].good = !(batt_status & BATTERY_1_FAILURE);
	card->battery[1].good = !(batt_status & BATTERY_2_FAILURE);
	card->battery[0].last_change = card->battery[1].last_change = jiffies;

	if (card->flags & UM_FLAG_NO_BATT)
		dev_printk(KERN_INFO, &card->dev->dev,
			"Size %d KB\n", card->mm_size);
	else {
		dev_printk(KERN_INFO, &card->dev->dev,
			"Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n",
			card->mm_size,
			batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled",
			card->battery[0].good ? "OK" : "FAILURE",
			batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled",
			card->battery[1].good ? "OK" : "FAILURE");

		set_fault_to_battery_status(card);
	}
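	/*
	 * Size the memory aperture BAR the classic way: save it, write all
	 * ones, read it back, restore it, then mask off the low flag bits
	 * and take the two's complement to get the window size reported
	 * below.
	 */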
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &saved_bar);
	data = 0xffffffff;
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, data);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &data);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, saved_bar);
	data &= 0xfffffff0;
	data = ~data;
	data += 1;

	if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME,
			card)) {
		dev_printk(KERN_ERR, &card->dev->dev,
			"Unable to allocate IRQ\n");
		ret = -ENODEV;
		goto failed_req_irq;
	}

	dev_printk(KERN_INFO, &card->dev->dev,
		"Window size %d bytes, IRQ %d\n", data, dev->irq);

	pci_set_drvdata(dev, card);

	if (pci_write_cmd != 0x0F)	/* If not Memory Write & Invalidate */
		pci_write_cmd = 0x07;	/* then Memory Write command */

	if (pci_write_cmd & 0x08) { /* use Memory Write and Invalidate */
		unsigned short cfg_command;
		pci_read_config_word(dev, PCI_COMMAND, &cfg_command);
		cfg_command |= 0x10;	/* Memory Write & Invalidate Enable */
		pci_write_config_word(dev, PCI_COMMAND, cfg_command);
	}
	pci_cmds = (pci_read_cmd << 28) | (pci_write_cmd << 24);

	num_cards++;
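	/*
	 * init_size counts how many sectors from the start of the card are
	 * known to have been written.  process_page() advances it on
	 * sequential writes and sets the MEMORY_INITIALIZED user bit once
	 * the whole card has been covered (see the 17May2002 note in the
	 * header comment).
	 */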
	if (!get_userbit(card, MEMORY_INITIALIZED)) {
		dev_printk(KERN_INFO, &card->dev->dev,
			"memory NOT initialized. Consider over-writing whole device.\n");
		card->init_size = 0;
	} else {
		dev_printk(KERN_INFO, &card->dev->dev,
			"memory already initialized\n");
		card->init_size = card->mm_size;
	}

	/* Enable ECC */
	writeb(EDC_STORE_CORRECT, card->csr_remap + MEMCTRLCMD_ERRCTRL);

	return 0;

 failed_req_irq:
 failed_alloc:
	if (card->mm_pages[0].desc)
		dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
				  card->mm_pages[0].desc,
				  card->mm_pages[0].page_dma);
	if (card->mm_pages[1].desc)
		dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
				  card->mm_pages[1].desc,
				  card->mm_pages[1].page_dma);
 failed_magic:
	iounmap(card->csr_remap);
 failed_remap_csr:
	pci_release_regions(dev);
 failed_req_csr:

	return ret;
}
static void mm_pci_remove(struct pci_dev *dev)
{
	struct cardinfo *card = pci_get_drvdata(dev);

	tasklet_kill(&card->tasklet);
	free_irq(dev->irq, card);
	iounmap(card->csr_remap);

	if (card->mm_pages[0].desc)
		dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
				  card->mm_pages[0].desc,
				  card->mm_pages[0].page_dma);
	if (card->mm_pages[1].desc)
		dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
				  card->mm_pages[1].desc,
				  card->mm_pages[1].page_dma);
	blk_cleanup_queue(card->queue);

	pci_release_regions(dev);
	pci_disable_device(dev);
}

static const struct pci_device_id mm_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5415CN)},
	{PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5425CN)},
	{PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_6155)},
	{
		.vendor	=	0x8086,
		.device	=	0xB555,
		.subvendor =	0x1332,
		.subdevice =	0x5460,
		.class =	0x050000,
		.class_mask =	0,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, mm_pci_ids);

static struct pci_driver mm_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mm_pci_ids,
	.probe		= mm_pci_probe,
	.remove		= mm_pci_remove,
};
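/*
 * This legacy init sequence relies on mm_pci_probe() having run for every
 * present card during pci_register_driver() (each probe bumps num_cards),
 * so mm_init() can then allocate and register one gendisk per discovered
 * card.
 */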
static int __init mm_init(void)
{
	int retval, i;
	int err;

	retval = pci_register_driver(&mm_pci_driver);
	if (retval)
		return -ENOMEM;

	err = major_nr = register_blkdev(0, DRIVER_NAME);
	if (err < 0) {
		pci_unregister_driver(&mm_pci_driver);
		return -EIO;
	}

	for (i = 0; i < num_cards; i++) {
		mm_gendisk[i] = alloc_disk(1 << MM_SHIFT);
		if (!mm_gendisk[i])
			goto out;
	}

	for (i = 0; i < num_cards; i++) {
		struct gendisk *disk = mm_gendisk[i];
		sprintf(disk->disk_name, "umem%c", 'a'+i);
		spin_lock_init(&cards[i].lock);
		disk->major = major_nr;
		disk->first_minor = i << MM_SHIFT;
		disk->fops = &mm_fops;
		disk->private_data = &cards[i];
		disk->queue = cards[i].queue;
		set_capacity(disk, cards[i].mm_size << 1);
		add_disk(disk);
	}

	init_battery_timer();
	printk(KERN_INFO "MM: desc_per_page = %ld\n", DESC_PER_PAGE);
/*	printk("mm_init: Done. 10-19-01 9:00\n"); */
	return 0;

out:
	pci_unregister_driver(&mm_pci_driver);
	unregister_blkdev(major_nr, DRIVER_NAME);
	while (i--)
		put_disk(mm_gendisk[i]);
	return -ENOMEM;
}

static void __exit mm_cleanup(void)
{
	int i;

	del_battery_timer();

	for (i = 0; i < num_cards ; i++) {
		del_gendisk(mm_gendisk[i]);
		put_disk(mm_gendisk[i]);
	}

	pci_unregister_driver(&mm_pci_driver);
	unregister_blkdev(major_nr, DRIVER_NAME);
}

module_init(mm_init);
module_exit(mm_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");