// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI bus driver for the Topcliff PCH used by Intel SoCs
 *
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 */
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spi/spidev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/pch_dma.h>
/* Register offsets */
#define PCH_SPCR 0x00 /* SPI control register */
#define PCH_SPBRR 0x04 /* SPI baud rate register */
#define PCH_SPSR 0x08 /* SPI status register */
#define PCH_SPDWR 0x0C /* SPI write data register */
#define PCH_SPDRR 0x10 /* SPI read data register */
#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */
#define PCH_SRST 0x1C /* SPI reset register */
#define PCH_ADDRESS_SIZE 0x20

#define PCH_SPSR_TFD 0x000007C0
#define PCH_SPSR_RFD 0x0000F800

#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD) >> 11)
#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD) >> 6)
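/*
 * Worked example of the FIFO-depth decoding above (register value is
 * hypothetical, for illustration only): if SPSR reads 0x2840, then
 * PCH_READABLE(0x2840) = (0x2840 & 0xF800) >> 11 = 5, i.e. five words
 * waiting in the Rx FIFO, and PCH_WRITABLE(0x2840) =
 * (0x2840 & 0x7C0) >> 6 = 1 for the Tx FIFO depth field.
 */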
#define PCH_RX_THOLD 7
#define PCH_RX_THOLD_MAX 15

#define PCH_TX_THOLD 2

#define PCH_MAX_BAUDRATE 5000000
#define PCH_MAX_FIFO_DEPTH 16

#define STATUS_RUNNING 1
#define STATUS_EXITING 2
#define PCH_SLEEP_TIME 10

#define SSN_LOW 0x02U
#define SSN_HIGH 0x03U
#define SSN_NO_CONTROL 0x00U
#define PCH_MAX_CS 0xFF
#define PCI_DEVICE_ID_GE_SPI 0x8816

#define SPCR_SPE_BIT (1 << 0)
#define SPCR_MSTR_BIT (1 << 1)
#define SPCR_LSBF_BIT (1 << 4)
#define SPCR_CPHA_BIT (1 << 5)
#define SPCR_CPOL_BIT (1 << 6)
#define SPCR_TFIE_BIT (1 << 8)
#define SPCR_RFIE_BIT (1 << 9)
#define SPCR_FIE_BIT (1 << 10)
#define SPCR_ORIE_BIT (1 << 11)
#define SPCR_MDFIE_BIT (1 << 12)
#define SPCR_FICLR_BIT (1 << 24)
#define SPSR_TFI_BIT (1 << 0)
#define SPSR_RFI_BIT (1 << 1)
#define SPSR_FI_BIT (1 << 2)
#define SPSR_ORF_BIT (1 << 3)
#define SPBRR_SIZE_BIT (1 << 10)

#define PCH_ALL (SPCR_TFIE_BIT | SPCR_RFIE_BIT | SPCR_FIE_BIT | \
                 SPCR_ORIE_BIT | SPCR_MDFIE_BIT)

#define SPCR_RFIC_FIELD 20
#define SPCR_TFIC_FIELD 16

#define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1)
#define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD)
#define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD)

#define PCH_CLOCK_HZ 50000000
#define PCH_MAX_SPBR 1023
/* Definitions for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
#define PCI_DEVICE_ID_ML7213_SPI 0x802c
#define PCI_DEVICE_ID_ML7223_SPI 0x800F
#define PCI_DEVICE_ID_ML7831_SPI 0x8816

/*
 * Maximum number of SPI channel instances per device:
 * Intel EG20T PCH                : 1 channel
 * LAPIS Semiconductor ML7213 IOH : 2 channels
 * LAPIS Semiconductor ML7223 IOH : 1 channel
 * LAPIS Semiconductor ML7831 IOH : 1 channel
 */
#define PCH_SPI_MAX_DEV 2

#define PCH_BUF_SIZE 4096
#define PCH_DMA_TRANS_SIZE 12
static int use_dma = 1;

struct pch_spi_dma_ctrl {
        struct dma_async_tx_descriptor *desc_tx;
        struct dma_async_tx_descriptor *desc_rx;
        struct pch_dma_slave param_tx;
        struct pch_dma_slave param_rx;
        struct dma_chan *chan_tx;
        struct dma_chan *chan_rx;
        struct scatterlist *sg_tx_p;
        struct scatterlist *sg_rx_p;
        struct scatterlist sg_tx;
        struct scatterlist sg_rx;
        int nent;
        void *tx_buf_virt;
        void *rx_buf_virt;
        dma_addr_t tx_buf_dma;
        dma_addr_t rx_buf_dma;
};
/**
 * struct pch_spi_data - Holds the SPI channel specific details
 * @io_remap_addr: The remapped PCI base address
 * @io_base_addr: Base address
 * @master: Pointer to the SPI master structure
 * @work: Reference to work queue handler
 * @wait: Wait queue for waking up upon receiving an interrupt.
 * @transfer_complete: Status of SPI Transfer
 * @bcurrent_msg_processing: Status flag for message processing
 * @lock: Lock for protecting this structure
 * @queue: SPI Message queue
 * @status: Status of the SPI driver
 * @bpw_len: Length of data to be transferred in bits per word
 * @transfer_active: Flag showing active transfer
 * @tx_index: Transmit data count; for bookkeeping during transfer
 * @rx_index: Receive data count; for bookkeeping during transfer
 * @pkt_tx_buff: Buffer for data to be transmitted
 * @pkt_rx_buff: Buffer for received data
 * @n_curnt_chip: The chip number that this SPI driver currently operates on
 * @current_chip: Reference to the current chip that this SPI driver currently
 *	operates on
 * @current_msg: The current message that this SPI driver is handling
 * @cur_trans: The current transfer that this SPI driver is handling
 * @board_dat: Reference to the SPI device data structure
 * @plat_dev: platform_device structure
 * @ch: SPI channel number
 * @dma: Local DMA information
 * @use_dma: True if DMA is to be used
 * @irq_reg_sts: Status of IRQ registration
 * @save_total_len: Save length while data is being transferred
 */
struct pch_spi_data {
        void __iomem *io_remap_addr;
        unsigned long io_base_addr;
        struct spi_master *master;
        struct work_struct work;
        wait_queue_head_t wait;
        u8 transfer_complete;
        u8 bcurrent_msg_processing;
        spinlock_t lock;
        struct list_head queue;
        u8 status;
        u32 bpw_len;
        u8 transfer_active;
        u32 tx_index;
        u32 rx_index;
        u16 *pkt_tx_buff;
        u16 *pkt_rx_buff;
        u8 n_curnt_chip;
        struct spi_device *current_chip;
        struct spi_message *current_msg;
        struct spi_transfer *cur_trans;
        struct pch_spi_board_data *board_dat;
        struct platform_device *plat_dev;
        int ch;
        struct pch_spi_dma_ctrl dma;
        int use_dma;
        u8 irq_reg_sts;
        int save_total_len;
};
/**
 * struct pch_spi_board_data - Holds the SPI device specific details
 * @pdev: Pointer to the PCI device
 * @suspend_sts: Status of suspend
 * @num: The number of SPI device instances
 */
struct pch_spi_board_data {
        struct pci_dev *pdev;
        u8 suspend_sts;
        int num;
};

struct pch_pd_dev_save {
        int num;
        struct platform_device *pd_save[PCH_SPI_MAX_DEV];
        struct pch_spi_board_data *board_dat;
};

static const struct pci_device_id pch_spi_pcidev_id[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI),     1, },
        { PCI_VDEVICE(ROHM,  PCI_DEVICE_ID_ML7213_SPI), 2, },
        { PCI_VDEVICE(ROHM,  PCI_DEVICE_ID_ML7223_SPI), 1, },
        { PCI_VDEVICE(ROHM,  PCI_DEVICE_ID_ML7831_SPI), 1, },
        { }
};
/**
 * pch_spi_writereg() - Performs register writes
 * @master: Pointer to struct spi_master.
 * @idx: Register offset.
 * @val: Value to be written to register.
 */
static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
{
        struct pch_spi_data *data = spi_master_get_devdata(master);

        iowrite32(val, (data->io_remap_addr + idx));
}

/**
 * pch_spi_readreg() - Performs register reads
 * @master: Pointer to struct spi_master.
 * @idx: Register offset.
 */
static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
{
        struct pch_spi_data *data = spi_master_get_devdata(master);

        return ioread32(data->io_remap_addr + idx);
}

static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
                                      u32 set, u32 clr)
{
        u32 tmp = pch_spi_readreg(master, idx);

        tmp = (tmp & ~clr) | set;
        pch_spi_writereg(master, idx, tmp);
}
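/*
 * pch_spi_setclr_reg() is a read-modify-write helper: the bits in @clr
 * are cleared first, then the bits in @set are ORed in. For
 * illustration, this mirrors the call made in pch_spi_start_transfer()
 * below, which enables the SPI core while masking every interrupt
 * source in one register access:
 *
 *        pch_spi_setclr_reg(master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
 */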
static void pch_spi_set_master_mode(struct spi_master *master)
{
        pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
}

/**
 * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
 * @master: Pointer to struct spi_master.
 */
static void pch_spi_clear_fifo(struct spi_master *master)
{
        pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
        pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
}
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
                                void __iomem *io_remap_addr)
{
        u32 n_read, tx_index, rx_index, bpw_len;
        u16 *pkt_rx_buffer, *pkt_tx_buff;
        int read_cnt;
        u32 reg_spcr_val;
        void __iomem *spsr;
        void __iomem *spdrr;
        void __iomem *spdwr;

        spsr = io_remap_addr + PCH_SPSR;
        iowrite32(reg_spsr_val, spsr);

        if (data->transfer_active) {
                rx_index = data->rx_index;
                tx_index = data->tx_index;
                bpw_len = data->bpw_len;
                pkt_rx_buffer = data->pkt_rx_buff;
                pkt_tx_buff = data->pkt_tx_buff;

                spdrr = io_remap_addr + PCH_SPDRR;
                spdwr = io_remap_addr + PCH_SPDWR;

                n_read = PCH_READABLE(reg_spsr_val);

                for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
                        pkt_rx_buffer[rx_index++] = ioread32(spdrr);
                        if (tx_index < bpw_len)
                                iowrite32(pkt_tx_buff[tx_index++], spdwr);
                }

                /* disable RFI if not needed */
                if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
                        reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
                        reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */

                        /* reset rx threshold */
                        reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
                        reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);

                        iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
                }

                /* update counts */
                data->tx_index = tx_index;
                data->rx_index = rx_index;

                /* if transfer complete interrupt */
                if (reg_spsr_val & SPSR_FI_BIT) {
                        if ((tx_index == bpw_len) && (rx_index == tx_index)) {
                                /* disable interrupts */
                                pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
                                                   PCH_ALL);

                                /*
                                 * transfer is completed; inform
                                 * pch_spi_process_messages
                                 */
                                data->transfer_complete = true;
                                data->transfer_active = false;
                                wake_up(&data->wait);
                        } else {
                                dev_vdbg(&data->master->dev,
                                         "%s : Transfer is not completed",
                                         __func__);
                        }
                }
        }
}
/**
 * pch_spi_handler() - Interrupt handler
 * @irq: The interrupt number.
 * @dev_id: Pointer to struct pch_spi_board_data.
 */
static irqreturn_t pch_spi_handler(int irq, void *dev_id)
{
        u32 reg_spsr_val;
        void __iomem *spsr;
        void __iomem *io_remap_addr;
        irqreturn_t ret = IRQ_NONE;
        struct pch_spi_data *data = dev_id;
        struct pch_spi_board_data *board_dat = data->board_dat;

        if (board_dat->suspend_sts) {
                dev_dbg(&board_dat->pdev->dev,
                        "%s returning due to suspend\n", __func__);
                return IRQ_NONE;
        }

        io_remap_addr = data->io_remap_addr;
        spsr = io_remap_addr + PCH_SPSR;

        reg_spsr_val = ioread32(spsr);

        if (reg_spsr_val & SPSR_ORF_BIT) {
                dev_err(&board_dat->pdev->dev, "%s overrun error\n", __func__);
                if (data->current_msg->complete) {
                        data->transfer_complete = true;
                        data->current_msg->status = -EIO;
                        data->current_msg->complete(data->current_msg->context);
                        data->bcurrent_msg_processing = false;
                        data->current_msg = NULL;
                        data->cur_trans = NULL;
                }
        }

        if (data->use_dma)
                return IRQ_NONE;

        /* Check if the interrupt is for the SPI device */
        if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
                pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
                ret = IRQ_HANDLED;
        }

        dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
                __func__, ret);

        return ret;
}
/**
 * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
 * @master: Pointer to struct spi_master.
 * @speed_hz: Baud rate.
 */
static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
{
        u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);

        /* if the requested baud rate is slower than we can support, clamp the divider */
        if (n_spbr > PCH_MAX_SPBR)
                n_spbr = PCH_MAX_SPBR;

        pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
}
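/*
 * The divider math above: SPBR = PCH_CLOCK_HZ / (2 * speed_hz), so with
 * the 50 MHz input clock a 5 MHz transfer (PCH_MAX_BAUDRATE) needs
 * SPBR = 5 and a 1 MHz transfer needs SPBR = 25. Rates slower than
 * 50 MHz / (2 * 1023), roughly 24.4 kHz, clamp to PCH_MAX_SPBR.
 */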
/**
 * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
 * @master: Pointer to struct spi_master.
 * @bits_per_word: Bits per word for SPI transfer.
 */
static void pch_spi_set_bits_per_word(struct spi_master *master,
                                      u8 bits_per_word)
{
        if (bits_per_word == 8)
                pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
        else
                pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
}
/**
 * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer
 * @spi: Pointer to struct spi_device.
 */
static void pch_spi_setup_transfer(struct spi_device *spi)
{
        u32 flags = 0;

        dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
                __func__, pch_spi_readreg(spi->master, PCH_SPBRR),
                spi->max_speed_hz);
        pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);

        /* set bits per word */
        pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);

        if (!(spi->mode & SPI_LSB_FIRST))
                flags |= SPCR_LSBF_BIT;
        if (spi->mode & SPI_CPOL)
                flags |= SPCR_CPOL_BIT;
        if (spi->mode & SPI_CPHA)
                flags |= SPCR_CPHA_BIT;
        pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
                           (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));

        /* Clear the FIFO by toggling FICLR to 1 and back to 0 */
        pch_spi_clear_fifo(spi->master);
}
/**
 * pch_spi_reset() - Clears SPI registers
 * @master: Pointer to struct spi_master.
 */
static void pch_spi_reset(struct spi_master *master)
{
        /* write 1 to reset SPI */
        pch_spi_writereg(master, PCH_SRST, 0x1);

        /* clear reset */
        pch_spi_writereg(master, PCH_SRST, 0x0);
}
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
        struct spi_transfer *transfer;
        struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
        int retval;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        /* validate Tx/Rx buffers and Transfer length */
        list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
                if (!transfer->tx_buf && !transfer->rx_buf) {
                        dev_err(&pspi->dev,
                                "%s Tx and Rx buffer NULL\n", __func__);
                        retval = -EINVAL;
                        goto err_return_spinlock;
                }

                if (!transfer->len) {
                        dev_err(&pspi->dev, "%s Transfer length invalid\n",
                                __func__);
                        retval = -EINVAL;
                        goto err_return_spinlock;
                }

                dev_dbg(&pspi->dev,
                        "%s Tx/Rx buffer valid. Transfer length valid\n",
                        __func__);
        }
        spin_unlock_irqrestore(&data->lock, flags);

        /* We won't process any messages if we have been asked to terminate */
        if (data->status == STATUS_EXITING) {
                dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
                retval = -ESHUTDOWN;
                goto err_out;
        }

        /* If suspended, return -EINVAL */
        if (data->board_dat->suspend_sts) {
                dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
                retval = -EINVAL;
                goto err_out;
        }

        /* set status of message */
        pmsg->actual_length = 0;
        dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);

        pmsg->status = -EINPROGRESS;

        spin_lock_irqsave(&data->lock, flags);
        /* add message to queue */
        list_add_tail(&pmsg->queue, &data->queue);
        spin_unlock_irqrestore(&data->lock, flags);
        dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);

        schedule_work(&data->work);
        dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);

        retval = 0;

err_out:
        dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
        return retval;
err_return_spinlock:
        dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
        spin_unlock_irqrestore(&data->lock, flags);
        return retval;
}
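/*
 * Usage sketch (hypothetical client code, not part of this driver): a
 * slave device driver queues work through the generic SPI core, which
 * reaches this driver via the legacy master->transfer hook set up in
 * pch_spi_pd_probe() below:
 *
 *        struct spi_transfer t = {
 *                .tx_buf = tx, .rx_buf = rx, .len = len,
 *        };
 *        struct spi_message m;
 *
 *        spi_message_init(&m);
 *        spi_message_add_tail(&t, &m);
 *        spi_async(spi, &m);        // ends up in pch_spi_transfer()
 */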
static inline void pch_spi_select_chip(struct pch_spi_data *data,
                                       struct spi_device *pspi)
{
        if (data->current_chip != NULL) {
                if (pspi->chip_select != data->n_curnt_chip) {
                        dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
                        data->current_chip = NULL;
                }
        }

        data->current_chip = pspi;

        data->n_curnt_chip = data->current_chip->chip_select;

        dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
        pch_spi_setup_transfer(pspi);
}
static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
{
        int size;
        u32 n_writes;
        int j;
        struct spi_message *pmsg, *tmp;
        const u8 *tx_buf;
        const u16 *tx_sbuf;

        /* set baud rate if needed */
        if (data->cur_trans->speed_hz) {
                dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
                pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
        }

        /* set bits per word if needed */
        if (data->cur_trans->bits_per_word &&
            (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
                dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
                pch_spi_set_bits_per_word(data->master,
                                          data->cur_trans->bits_per_word);
                *bpw = data->cur_trans->bits_per_word;
        } else {
                *bpw = data->current_msg->spi->bits_per_word;
        }

        /* reset Tx/Rx index */
        data->tx_index = 0;
        data->rx_index = 0;

        data->bpw_len = data->cur_trans->len / (*bpw / 8);

        /* find alloc size */
        size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);

        /* allocate memory for pkt_tx_buff & pkt_rx_buffer */
        data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
        if (data->pkt_tx_buff != NULL) {
                data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
                if (!data->pkt_rx_buff) {
                        kfree(data->pkt_tx_buff);
                        data->pkt_tx_buff = NULL;
                }
        }

        if (!data->pkt_rx_buff) {
                /* flush queue and set status of all transfers to -ENOMEM */
                list_for_each_entry_safe(pmsg, tmp, &data->queue, queue) {
                        pmsg->status = -ENOMEM;

                        if (pmsg->complete)
                                pmsg->complete(pmsg->context);

                        /* delete from queue */
                        list_del_init(&pmsg->queue);
                }
                return;
        }

        /* copy Tx Data */
        if (data->cur_trans->tx_buf != NULL) {
                if (*bpw == 8) {
                        tx_buf = data->cur_trans->tx_buf;
                        for (j = 0; j < data->bpw_len; j++)
                                data->pkt_tx_buff[j] = *tx_buf++;
                } else {
                        tx_sbuf = data->cur_trans->tx_buf;
                        for (j = 0; j < data->bpw_len; j++)
                                data->pkt_tx_buff[j] = *tx_sbuf++;
                }
        }

        /* if len is greater than PCH_MAX_FIFO_DEPTH, write 16, else len words */
        n_writes = data->bpw_len;
        if (n_writes > PCH_MAX_FIFO_DEPTH)
                n_writes = PCH_MAX_FIFO_DEPTH;

        dev_dbg(&data->master->dev,
                "\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
                __func__);
        pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);

        for (j = 0; j < n_writes; j++)
                pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);

        /* update tx_index */
        data->tx_index = j;

        /* reset transfer complete flag */
        data->transfer_complete = false;
        data->transfer_active = true;
}
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
        struct spi_message *pmsg, *tmp;

        dev_dbg(&data->master->dev, "%s called\n", __func__);

        /*
         * Invoke the complete callback to tell the SPI core that this
         * transfer is done.
         */
        data->current_msg->status = 0;
        if (data->current_msg->complete) {
                dev_dbg(&data->master->dev,
                        "%s:Invoking callback of SPI core\n", __func__);
                data->current_msg->complete(data->current_msg->context);
        }

        /* update status in global variable */
        data->bcurrent_msg_processing = false;
        dev_dbg(&data->master->dev,
                "%s:data->bcurrent_msg_processing = false\n", __func__);

        data->current_msg = NULL;
        data->cur_trans = NULL;

        /*
         * Check whether there are items left in the queue and we are
         * neither suspending nor exiting.
         */
        if ((list_empty(&data->queue) == 0) &&
            (!data->board_dat->suspend_sts) &&
            (data->status != STATUS_EXITING)) {
                /*
                 * We have some more work to do (either there are more
                 * transfer requests in the current message or there are
                 * more messages).
                 */
                dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
                schedule_work(&data->work);
        } else if (data->board_dat->suspend_sts ||
                   data->status == STATUS_EXITING) {
                dev_dbg(&data->master->dev,
                        "%s suspend/remove initiated, flushing queue\n",
                        __func__);
                list_for_each_entry_safe(pmsg, tmp, &data->queue, queue) {
                        pmsg->status = -EIO;

                        if (pmsg->complete)
                                pmsg->complete(pmsg->context);

                        /* delete from queue */
                        list_del_init(&pmsg->queue);
                }
        }
}
static void pch_spi_set_ir(struct pch_spi_data *data)
{
        /* enable interrupts, set threshold, enable SPI */
        if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
                /* set receive threshold to PCH_RX_THOLD */
                pch_spi_setclr_reg(data->master, PCH_SPCR,
                                   PCH_RX_THOLD << SPCR_RFIC_FIELD |
                                   SPCR_FIE_BIT | SPCR_RFIE_BIT |
                                   SPCR_ORIE_BIT | SPCR_SPE_BIT,
                                   MASK_RFIC_SPCR_BITS | PCH_ALL);
        else
                /* set receive threshold to maximum */
                pch_spi_setclr_reg(data->master, PCH_SPCR,
                                   PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
                                   SPCR_FIE_BIT | SPCR_ORIE_BIT |
                                   SPCR_SPE_BIT,
                                   MASK_RFIC_SPCR_BITS | PCH_ALL);

        /*
         * Wait until the transfer completes; go to sleep after
         * initiating the transfer.
         */
        dev_dbg(&data->master->dev,
                "%s:waiting for transfer to get over\n", __func__);

        wait_event_interruptible(data->wait, data->transfer_complete);

        /* clear all interrupts */
        pch_spi_writereg(data->master, PCH_SPSR,
                         pch_spi_readreg(data->master, PCH_SPSR));
        /* Disable interrupts and SPI transfer */
        pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
        /* clear FIFO */
        pch_spi_clear_fifo(data->master);
}
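/*
 * Threshold rationale, as read from pch_spi_set_ir() above: for
 * transfers longer than the 16-word FIFO, the Rx threshold interrupt at
 * PCH_RX_THOLD (7) gives pch_spi_handler_sub() a chance to drain the Rx
 * FIFO and refill the Tx FIFO mid-transfer; for short transfers the
 * threshold is parked at PCH_RX_THOLD_MAX (15) and only the completion
 * and overrun interrupts are enabled.
 */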
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
{
        int j;
        u8 *rx_buf;
        u16 *rx_sbuf;

        /* copy Rx Data */
        if (!data->cur_trans->rx_buf)
                return;

        if (bpw == 8) {
                rx_buf = data->cur_trans->rx_buf;
                for (j = 0; j < data->bpw_len; j++)
                        *rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
        } else {
                rx_sbuf = data->cur_trans->rx_buf;
                for (j = 0; j < data->bpw_len; j++)
                        *rx_sbuf++ = data->pkt_rx_buff[j];
        }
}
static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
{
        int j;
        u8 *rx_buf;
        u16 *rx_sbuf;
        const u8 *rx_dma_buf;
        const u16 *rx_dma_sbuf;

        /* copy Rx Data */
        if (!data->cur_trans->rx_buf)
                return;

        if (bpw == 8) {
                rx_buf = data->cur_trans->rx_buf;
                rx_dma_buf = data->dma.rx_buf_virt;
                for (j = 0; j < data->bpw_len; j++)
                        *rx_buf++ = *rx_dma_buf++ & 0xFF;
                data->cur_trans->rx_buf = rx_buf;
        } else {
                rx_sbuf = data->cur_trans->rx_buf;
                rx_dma_sbuf = data->dma.rx_buf_virt;
                for (j = 0; j < data->bpw_len; j++)
                        *rx_sbuf++ = *rx_dma_sbuf++;
                data->cur_trans->rx_buf = rx_sbuf;
        }
}
static int pch_spi_start_transfer(struct pch_spi_data *data)
{
        struct pch_spi_dma_ctrl *dma;
        unsigned long flags;
        int rtn;

        dma = &data->dma;

        spin_lock_irqsave(&data->lock, flags);

        /* disable interrupts, SPI set enable */
        pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);

        spin_unlock_irqrestore(&data->lock, flags);

        /*
         * Wait until the transfer completes; go to sleep after
         * initiating the transfer. Give it two seconds to finish.
         */
        dev_dbg(&data->master->dev,
                "%s:waiting for transfer to get over\n", __func__);
        rtn = wait_event_interruptible_timeout(data->wait,
                                               data->transfer_complete,
                                               msecs_to_jiffies(2000));
        if (!rtn)
                dev_err(&data->master->dev,
                        "%s wait-event timeout\n", __func__);

        dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
                            DMA_FROM_DEVICE);
        dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
                            DMA_TO_DEVICE);
        memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);

        async_tx_ack(dma->desc_rx);
        async_tx_ack(dma->desc_tx);
        kfree(dma->sg_tx_p);
        kfree(dma->sg_rx_p);

        spin_lock_irqsave(&data->lock, flags);

        /* clear fifo threshold, disable interrupts, disable SPI transfer */
        pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
                           MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
                           SPCR_SPE_BIT);
        /* clear all interrupts */
        pch_spi_writereg(data->master, PCH_SPSR,
                         pch_spi_readreg(data->master, PCH_SPSR));
        /* clear FIFO */
        pch_spi_clear_fifo(data->master);

        spin_unlock_irqrestore(&data->lock, flags);

        return rtn;
}
static void pch_dma_rx_complete(void *arg)
{
        struct pch_spi_data *data = arg;

        /* transfer is completed; inform pch_spi_process_messages_dma */
        data->transfer_complete = true;
        wake_up_interruptible(&data->wait);
}

static bool pch_spi_filter(struct dma_chan *chan, void *slave)
{
        struct pch_dma_slave *param = slave;

        if ((chan->chan_id == param->chan_id) &&
            (param->dma_dev == chan->device->dev)) {
                chan->private = param;
                return true;
        } else {
                return false;
        }
}
static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        struct pci_dev *dma_dev;
        struct pch_dma_slave *param;
        struct pch_spi_dma_ctrl *dma;
        unsigned int width;

        if (bpw == 8)
                width = PCH_DMA_WIDTH_1_BYTE;
        else
                width = PCH_DMA_WIDTH_2_BYTES;

        dma = &data->dma;
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* Get DMA's dev information */
        dma_dev = pci_get_slot(data->board_dat->pdev->bus,
                        PCI_DEVFN(PCI_SLOT(data->board_dat->pdev->devfn), 0));

        /* Set Tx DMA */
        param = &dma->param_tx;
        param->dma_dev = &dma_dev->dev;
        param->chan_id = data->ch * 2; /* Tx = 0, 2 */
        param->tx_reg = data->io_base_addr + PCH_SPDWR;
        param->width = width;
        chan = dma_request_channel(mask, pch_spi_filter, param);
        if (!chan) {
                dev_err(&data->master->dev,
                        "ERROR: dma_request_channel FAILS(Tx)\n");
                data->use_dma = 0;
                return;
        }
        dma->chan_tx = chan;

        /* Set Rx DMA */
        param = &dma->param_rx;
        param->dma_dev = &dma_dev->dev;
        param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
        param->rx_reg = data->io_base_addr + PCH_SPDRR;
        param->width = width;
        chan = dma_request_channel(mask, pch_spi_filter, param);
        if (!chan) {
                dev_err(&data->master->dev,
                        "ERROR: dma_request_channel FAILS(Rx)\n");
                dma_release_channel(dma->chan_tx);
                dma->chan_tx = NULL;
                data->use_dma = 0;
                return;
        }
        dma->chan_rx = chan;
}
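/*
 * Channel numbering assumed by pch_spi_request_dma(), following the
 * "Tx = 0, 2" and "Rx = Tx + 1" comments above: the pch_dma controller
 * exposes its channels in Tx/Rx pairs, so SPI channel 0 uses DMA
 * channels 0 (Tx) and 1 (Rx), and SPI channel 1 uses DMA channels
 * 2 (Tx) and 3 (Rx), matching "data->ch * 2" and "data->ch * 2 + 1".
 */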
static void pch_spi_release_dma(struct pch_spi_data *data)
{
        struct pch_spi_dma_ctrl *dma;

        dma = &data->dma;
        if (dma->chan_tx) {
                dma_release_channel(dma->chan_tx);
                dma->chan_tx = NULL;
        }
        if (dma->chan_rx) {
                dma_release_channel(dma->chan_rx);
                dma->chan_rx = NULL;
        }
}
static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
{
        const u8 *tx_buf;
        const u16 *tx_sbuf;
        u8 *tx_dma_buf;
        u16 *tx_dma_sbuf;
        struct scatterlist *sg;
        struct dma_async_tx_descriptor *desc_tx;
        struct dma_async_tx_descriptor *desc_rx;
        int num;
        int i;
        int size;
        int rem;
        int head;
        unsigned long flags;
        struct pch_spi_dma_ctrl *dma;

        dma = &data->dma;

        /* set baud rate if needed */
        if (data->cur_trans->speed_hz) {
                dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
                spin_lock_irqsave(&data->lock, flags);
                pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
                spin_unlock_irqrestore(&data->lock, flags);
        }

        /* set bits per word if needed */
        if (data->cur_trans->bits_per_word &&
            (data->current_msg->spi->bits_per_word !=
             data->cur_trans->bits_per_word)) {
                dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
                spin_lock_irqsave(&data->lock, flags);
                pch_spi_set_bits_per_word(data->master,
                                          data->cur_trans->bits_per_word);
                spin_unlock_irqrestore(&data->lock, flags);
                *bpw = data->cur_trans->bits_per_word;
        } else {
                *bpw = data->current_msg->spi->bits_per_word;
        }
        data->bpw_len = data->cur_trans->len / (*bpw / 8);

        if (data->bpw_len > PCH_BUF_SIZE) {
                data->bpw_len = PCH_BUF_SIZE;
                data->cur_trans->len -= PCH_BUF_SIZE;
        }

        /* copy Tx Data */
        if (data->cur_trans->tx_buf != NULL) {
                if (*bpw == 8) {
                        tx_buf = data->cur_trans->tx_buf;
                        tx_dma_buf = dma->tx_buf_virt;
                        for (i = 0; i < data->bpw_len; i++)
                                *tx_dma_buf++ = *tx_buf++;
                } else {
                        tx_sbuf = data->cur_trans->tx_buf;
                        tx_dma_sbuf = dma->tx_buf_virt;
                        for (i = 0; i < data->bpw_len; i++)
                                *tx_dma_sbuf++ = *tx_sbuf++;
                }
        }

        /* Calculate Rx parameters for the DMA transfer */
        if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
                if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
                        num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
                        rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
                } else {
                        num = data->bpw_len / PCH_DMA_TRANS_SIZE;
                        rem = PCH_DMA_TRANS_SIZE;
                }
                size = PCH_DMA_TRANS_SIZE;
        } else {
                num = 1;
                size = data->bpw_len;
                rem = data->bpw_len;
        }
        dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
                __func__, num, size, rem);
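        /*
         * Example of the Rx split above (hypothetical length, for
         * illustration only): bpw_len = 30 with PCH_DMA_TRANS_SIZE = 12
         * gives num = 3, size = 12, rem = 6, i.e. scatterlist segments
         * of 12, 6 and 12 words (the loop below places the short "rem"
         * segment second-to-last), totalling 30.
         */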
        spin_lock_irqsave(&data->lock, flags);

        /* set receive fifo threshold and transmit fifo threshold */
        pch_spi_setclr_reg(data->master, PCH_SPCR,
                           ((size - 1) << SPCR_RFIC_FIELD) |
                           (PCH_TX_THOLD << SPCR_TFIC_FIELD),
                           MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
        spin_unlock_irqrestore(&data->lock, flags);

        /* RX */
        dma->sg_rx_p = kmalloc_array(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
        if (!dma->sg_rx_p)
                return;

        sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
        /* offset, length setting */
        sg = dma->sg_rx_p;
        for (i = 0; i < num; i++, sg++) {
                if (i == (num - 2)) {
                        sg->offset = size * i;
                        sg->offset = sg->offset * (*bpw / 8);
                        sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
                                    sg->offset);
                        sg_dma_len(sg) = rem;
                } else if (i == (num - 1)) {
                        sg->offset = size * (i - 1) + rem;
                        sg->offset = sg->offset * (*bpw / 8);
                        sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
                                    sg->offset);
                        sg_dma_len(sg) = size;
                } else {
                        sg->offset = size * i;
                        sg->offset = sg->offset * (*bpw / 8);
                        sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
                                    sg->offset);
                        sg_dma_len(sg) = size;
                }
                sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
        }
        sg = dma->sg_rx_p;
        desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
                                          num, DMA_DEV_TO_MEM,
                                          DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_rx) {
                dev_err(&data->master->dev,
                        "%s:dmaengine_prep_slave_sg Failed\n", __func__);
                return;
        }
        dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
        desc_rx->callback = pch_dma_rx_complete;
        desc_rx->callback_param = data;
        dma->nent = num;
        dma->desc_rx = desc_rx;

        /* Calculate Tx parameters for the DMA transfer */
        if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
                head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
                if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
                        num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
                        rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
                } else {
                        num = data->bpw_len / PCH_DMA_TRANS_SIZE;
                        rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
                              PCH_DMA_TRANS_SIZE - head;
                }
                size = PCH_DMA_TRANS_SIZE;
        } else {
                num = 1;
                size = data->bpw_len;
                rem = data->bpw_len;
                head = 0;
        }
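        /*
         * Example of the Tx split above (same hypothetical
         * bpw_len = 30): head = 16 - 12 = 4, and 30 % 12 = 6 > 4, so
         * num = 3 and rem = 6 - 4 = 2; the loop below then builds
         * segments of size + head = 16, then 12, then 2, again
         * totalling 30. The extra "head" words on the first segment
         * let the first burst fill the 16-deep Tx FIFO.
         */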
        dma->sg_tx_p = kmalloc_array(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
        if (!dma->sg_tx_p)
                return;

        sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
        /* offset, length setting */
        sg = dma->sg_tx_p;
        for (i = 0; i < num; i++, sg++) {
                if (i == 0) {
                        sg->offset = 0;
                        sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
                                    sg->offset);
                        sg_dma_len(sg) = size + head;
                } else if (i == (num - 1)) {
                        sg->offset = head + size * i;
                        sg->offset = sg->offset * (*bpw / 8);
                        sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
                                    sg->offset);
                        sg_dma_len(sg) = rem;
                } else {
                        sg->offset = head + size * i;
                        sg->offset = sg->offset * (*bpw / 8);
                        sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
                                    sg->offset);
                        sg_dma_len(sg) = size;
                }
                sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
        }
        sg = dma->sg_tx_p;
        desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
                                          sg, num, DMA_MEM_TO_DEV,
                                          DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_tx) {
                dev_err(&data->master->dev,
                        "%s:dmaengine_prep_slave_sg Failed\n", __func__);
                return;
        }
        dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
        desc_tx->callback = NULL;
        desc_tx->callback_param = data;
        dma->nent = num;
        dma->desc_tx = desc_tx;

        dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);

        spin_lock_irqsave(&data->lock, flags);
        pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
        desc_rx->tx_submit(desc_rx);
        desc_tx->tx_submit(desc_tx);
        spin_unlock_irqrestore(&data->lock, flags);

        /* reset transfer complete flag */
        data->transfer_complete = false;
}
static void pch_spi_process_messages(struct work_struct *pwork)
{
        struct spi_message *pmsg, *tmp;
        struct pch_spi_data *data;
        int bpw;

        data = container_of(pwork, struct pch_spi_data, work);
        dev_dbg(&data->master->dev, "%s data initialized\n", __func__);

        spin_lock(&data->lock);
        /* check if suspend has been initiated; if yes, flush queue */
        if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
                dev_dbg(&data->master->dev,
                        "%s suspend/remove initiated, flushing queue\n", __func__);
                list_for_each_entry_safe(pmsg, tmp, &data->queue, queue) {
                        pmsg->status = -EIO;

                        if (pmsg->complete) {
                                spin_unlock(&data->lock);
                                pmsg->complete(pmsg->context);
                                spin_lock(&data->lock);
                        }

                        /* delete from queue */
                        list_del_init(&pmsg->queue);
                }

                spin_unlock(&data->lock);
                return;
        }

        data->bcurrent_msg_processing = true;
        dev_dbg(&data->master->dev,
                "%s Set data->bcurrent_msg_processing= true\n", __func__);

        /* Get the message from the queue and delete it from there. */
        data->current_msg = list_entry(data->queue.next, struct spi_message,
                                       queue);

        list_del_init(&data->current_msg->queue);

        data->current_msg->status = 0;

        pch_spi_select_chip(data, data->current_msg->spi);

        spin_unlock(&data->lock);

        if (data->use_dma)
                pch_spi_request_dma(data,
                                    data->current_msg->spi->bits_per_word);
        pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
        do {
                int cnt;
                /*
                 * If we are already processing a message get the next
                 * transfer structure from the message, otherwise retrieve
                 * the 1st transfer request from the message.
                 */
                spin_lock(&data->lock);
                if (data->cur_trans == NULL) {
                        data->cur_trans =
                                list_entry(data->current_msg->transfers.next,
                                           struct spi_transfer, transfer_list);
                        dev_dbg(&data->master->dev,
                                "%s :Getting 1st transfer message\n",
                                __func__);
                } else {
                        data->cur_trans =
                                list_entry(data->cur_trans->transfer_list.next,
                                           struct spi_transfer, transfer_list);
                        dev_dbg(&data->master->dev,
                                "%s :Getting next transfer message\n",
                                __func__);
                }
                spin_unlock(&data->lock);

                if (!data->cur_trans->len)
                        goto out;
                cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
                data->save_total_len = data->cur_trans->len;
                if (data->use_dma) {
                        int i;
                        char *save_rx_buf = data->cur_trans->rx_buf;

                        for (i = 0; i < cnt; i++) {
                                pch_spi_handle_dma(data, &bpw);
                                if (!pch_spi_start_transfer(data)) {
                                        data->transfer_complete = true;
                                        data->current_msg->status = -EIO;
                                        data->current_msg->complete
                                                (data->current_msg->context);
                                        data->bcurrent_msg_processing = false;
                                        data->current_msg = NULL;
                                        data->cur_trans = NULL;
                                        goto out;
                                }
                                pch_spi_copy_rx_data_for_dma(data, bpw);
                        }
                        data->cur_trans->rx_buf = save_rx_buf;
                } else {
                        pch_spi_set_tx(data, &bpw);
                        pch_spi_set_ir(data);
                        pch_spi_copy_rx_data(data, bpw);
                        kfree(data->pkt_rx_buff);
                        data->pkt_rx_buff = NULL;
                        kfree(data->pkt_tx_buff);
                        data->pkt_tx_buff = NULL;
                }
                /* increment message count */
                data->cur_trans->len = data->save_total_len;
                data->current_msg->actual_length += data->cur_trans->len;

                dev_dbg(&data->master->dev,
                        "%s:data->current_msg->actual_length=%d\n",
                        __func__, data->current_msg->actual_length);

                spi_transfer_delay_exec(data->cur_trans);

                spin_lock(&data->lock);

                /* No more transfers in this message. */
                if ((data->cur_trans->transfer_list.next) ==
                    &(data->current_msg->transfers)) {
                        pch_spi_nomore_transfer(data);
                }

                spin_unlock(&data->lock);

        } while (data->cur_trans != NULL);

out:
        pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
        if (data->use_dma)
                pch_spi_release_dma(data);
}
static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
                                   struct pch_spi_data *data)
{
        dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);

        flush_work(&data->work);
}

static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
                                 struct pch_spi_data *data)
{
        dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);

        /* reset PCH SPI h/w */
        pch_spi_reset(data->master);
        dev_dbg(&board_dat->pdev->dev,
                "%s pch_spi_reset invoked successfully\n", __func__);

        dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);

        return 0;
}
static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
                             struct pch_spi_data *data)
{
        struct pch_spi_dma_ctrl *dma;

        dma = &data->dma;
        if (dma->tx_buf_dma)
                dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
                                  dma->tx_buf_virt, dma->tx_buf_dma);
        if (dma->rx_buf_dma)
                dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
                                  dma->rx_buf_virt, dma->rx_buf_dma);
}

static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
                             struct pch_spi_data *data)
{
        struct pch_spi_dma_ctrl *dma;
        int ret;

        dma = &data->dma;
        ret = 0;
        /* Get Consistent memory for Tx DMA */
        dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
                                PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
        if (!dma->tx_buf_virt)
                ret = -ENOMEM;

        /* Get Consistent memory for Rx DMA */
        dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
                                PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
        if (!dma->rx_buf_virt)
                ret = -ENOMEM;

        return ret;
}
static int pch_spi_pd_probe(struct platform_device *plat_dev)
{
        int ret;
        struct spi_master *master;
        struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
        struct pch_spi_data *data;

        dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);

        master = spi_alloc_master(&board_dat->pdev->dev,
                                  sizeof(struct pch_spi_data));
        if (!master) {
                dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
                        plat_dev->id);
                return -ENOMEM;
        }

        data = spi_master_get_devdata(master);
        data->master = master;

        platform_set_drvdata(plat_dev, data);

        /* base address + per-channel address offset */
        data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
                                        PCH_ADDRESS_SIZE * plat_dev->id;
        data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
        if (!data->io_remap_addr) {
                dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
                ret = -ENOMEM;
                goto err_pci_iomap;
        }
        data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;

        dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
                plat_dev->id, data->io_remap_addr);

        /* initialize members of SPI master */
        master->num_chipselect = PCH_MAX_CS;
        master->transfer = pch_spi_transfer;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
        master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
        master->max_speed_hz = PCH_MAX_BAUDRATE;

        data->board_dat = board_dat;
        data->plat_dev = plat_dev;
        data->n_curnt_chip = 255;
        data->status = STATUS_RUNNING;
        data->ch = plat_dev->id;
        data->use_dma = use_dma;

        INIT_LIST_HEAD(&data->queue);
        spin_lock_init(&data->lock);
        INIT_WORK(&data->work, pch_spi_process_messages);
        init_waitqueue_head(&data->wait);

        ret = pch_spi_get_resources(board_dat, data);
        if (ret) {
                dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
                goto err_spi_get_resources;
        }

        ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
                          IRQF_SHARED, KBUILD_MODNAME, data);
        if (ret) {
                dev_err(&plat_dev->dev,
                        "%s request_irq failed\n", __func__);
                goto err_request_irq;
        }
        data->irq_reg_sts = true;

        pch_spi_set_master_mode(master);

        if (use_dma) {
                dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
                ret = pch_alloc_dma_buf(board_dat, data);
                if (ret)
                        goto err_spi_register_master;
        }

        ret = spi_register_master(master);
        if (ret != 0) {
                dev_err(&plat_dev->dev,
                        "%s spi_register_master FAILED\n", __func__);
                goto err_spi_register_master;
        }

        return 0;

err_spi_register_master:
        pch_free_dma_buf(board_dat, data);
        free_irq(board_dat->pdev->irq, data);
err_request_irq:
        pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
        pci_iounmap(board_dat->pdev, data->io_remap_addr);
err_pci_iomap:
        spi_master_put(master);

        return ret;
}
static int pch_spi_pd_remove(struct platform_device *plat_dev)
{
        struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
        struct pch_spi_data *data = platform_get_drvdata(plat_dev);
        int count;
        unsigned long flags;

        dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
                __func__, plat_dev->id, board_dat->pdev->irq);

        if (use_dma)
                pch_free_dma_buf(board_dat, data);

        /*
         * Check for any pending messages; no further action is taken if
         * the queue does not drain, but at least we tried. Unload anyway.
         */
        count = 500;
        spin_lock_irqsave(&data->lock, flags);
        data->status = STATUS_EXITING;
        while ((list_empty(&data->queue) == 0) && --count) {
                dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
                        __func__);
                spin_unlock_irqrestore(&data->lock, flags);
                msleep(PCH_SLEEP_TIME);
                spin_lock_irqsave(&data->lock, flags);
        }
        spin_unlock_irqrestore(&data->lock, flags);

        pch_spi_free_resources(board_dat, data);
        /* disable interrupts & free IRQ */
        if (data->irq_reg_sts) {
                /* disable interrupts */
                pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
                data->irq_reg_sts = false;
                free_irq(board_dat->pdev->irq, data);
        }

        pci_iounmap(board_dat->pdev, data->io_remap_addr);
        spi_unregister_master(data->master);

        return 0;
}
#ifdef CONFIG_PM
static int pch_spi_pd_suspend(struct platform_device *pd_dev,
                              pm_message_t state)
{
        u8 count;
        struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
        struct pch_spi_data *data = platform_get_drvdata(pd_dev);

        dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);

        if (!board_dat) {
                dev_err(&pd_dev->dev,
                        "%s dev_get_platdata returned NULL\n", __func__);
                return -EFAULT;
        }

        /*
         * Check whether the current message has been processed; only
         * after that is done can the transfer be suspended.
         */
        count = 255;
        while ((--count) > 0) {
                if (!(data->bcurrent_msg_processing))
                        break;
                msleep(PCH_SLEEP_TIME);
        }

        /* Free IRQ */
        if (data->irq_reg_sts) {
                /* disable all interrupts */
                pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
                pch_spi_reset(data->master);
                free_irq(board_dat->pdev->irq, data);

                data->irq_reg_sts = false;
                dev_dbg(&pd_dev->dev,
                        "%s free_irq invoked successfully.\n", __func__);
        }

        return 0;
}

static int pch_spi_pd_resume(struct platform_device *pd_dev)
{
        struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
        struct pch_spi_data *data = platform_get_drvdata(pd_dev);
        int retval;

        if (!board_dat) {
                dev_err(&pd_dev->dev,
                        "%s dev_get_platdata returned NULL\n", __func__);
                return -EFAULT;
        }

        if (!data->irq_reg_sts) {
                /* register IRQ */
                retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
                                     IRQF_SHARED, KBUILD_MODNAME, data);
                if (retval < 0) {
                        dev_err(&pd_dev->dev,
                                "%s request_irq failed\n", __func__);
                        return retval;
                }

                /* reset PCH SPI h/w */
                pch_spi_reset(data->master);
                pch_spi_set_master_mode(data->master);
                data->irq_reg_sts = true;
        }
        return 0;
}
#else
#define pch_spi_pd_suspend NULL
#define pch_spi_pd_resume NULL
#endif
static struct platform_driver pch_spi_pd_driver = {
        .driver = {
                .name = "pch-spi",
        },
        .probe = pch_spi_pd_probe,
        .remove = pch_spi_pd_remove,
        .suspend = pch_spi_pd_suspend,
        .resume = pch_spi_pd_resume
};
static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct pch_spi_board_data *board_dat;
        struct platform_device *pd_dev = NULL;
        int retval;
        int i;
        struct pch_pd_dev_save *pd_dev_save;

        pd_dev_save = kzalloc(sizeof(*pd_dev_save), GFP_KERNEL);
        if (!pd_dev_save)
                return -ENOMEM;

        board_dat = kzalloc(sizeof(*board_dat), GFP_KERNEL);
        if (!board_dat) {
                retval = -ENOMEM;
                goto err_no_mem;
        }

        retval = pci_request_regions(pdev, KBUILD_MODNAME);
        if (retval) {
                dev_err(&pdev->dev, "%s request_region failed\n", __func__);
                goto pci_request_regions;
        }

        board_dat->pdev = pdev;
        board_dat->num = id->driver_data;
        pd_dev_save->num = id->driver_data;
        pd_dev_save->board_dat = board_dat;

        retval = pci_enable_device(pdev);
        if (retval) {
                dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
                goto pci_enable_device;
        }

        for (i = 0; i < board_dat->num; i++) {
                pd_dev = platform_device_alloc("pch-spi", i);
                if (!pd_dev) {
                        dev_err(&pdev->dev, "platform_device_alloc failed\n");
                        retval = -ENOMEM;
                        goto err_platform_device;
                }
                pd_dev_save->pd_save[i] = pd_dev;
                pd_dev->dev.parent = &pdev->dev;

                retval = platform_device_add_data(pd_dev, board_dat,
                                                  sizeof(*board_dat));
                if (retval) {
                        dev_err(&pdev->dev,
                                "platform_device_add_data failed\n");
                        platform_device_put(pd_dev);
                        goto err_platform_device;
                }

                retval = platform_device_add(pd_dev);
                if (retval) {
                        dev_err(&pdev->dev, "platform_device_add failed\n");
                        platform_device_put(pd_dev);
                        goto err_platform_device;
                }
        }

        pci_set_drvdata(pdev, pd_dev_save);

        return 0;

err_platform_device:
        while (--i >= 0)
                platform_device_unregister(pd_dev_save->pd_save[i]);
        pci_disable_device(pdev);
pci_enable_device:
        pci_release_regions(pdev);
pci_request_regions:
        kfree(board_dat);
err_no_mem:
        kfree(pd_dev_save);

        return retval;
}
static void pch_spi_remove(struct pci_dev *pdev)
{
        int i;
        struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);

        dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);

        for (i = 0; i < pd_dev_save->num; i++)
                platform_device_unregister(pd_dev_save->pd_save[i]);

        pci_disable_device(pdev);
        pci_release_regions(pdev);
        kfree(pd_dev_save->board_dat);
        kfree(pd_dev_save);
}
static int __maybe_unused pch_spi_suspend(struct device *dev)
{
        struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);

        dev_dbg(dev, "%s ENTRY\n", __func__);

        pd_dev_save->board_dat->suspend_sts = true;

        return 0;
}

static int __maybe_unused pch_spi_resume(struct device *dev)
{
        struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);

        dev_dbg(dev, "%s ENTRY\n", __func__);

        /* set suspend status to false */
        pd_dev_save->board_dat->suspend_sts = false;

        return 0;
}

static SIMPLE_DEV_PM_OPS(pch_spi_pm_ops, pch_spi_suspend, pch_spi_resume);
static struct pci_driver pch_spi_pcidev_driver = {
        .name = "pch_spi",
        .id_table = pch_spi_pcidev_id,
        .probe = pch_spi_probe,
        .remove = pch_spi_remove,
        .driver.pm = &pch_spi_pm_ops,
};

static int __init pch_spi_init(void)
{
        int ret;

        ret = platform_driver_register(&pch_spi_pd_driver);
        if (ret)
                return ret;

        ret = pci_register_driver(&pch_spi_pcidev_driver);
        if (ret) {
                platform_driver_unregister(&pch_spi_pd_driver);
                return ret;
        }

        return 0;
}
module_init(pch_spi_init);

static void __exit pch_spi_exit(void)
{
        pci_unregister_driver(&pch_spi_pcidev_driver);
        platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);

module_param(use_dma, int, 0644);
MODULE_PARM_DESC(use_dma,
                 "to use DMA for data transfers pass 1 else 0; default 1");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
MODULE_DEVICE_TABLE(pci, pch_spi_pcidev_id);