nuvoton-cir.c

/*
 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
 *
 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
 * Copyright (C) 2009 Nuvoton PS Team
 *
 * Special thanks to Nuvoton for providing hardware, spec sheets and
 * sample code upon which portions of this driver are based. Indirect
 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
 * modeled after.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include <linux/pci_ids.h>

#include "nuvoton-cir.h"

static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);

static const struct nvt_chip nvt_chips[] = {
	{ "w83667hg", NVT_W83667HG },
	{ "NCT6775F", NVT_6775F },
	{ "NCT6776F", NVT_6776F },
	{ "NCT6779D", NVT_6779D },
};

static inline struct device *nvt_get_dev(const struct nvt_dev *nvt)
{
	return nvt->rdev->dev.parent;
}

static inline bool is_w83667hg(struct nvt_dev *nvt)
{
	return nvt->chip_ver == NVT_W83667HG;
}

/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}

/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}

/* update config register bit without changing other bits */
static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) | val;

	nvt_cr_write(nvt, tmp, reg);
}

/* enter extended function mode */
static inline int nvt_efm_enable(struct nvt_dev *nvt)
{
	if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
		return -EBUSY;

	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);

	return 0;
}

/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	outb(EFER_EFM_DISABLE, nvt->cr_efir);

	release_region(nvt->cr_efir, 2);
}
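
/*
 * Minimal usage sketch (illustrative only; the helpers named here are the
 * ones defined in this file): every config-register access below is
 * bracketed by the two EFM helpers, e.g.
 *
 *	if (nvt_efm_enable(nvt))
 *		return;
 *	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 *	... access CR_* registers via nvt_cr_read()/nvt_cr_write() ...
 *	nvt_efm_disable(nvt);
 */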

/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable it by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
 */
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
}

/* select and enable logical device, wrapped in EFM enable/disable */
static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* select and disable logical device, wrapped in EFM enable/disable */
static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
	outb(val, nvt->cir_addr + offset);
}

/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_addr + offset);
}

/* write val to cir wake register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
					  u8 val, u8 offset)
{
	outb(val, nvt->cir_wake_addr + offset);
}

/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_wake_addr + offset);
}

/* don't override io address if one is set already */
static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
{
	unsigned long old_addr;

	old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
	old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);

	if (old_addr)
		*ioaddr = old_addr;
	else {
		nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
		nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
	}
}

static void nvt_write_wakeup_codes(struct rc_dev *dev,
				   const u8 *wbuf, int count)
{
	u8 tolerance, config;
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	int i;

	/* hardcode the tolerance to 10% */
	tolerance = DIV_ROUND_UP(count, 10);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt_clear_cir_wake_fifo(nvt);
	nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
	nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* enable writes to wake fifo */
	nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
			       CIR_WAKE_IRCON);

	if (count)
		pr_info("Wake samples (%d) =", count);
	else
		pr_info("Wake sample fifo cleared");

	for (i = 0; i < count; i++)
		nvt_cir_wake_reg_write(nvt, wbuf[i], CIR_WAKE_WR_FIFO_DATA);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

static ssize_t wakeup_data_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	struct nvt_dev *nvt = rc_dev->priv;
	int fifo_len, duration;
	unsigned long flags;
	ssize_t buf_len = 0;
	int i;

	spin_lock_irqsave(&nvt->lock, flags);

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);

	/* go to first element to be read */
	while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);

	for (i = 0; i < fifo_len; i++) {
		duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
		duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
		buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len,
				     "%d ", duration);
	}
	buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");

	spin_unlock_irqrestore(&nvt->lock, flags);

	return buf_len;
}

static ssize_t wakeup_data_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	u8 wake_buf[WAKEUP_MAX_SIZE];
	char **argv;
	int i, count;
	unsigned int val;
	ssize_t ret;

	argv = argv_split(GFP_KERNEL, buf, &count);
	if (!argv)
		return -ENOMEM;
	if (!count || count > WAKEUP_MAX_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < count; i++) {
		ret = kstrtouint(argv[i], 10, &val);
		if (ret)
			goto out;

		val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
		if (!val || val > 0x7f) {
			ret = -EINVAL;
			goto out;
		}

		wake_buf[i] = val;
		/* sequence must start with a pulse */
		if (i % 2 == 0)
			wake_buf[i] |= BUF_PULSE_BIT;
	}

	nvt_write_wakeup_codes(rc_dev, wake_buf, count);

	ret = len;
out:
	argv_free(argv);
	return ret;
}
static DEVICE_ATTR_RW(wakeup_data);
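
/*
 * Illustrative sysfs usage of the attribute above (assuming the receiver
 * registered as rc0; the actual rcN index depends on the system):
 *
 *	# read back the wake FIFO contents as durations
 *	cat /sys/class/rc/rc0/wakeup_data
 *
 *	# program a wake sequence: whitespace-separated durations in
 *	# microseconds, alternating pulse/space, starting with a pulse
 *	echo "500 500 500 1000" > /sys/class/rc/rc0/wakeup_data
 *
 * Each value is rounded to a multiple of SAMPLE_PERIOD (50us per the
 * comments in this file) and must fit in BUF_LEN_MASK after conversion.
 */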

/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	pr_info(" * CR CIR ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	pr_info(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	pr_info(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	pr_info(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	pr_info(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	pr_info(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	pr_info(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	pr_info(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	pr_info(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	pr_info(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	pr_info(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	pr_info(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	pr_info(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	pr_info(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	pr_info(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}

/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	pr_info("%s: Dump CIR WAKE logical device registers:\n",
		NVT_DRIVER_NAME);
	pr_info(" * CR CIR WAKE ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR WAKE IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	pr_info(" * IRSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	pr_info(" * IREN: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	pr_info(" * FIFO CMP DEEP: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	pr_info(" * FIFO CMP TOL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	pr_info(" * FIFO COUNT: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	pr_info(" * SLCH: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	pr_info(" * SLCL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	pr_info(" * FIFOCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	pr_info(" * SRXFSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	pr_info(" * SAMPLE RX FIFO: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	pr_info(" * WR FIFO DATA: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	pr_info(" * RD FIFO ONLY: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	pr_info(" * FIFO IGNORE: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	pr_info(" * IRFSM: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	pr_info("* Contents =");
	for (i = 0; i < fifo_len; i++)
		pr_cont(" %02x",
			nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_cont("\n");
}

static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nvt_chips); i++)
		if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) {
			nvt->chip_ver = nvt_chips[i].chip_ver;
			return nvt_chips[i].name;
		}

	return NULL;
}

/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
	struct device *dev = nvt_get_dev(nvt);
	const char *chip_name;
	int chip_id;

	nvt_efm_enable(nvt);

	/* Check if we're wired for the alternate EFER setup */
	nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	if (nvt->chip_major == 0xff) {
		nvt_efm_disable(nvt);
		nvt->cr_efir = CR_EFIR2;
		nvt->cr_efdr = CR_EFDR2;
		nvt_efm_enable(nvt);
		nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	}
	nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);

	nvt_efm_disable(nvt);

	chip_id = nvt->chip_major << 8 | nvt->chip_minor;
	if (chip_id == NVT_INVALID) {
		dev_err(dev, "No device found on either EFM port\n");
		return -ENODEV;
	}

	chip_name = nvt_find_chip(nvt, chip_id);

	/* warn, but still let the driver load, if we don't know this chip */
	if (!chip_name)
		dev_warn(dev,
			 "unknown chip, id: 0x%02x 0x%02x, it may not work...",
			 nvt->chip_major, nvt->chip_minor);
	else
		dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x",
			 chip_name, nvt->chip_major, nvt->chip_minor);

	return 0;
}

static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val, psreg, psmask, psval;

	if (is_w83667hg(nvt)) {
		psreg = CR_MULTIFUNC_PIN_SEL;
		psmask = MULTIFUNC_PIN_SEL_MASK;
		psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
	} else {
		psreg = CR_OUTPUT_PIN_SEL;
		psmask = OUTPUT_PIN_SEL_MASK;
		psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
	}

	/* output pin selection: enable CIR, with WB sensor enabled */
	val = nvt_cr_read(nvt, psreg);
	val &= psmask;
	val |= psval;
	nvt_cr_write(nvt, val, psreg);

	/* Select CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	nvt_set_ioaddr(nvt, &nvt->cir_addr);

	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}

static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device and enable it */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx",
		nvt->cir_wake_addr);
}

/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
	u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);

	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}

/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
	u8 val, config;

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* clearing wake fifo works in learning mode only */
	nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
			       CIR_WAKE_IRCON);

	val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
	nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
			       CIR_WAKE_FIFOCON);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
}

/* clear out the hardware's cir tx fifo */
static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
}

/* enable RX Trigger Level Reach, Packet End and RX FIFO Overrun interrupts */
static void nvt_set_cir_iren(struct nvt_dev *nvt)
{
	u8 iren;

	iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO;
	nvt_cir_reg_write(nvt, iren, CIR_IREN);
}

static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	/*
	 * Disable RX, set specific carrier on = low, off = high,
	 * and sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
}

static void nvt_enable_wake(struct nvt_dev *nvt)
{
	unsigned long flags;

	nvt_efm_enable(nvt);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		dev_notice(nvt_get_dev(nvt),
			   "Unable to determine carrier! (c:%u, d:%u)",
			   count, duration);
		return 0;
	}

	carrier = MS_TO_NS(count) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
#endif

static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
					struct rc_scancode_filter *sc_filter)
{
	u8 buf_val;
	int i, ret, count;
	unsigned int val;
	struct ir_raw_event *raw;
	u8 wake_buf[WAKEUP_MAX_SIZE];
	bool complete;

	/* Require mask to be set */
	if (!sc_filter->mask)
		return 0;

	raw = kmalloc_array(WAKEUP_MAX_SIZE, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc_filter->data,
				     raw, WAKEUP_MAX_SIZE);
	complete = (ret != -ENOBUFS);
	if (!complete)
		ret = WAKEUP_MAX_SIZE;
	else if (ret < 0)
		goto out_raw;

	/* Inspect the ir samples */
	for (i = 0, count = 0; i < ret && count < WAKEUP_MAX_SIZE; ++i) {
		val = raw[i].duration / SAMPLE_PERIOD;

		/* Split too large values into several smaller ones */
		while (val > 0 && count < WAKEUP_MAX_SIZE) {
			/* Skip last value for better comparison tolerance */
			if (complete && i == ret - 1 && val < BUF_LEN_MASK)
				break;

			/* Clamp values to BUF_LEN_MASK at most */
			buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;

			wake_buf[count] = buf_val;
			val -= buf_val;
			if ((raw[i]).pulse)
				wake_buf[count] |= BUF_PULSE_BIT;
			count++;
		}
	}

	nvt_write_wakeup_codes(dev, wake_buf, count);

	ret = 0;
out_raw:
	kfree(raw);

	return ret;
}
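
/*
 * This callback is reached through rc-core's wakeup-filter sysfs attributes.
 * Illustrative usage (assumptions: the device registered as rc0, an RC-6 MCE
 * remote, and 0x800f040c as its power scancode):
 *
 *	echo rc-6-mce   > /sys/class/rc/rc0/wakeup_protocols
 *	echo 0xffffffff > /sys/class/rc/rc0/wakeup_filter_mask
 *	echo 0x800f040c > /sys/class/rc/rc0/wakeup_filter
 *
 * rc-core encodes the scancode into raw IR samples and calls this function,
 * which programs them into the CIR WAKE comparison FIFO.
 */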

/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
	int i;

	printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
	for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
		printk(KERN_CONT "0x%02x ", nvt->buf[i]);
	printk(KERN_CONT "\n");
}

/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
 * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time, we trigger a decode operation.
 */
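
/*
 * Worked example of the sample format above (illustrative values only):
 * a byte of 0x8a has the msb set, so it encodes a pulse of
 * (0x8a & BUF_LEN_MASK) = 10 sample periods, i.e. 10 * 50us = 500us;
 * the byte 0x0a would encode a 500us space.
 */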

static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	struct ir_raw_event rawir = {};
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);

	for (i = 0; i < nvt->pkts; i++) {
		sample = nvt->buf[i];

		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = (sample & BUF_LEN_MASK) * SAMPLE_PERIOD;

		nvt_dbg("Storing %s with duration %d",
			rawir.pulse ? "pulse" : "space", rawir.duration);

		ir_raw_event_store_with_filter(nvt->rdev, &rawir);
	}

	nvt->pkts = 0;

	nvt_dbg("Calling ir_raw_event_handle\n");
	ir_raw_event_handle(nvt->rdev);

	nvt_dbg_verbose("%s done", __func__);
}

static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
	dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!");

	nvt->pkts = 0;
	nvt_clear_cir_fifo(nvt);
	ir_raw_event_reset(nvt->rdev);
}

/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
	u8 fifocount;
	int i;

	/* Get count of how many bytes to read from RX FIFO */
	fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);

	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

	/* Read fifocount bytes from CIR Sample RX FIFO register */
	for (i = 0; i < fifocount; i++)
		nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO);

	nvt->pkts = fifocount;
	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

	nvt_process_rx_ir_data(nvt);
}

static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR ? " RDR" : "",
		status & CIR_IRSTS_RTR ? " RTR" : "",
		status & CIR_IRSTS_PE ? " PE" : "",
		status & CIR_IRSTS_RFO ? " RFO" : "",
		status & CIR_IRSTS_TE ? " TE" : "",
		status & CIR_IRSTS_TTR ? " TTR" : "",
		status & CIR_IRSTS_TFU ? " TFU" : "",
		status & CIR_IRSTS_GH ? " GH" : "",
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}

/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren;

	nvt_dbg_verbose("%s firing", __func__);

	spin_lock(&nvt->lock);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name - description
	 *   7: CIR_IRSTS_RDR - RX Data Ready
	 *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
	 *   5: CIR_IRSTS_PE  - Packet End
	 *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
	 *   3: CIR_IRSTS_TE  - TX FIFO Empty
	 *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
	 *   1: CIR_IRSTS_TFU - TX FIFO Underrun
	 *   0: CIR_IRSTS_GH  - Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* At least NCT6779D creates a spurious interrupt when the
	 * logical device is being disabled.
	 */
	if (status == 0xff && iren == 0xff) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("Spurious interrupt detected");
		return IRQ_HANDLED;
	}

	/* IRQ may be shared with CIR WAKE, therefore check for each
	 * status bit whether the related interrupt source is enabled
	 */
	if (!(status & iren)) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		return IRQ_NONE;
	}

	/* ack/clear all irq flags we've got */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	nvt_cir_log_irqs(status, iren);

	if (status & CIR_IRSTS_RFO)
		nvt_handle_rx_fifo_overrun(nvt);
	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE))
		nvt_get_rx_ir_data(nvt);

	spin_unlock(&nvt->lock);

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_HANDLED;
}

static void nvt_enable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	spin_lock_irqsave(&nvt->lock, flags);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_set_cir_iren(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

static void nvt_disable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	spin_lock_irqsave(&nvt->lock, flags);

	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* disable the CIR logical device */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static int nvt_open(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_enable_cir(nvt);

	return 0;
}

static void nvt_close(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_disable_cir(nvt);
}

/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
	struct nvt_dev *nvt;
	struct rc_dev *rdev;
	int ret;

	nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
	if (!nvt)
		return -ENOMEM;

	/* input device for IR remote */
	nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
	if (!nvt->rdev)
		return -ENOMEM;
	rdev = nvt->rdev;

	/* activate pnp device */
	ret = pnp_activate_dev(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Could not activate PNP device!\n");
		return ret;
	}

	/* validate pnp resources */
	if (!pnp_port_valid(pdev, 0) ||
	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
		return -EINVAL;
	}

	if (!pnp_irq_valid(pdev, 0)) {
		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
		return -EINVAL;
	}

	if (!pnp_port_valid(pdev, 1) ||
	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
		return -EINVAL;
	}

	nvt->cir_addr = pnp_port_start(pdev, 0);
	nvt->cir_irq = pnp_irq(pdev, 0);

	nvt->cir_wake_addr = pnp_port_start(pdev, 1);

	nvt->cr_efir = CR_EFIR;
	nvt->cr_efdr = CR_EFDR;

	spin_lock_init(&nvt->lock);

	pnp_set_drvdata(pdev, nvt);

	ret = nvt_hw_detect(nvt);
	if (ret)
		return ret;

	/* Initialize CIR & CIR Wake Logical Devices */
	nvt_efm_enable(nvt);
	nvt_cir_ldev_init(nvt);
	nvt_cir_wake_ldev_init(nvt);
	nvt_efm_disable(nvt);

	/*
	 * Initialize CIR & CIR Wake Config Registers
	 * and enable logical devices
	 */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	/* Set up the rc device */
	rdev->priv = nvt;
	rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
	rdev->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER;
	rdev->encode_wakeup = true;
	rdev->open = nvt_open;
	rdev->close = nvt_close;
	rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
	rdev->device_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
	rdev->input_phys = "nuvoton/cir0";
	rdev->input_id.bustype = BUS_HOST;
	rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
	rdev->input_id.product = nvt->chip_major;
	rdev->input_id.version = nvt->chip_minor;
	rdev->driver_name = NVT_DRIVER_NAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->timeout = MS_TO_US(100);
	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
	rdev->rx_resolution = CIR_SAMPLE_PERIOD;
#if 0
	rdev->min_timeout = XYZ;
	rdev->max_timeout = XYZ;
#endif
	ret = devm_rc_register_device(&pdev->dev, rdev);
	if (ret)
		return ret;

	/* now claim resources */
	if (!devm_request_region(&pdev->dev, nvt->cir_addr,
				 CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		return -EBUSY;

	ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
			       IRQF_SHARED, NVT_DRIVER_NAME, nvt);
	if (ret)
		return ret;

	if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
				 CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
		return -EBUSY;

	ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
	if (ret)
		return ret;

	device_init_wakeup(&pdev->dev, true);

	dev_notice(&pdev->dev, "driver has been successfully loaded\n");
	if (debug) {
		cir_dump_regs(nvt);
		cir_wake_dump_regs(nvt);
	}

	return 0;
}

static void nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);

	nvt_disable_cir(nvt);

	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);
}

static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	mutex_lock(&nvt->rdev->lock);
	if (nvt->rdev->users)
		nvt_disable_cir(nvt);
	mutex_unlock(&nvt->rdev->lock);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}

static int nvt_resume(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	mutex_lock(&nvt->rdev->lock);
	if (nvt->rdev->users)
		nvt_enable_cir(nvt);
	mutex_unlock(&nvt->rdev->lock);

	return 0;
}

static void nvt_shutdown(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_enable_wake(nvt);
}

static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
	{ "NTN0530", 0 },   /* CIR for new chip's pnp id */
	{ "", 0 },
};

static struct pnp_driver nvt_driver = {
	.name = NVT_DRIVER_NAME,
	.id_table = nvt_ids,
	.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe = nvt_probe,
	.remove = nvt_remove,
	.suspend = nvt_suspend,
	.resume = nvt_resume,
	.shutdown = nvt_shutdown,
};

module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");
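
/*
 * Illustrative use of the module parameter above (module and parameter names
 * are taken from this file; the sysfs path assumes the usual module layout,
 * with '-' mapped to '_'):
 *
 *	modprobe nuvoton-cir debug=1
 *	echo 1 > /sys/module/nuvoton_cir/parameters/debug
 *
 * This enables the nvt_dbg()/register-dump output guarded by "debug" above.
 */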

MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");

MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

module_pnp_driver(nvt_driver);