psb_irq.c

// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 **************************************************************************/

#include <drm/drm_vblank.h>

#include "mdfld_output.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_irq.h"
#include "psb_reg.h"

/*
 * inline functions
 */

static inline u32
psb_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	if (pipe == 2)
		return PIPECSTAT;
	BUG();
}

static inline u32
mid_pipe_event(int pipe)
{
	if (pipe == 0)
		return _PSB_PIPEA_EVENT_FLAG;
	if (pipe == 1)
		return _MDFLD_PIPEB_EVENT_FLAG;
	if (pipe == 2)
		return _MDFLD_PIPEC_EVENT_FLAG;
	BUG();
}

static inline u32
mid_pipe_vsync(int pipe)
{
	if (pipe == 0)
		return _PSB_VSYNC_PIPEA_FLAG;
	if (pipe == 1)
		return _PSB_VSYNC_PIPEB_FLAG;
	if (pipe == 2)
		return _MDFLD_PIPEC_VBLANK_FLAG;
	BUG();
}

static inline u32
mid_pipeconf(int pipe)
{
	if (pipe == 0)
		return PIPEACONF;
	if (pipe == 1)
		return PIPEBCONF;
	if (pipe == 2)
		return PIPECCONF;
	BUG();
}

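/*
 * Note on the PIPExSTAT layout, as used by the helpers below: the enable
 * bits sit in the upper 16 bits of the register and the matching status
 * bits, which are write-1-to-clear, sit 16 bits lower.  That is why
 * psb_enable_pipestat() writes (mask | (mask >> 16)) - it arms the
 * interrupt and clears any stale status in one go - while the cached
 * dev_priv->pipestat[pipe] value only tracks the enable half.  This is a
 * reading of the code in this file, not a quote from the hardware docs.
 */
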
void
psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = psb_pipestat(pipe);
		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		if (gma_power_begin(dev_priv->dev, false)) {
			u32 writeVal = PSB_RVDC32(reg);
			writeVal |= (mask | (mask >> 16));
			PSB_WVDC32(writeVal, reg);
			(void) PSB_RVDC32(reg);
			gma_power_end(dev_priv->dev);
		}
	}
}

void
psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = psb_pipestat(pipe);
		dev_priv->pipestat[pipe] &= ~mask;
		if (gma_power_begin(dev_priv->dev, false)) {
			u32 writeVal = PSB_RVDC32(reg);
			writeVal &= ~mask;
			PSB_WVDC32(writeVal, reg);
			(void) PSB_RVDC32(reg);
			gma_power_end(dev_priv->dev);
		}
	}
}

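/*
 * The gma_power_begin(dev, false) / gma_power_end(dev) pairs above and
 * below guard register access against the display power island being off.
 * Passing false appears to mean "do not force the island on": if the
 * hardware is powered down the MMIO access is simply skipped and only the
 * software copies (dev_priv->pipestat, dev_priv->vdc_irq_mask) are updated,
 * to be reapplied when interrupts are reinstalled after power-up.  This is
 * inferred from the call sites here rather than from power.c itself.
 */
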
static void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
{
	if (gma_power_begin(dev_priv->dev, false)) {
		u32 pipe_event = mid_pipe_event(pipe);
		dev_priv->vdc_irq_mask |= pipe_event;
		PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
		PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
		gma_power_end(dev_priv->dev);
	}
}

static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
{
	if (dev_priv->pipestat[pipe] == 0) {
		if (gma_power_begin(dev_priv->dev, false)) {
			u32 pipe_event = mid_pipe_event(pipe);
			dev_priv->vdc_irq_mask &= ~pipe_event;
			PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
			PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
			gma_power_end(dev_priv->dev);
		}
	}
}

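/*
 * The interrupt logic is two-level: PSB_INT_MASK_R / PSB_INT_ENABLE_R gate
 * the per-pipe event flags at the display controller (VDC) level, which is
 * what mid_enable_pipe_event() and mid_disable_pipe_event() manipulate,
 * while the per-pipe PIPExSTAT registers select and report the individual
 * events (vblank, TE, DPST) underneath.  mid_disable_pipe_event() only
 * drops the first-level flag once no second-level sources remain enabled
 * (dev_priv->pipestat[pipe] == 0).
 */
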
/*
 * Display controller interrupt handler for pipe event.
 */
static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv =
		(struct drm_psb_private *) dev->dev_private;

	uint32_t pipe_stat_val = 0;
	uint32_t pipe_stat_reg = psb_pipestat(pipe);
	uint32_t pipe_enable = dev_priv->pipestat[pipe];
	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
	uint32_t pipe_clear;
	uint32_t i = 0;

	spin_lock(&dev_priv->irqmask_lock);

	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
	pipe_stat_val &= pipe_enable | pipe_status;
	pipe_stat_val &= pipe_stat_val >> 16;

	spin_unlock(&dev_priv->irqmask_lock);

	/* Clear the 2nd level interrupt status bits
	 * Sometimes the bits are very sticky so we repeat until they unstick */
	for (i = 0; i < 0xffff; i++) {
		PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
		pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;

		if (pipe_clear == 0)
			break;
	}

	if (pipe_clear)
		dev_err(dev->dev,
			"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
			__func__, pipe, PSB_RVDC32(pipe_stat_reg));

	if (pipe_stat_val & PIPE_VBLANK_STATUS ||
	    (IS_MFLD(dev) && pipe_stat_val & PIPE_TE_STATUS)) {
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
		struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
		unsigned long flags;

		drm_handle_vblank(dev, pipe);

		spin_lock_irqsave(&dev->event_lock, flags);
		if (gma_crtc->page_flip_event) {
			drm_crtc_send_vblank_event(crtc,
						   gma_crtc->page_flip_event);
			gma_crtc->page_flip_event = NULL;
			drm_crtc_vblank_put(crtc);
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}

/*
 * Display controller interrupt handler.
 */
static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
{
	if (vdc_stat & _PSB_IRQ_ASLE)
		psb_intel_opregion_asle_intr(dev);

	if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
		mid_pipe_event_handler(dev, 0);

	if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
		mid_pipe_event_handler(dev, 1);
}

/*
 * SGX interrupt handler
 */
static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 val, addr;

	if (stat_1 & _PSB_CE_TWOD_COMPLETE)
		val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);

	if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
		val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
		addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
		if (val) {
			if (val & _PSB_CBI_STAT_PF_N_RW)
				DRM_ERROR("SGX MMU page fault:");
			else
				DRM_ERROR("SGX MMU read / write protection fault:");

			if (val & _PSB_CBI_STAT_FAULT_CACHE)
				DRM_ERROR("\tCache requestor");
			if (val & _PSB_CBI_STAT_FAULT_TA)
				DRM_ERROR("\tTA requestor");
			if (val & _PSB_CBI_STAT_FAULT_VDM)
				DRM_ERROR("\tVDM requestor");
			if (val & _PSB_CBI_STAT_FAULT_2D)
				DRM_ERROR("\t2D requestor");
			if (val & _PSB_CBI_STAT_FAULT_PBE)
				DRM_ERROR("\tPBE requestor");
			if (val & _PSB_CBI_STAT_FAULT_TSP)
				DRM_ERROR("\tTSP requestor");
			if (val & _PSB_CBI_STAT_FAULT_ISP)
				DRM_ERROR("\tISP requestor");
			if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
				DRM_ERROR("\tUSSEPDS requestor");
			if (val & _PSB_CBI_STAT_FAULT_HOST)
				DRM_ERROR("\tHost requestor");

			DRM_ERROR("\tMMU failing address is 0x%08x.\n",
				  (unsigned int)addr);
		}
	}

	/* Clear bits */
	PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
	PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
	PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
}

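/*
 * Top-level interrupt handler.  The flow is: snapshot PSB_INT_IDENTITY_R
 * under irqmask_lock, decide which blocks (display controller, SGX,
 * hotplug) raised the interrupt, dispatch to the helpers above, then
 * acknowledge by writing the handled bits back to PSB_INT_IDENTITY_R.
 */
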
irqreturn_t psb_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
	u32 sgx_stat_1, sgx_stat_2;
	int handled = 0;

	spin_lock(&dev_priv->irqmask_lock);

	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);

	if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
		dsp_int = 1;

	/* FIXME: Handle Medfield
	if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
		dsp_int = 1;
	*/

	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
		sgx_int = 1;
	if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
		hotplug_int = 1;

	vdc_stat &= dev_priv->vdc_irq_mask;
	spin_unlock(&dev_priv->irqmask_lock);

	if (dsp_int && gma_power_is_on(dev)) {
		psb_vdc_interrupt(dev, vdc_stat);
		handled = 1;
	}

	if (sgx_int) {
		sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
		sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
		psb_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
		handled = 1;
	}

	/* Note: this bit has other meanings on some devices, so we will
	   need to address that later if it ever matters */
	if (hotplug_int && dev_priv->ops->hotplug) {
		handled = dev_priv->ops->hotplug(dev);
		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
	}

	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
	rmb();

	if (!handled)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

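/*
 * How this handler gets hooked up lives outside this file.  In the kernels
 * this version of the driver comes from, psb_irq_handler() and the
 * preinstall/postinstall/uninstall helpers below are most likely wired to
 * the DRM IRQ machinery from psb_drv.c, roughly along the lines of
 *
 *	drm_irq_install(dev, pdev->irq);
 *
 * with the hooks referenced from the driver structure.  Treat this as an
 * illustrative sketch of the call path, not a quote of psb_drv.c.
 */
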
void psb_irq_preinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv =
		(struct drm_psb_private *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (gma_power_is_on(dev)) {
		PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
		PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
		PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
		PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
		PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
	}
	if (dev->vblank[0].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	if (dev->vblank[1].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

	/* FIXME: Handle Medfield irq mask
	if (dev->vblank[1].enabled)
		dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
	if (dev->vblank[2].enabled)
		dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
	*/

	/* Revisit this area - want per device masks ? */
	if (dev_priv->ops->hotplug)
		dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
	dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;

	/* This register is safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

int psb_irq_postinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	/* Enable 2D and MMU fault interrupts */
	PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
	PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */

	/* This register is safe even if display island is off */
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
		else
			psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, true);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
	return 0;
}

void psb_irq_uninstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, false);

	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
				  _PSB_IRQ_MSVDX_FLAG |
				  _LNC_IRQ_TOPAZ_FLAG;

	/* These two registers are safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);

	wmb();

	/* This register is safe even if display island is off */
	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

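/*
 * DPST (Display Power Saving Technology) support.  The functions below arm
 * the pipe A histogram and PWM phase-in interrupts that DPST uses to adapt
 * backlight and image brightness.  The meaning of the magic 0x80010100
 * value written to PWM_CONTROL_LOGIC is not documented here and is read
 * off the register names only, so treat this description as a best-effort
 * reading of the code rather than a hardware reference.
 */
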
void psb_irq_turn_on_dpst(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv =
		(struct drm_psb_private *) dev->dev_private;
	u32 hist_reg;
	u32 pwm_reg;

	if (gma_power_begin(dev, false)) {
		PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
		hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
		PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);

		PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
		PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE | PWM_PHASEIN_INT_ENABLE,
			   PWM_CONTROL_LOGIC);
		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);

		psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);

		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
		PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
			   HISTOGRAM_INT_CONTROL);
		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
		PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
			   PWM_CONTROL_LOGIC);

		gma_power_end(dev);
	}
}

int psb_irq_enable_dpst(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv =
		(struct drm_psb_private *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	/* enable DPST */
	mid_enable_pipe_event(dev_priv, 0);
	psb_irq_turn_on_dpst(dev);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
	return 0;
}

void psb_irq_turn_off_dpst(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv =
		(struct drm_psb_private *) dev->dev_private;
	u32 pwm_reg;

	if (gma_power_begin(dev, false)) {
		PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
		PSB_RVDC32(HISTOGRAM_INT_CONTROL);

		psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);

		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
		PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
			   PWM_CONTROL_LOGIC);
		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);

		gma_power_end(dev);
	}
}

int psb_irq_disable_dpst(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv =
		(struct drm_psb_private *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	mid_disable_pipe_event(dev_priv, 0);
	psb_irq_turn_off_dpst(dev);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	return 0;
}

/*
 * Enable the vblank interrupt on the given CRTC's pipe.
 */
int psb_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t reg_val = 0;
	uint32_t pipeconf_reg = mid_pipeconf(pipe);

	/* Medfield is different - we should perhaps extract out vblank
	   and backlight etc ops */
	if (IS_MFLD(dev))
		return mdfld_enable_te(dev, pipe);

	if (gma_power_begin(dev, false)) {
		reg_val = REG_READ(pipeconf_reg);
		gma_power_end(dev);
	}

	if (!(reg_val & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (pipe == 0)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	else if (pipe == 1)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	return 0;
}

/*
 * Disable the vblank interrupt on the given CRTC's pipe.
 */
void psb_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (IS_MFLD(dev))
		mdfld_disable_te(dev, pipe);

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (pipe == 0)
		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
	else if (pipe == 1)
		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;

	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

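/*
 * On Medfield a command-mode DSI panel does not generate a conventional
 * vblank; instead the panel's TE (tearing effect) signal comes back as a
 * pipe event, which is why psb_enable_vblank()/psb_disable_vblank() defer
 * to the mdfld_*_te() helpers below on that platform.  "TE" as tearing
 * effect is standard MIPI DSI terminology; the exact routing is inferred
 * from this file rather than from the Medfield display documentation.
 */
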
/*
 * Enable the TE interrupt on the given pipe.
 */
int mdfld_enable_te(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv =
		(struct drm_psb_private *) dev->dev_private;
	unsigned long irqflags;
	uint32_t reg_val = 0;
	uint32_t pipeconf_reg = mid_pipeconf(pipe);

	if (gma_power_begin(dev, false)) {
		reg_val = REG_READ(pipeconf_reg);
		gma_power_end(dev);
	}

	if (!(reg_val & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	mid_enable_pipe_event(dev_priv, pipe);
	psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	return 0;
}

/*
 * Disable the TE interrupt on the given pipe.
 */
void mdfld_disable_te(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv =
		(struct drm_psb_private *) dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->dsr_enable)
		return;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	mid_disable_pipe_event(dev_priv, pipe);
	psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 psb_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	uint32_t high_frame = PIPEAFRAMEHIGH;
	uint32_t low_frame = PIPEAFRAMEPIXEL;
	uint32_t pipeconf_reg = PIPEACONF;
	uint32_t reg_val = 0;
	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;

	switch (pipe) {
	case 0:
		break;
	case 1:
		high_frame = PIPEBFRAMEHIGH;
		low_frame = PIPEBFRAMEPIXEL;
		pipeconf_reg = PIPEBCONF;
		break;
	case 2:
		high_frame = PIPECFRAMEHIGH;
		low_frame = PIPECFRAMEPIXEL;
		pipeconf_reg = PIPECCONF;
		break;
	default:
		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
		return 0;
	}

	if (!gma_power_begin(dev, false))
		return 0;

	reg_val = REG_READ(pipeconf_reg);

	if (!(reg_val & PIPEACONF_ENABLE)) {
		dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
			pipe);
		goto psb_get_vblank_counter_exit;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	count = (high1 << 8) | low;

psb_get_vblank_counter_exit:

	gma_power_end(dev);

	return count;
}

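/*
 * The three vblank hooks above (psb_enable_vblank, psb_disable_vblank,
 * psb_get_vblank_counter) take a drm_crtc and are intended to be plugged
 * into struct drm_crtc_funcs, roughly like this (illustrative sketch; the
 * actual hookup lives in the driver's CRTC code, not in this file):
 *
 *	static const struct drm_crtc_funcs gma_crtc_funcs = {
 *		...
 *		.enable_vblank		= psb_enable_vblank,
 *		.disable_vblank		= psb_disable_vblank,
 *		.get_vblank_counter	= psb_get_vblank_counter,
 *	};
 */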