/*
 * Copyright (c) 2011-2013, NVIDIA Corporation.
 * Copyright 2014 Google Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <displayport.h>
#include <dm.h>
#include <div64.h>
#include <errno.h>
#include <fdtdec.h>
#include <asm/io.h>
#include <asm/arch-tegra/dc.h>
#include "displayport.h"
#include "edid.h"
#include "sor.h"

DECLARE_GLOBAL_DATA_PTR;

#define DO_FAST_LINK_TRAINING	1

struct tegra_dp_plat {
	ulong base;
};

struct tegra_dp_priv {
	struct dpaux_ctlr *regs;
	struct tegra_dc_sor_data *sor;
	u8 revision;
	int enabled;
};

struct tegra_dp_priv dp_data;

static inline u32 tegra_dpaux_readl(struct tegra_dp_priv *dp, u32 reg)
{
	return readl((u32 *)dp->regs + reg);
}

static inline void tegra_dpaux_writel(struct tegra_dp_priv *dp, u32 reg,
				      u32 val)
{
	writel(val, (u32 *)dp->regs + reg);
}

static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dp_priv *dp,
					       u32 reg, u32 mask, u32 exp_val,
					       u32 poll_interval_us,
					       u32 timeout_us)
{
	u32 reg_val = 0;
	u32 temp = timeout_us;

	do {
		udelay(poll_interval_us);
		reg_val = tegra_dpaux_readl(dp, reg);
		if (timeout_us > poll_interval_us)
			timeout_us -= poll_interval_us;
		else
			break;
	} while ((reg_val & mask) != exp_val);

	if ((reg_val & mask) == exp_val)
		return 0;	/* success */
	debug("dpaux_poll_register 0x%x: timeout: (reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
	      reg, reg_val, mask, exp_val);

	return temp;
}

static inline int tegra_dpaux_wait_transaction(struct tegra_dp_priv *dp)
{
	/*
	 * According to DP spec, each aux transaction needs to finish
	 * within 40ms.
	 */
	if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
					 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
		debug("dp: DPAUX transaction timeout\n");
		return -1;
	}
	return 0;
}

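/*
 * The AUX helpers below move at most DP_AUX_MAX_BYTES (16 bytes, the DP
 * AUX payload limit) per transaction, so larger transfers are split into
 * chunks by the callers. Each chunk is retried on timeout/receive errors
 * (DP_AUX_TIMEOUT_MAX_TRIES) and on DEFER replies (DP_AUX_DEFER_MAX_TRIES);
 * on an ACK reply the byte count the sink actually accepted is returned
 * through *size.
 */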
static int tegra_dc_dpaux_write_chunk(struct tegra_dp_priv *dp, u32 cmd,
				      u32 addr, u8 *data, u32 *size,
				      u32 *aux_stat)
{
	int i;
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
	u32 temp_data;

	if (*size > DP_AUX_MAX_BYTES)
		return -1;	/* only write one chunk of data */

	/* Make sure the command is a write command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CWR:
	case DPAUX_DP_AUXCTL_CMD_MOTWR:
	case DPAUX_DP_AUXCTL_CMD_AUXWR:
		break;
	default:
		debug("dp: aux write cmd 0x%x is invalid\n", cmd);
		return -EINVAL;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
	for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
		memcpy(&temp_data, data, 4);
		tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
		data += 4;
	}

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(1);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			debug("dp: aux write transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				debug("dp: aux write retry (0x%x) -- %d\n",
				      *aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux write got error (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				debug("dp: aux write defer (0x%x) -- %d\n",
				      *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux write defer exceeds max retries (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			return 0;
		} else {
			debug("dp: aux write failed (0x%x)\n", *aux_stat);
			return -EIO;
		}
	}

	/* Should never get here */
	return -EIO;
}

static int tegra_dc_dpaux_read_chunk(struct tegra_dp_priv *dp, u32 cmd,
				     u32 addr, u8 *data, u32 *size,
				     u32 *aux_stat)
{
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;

	if (*size > DP_AUX_MAX_BYTES) {
		debug("only read one chunk\n");
		return -EIO;	/* only read one chunk */
	}

	/* Check to make sure the command is a read command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CRD:
	case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
	case DPAUX_DP_AUXCTL_CMD_MOTRD:
	case DPAUX_DP_AUXCTL_CMD_AUXRD:
		break;
	default:
		debug("dp: aux read cmd 0x%x is invalid\n", cmd);
		return -EIO;
	}

	*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
	if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		debug("dp: HPD is not detected\n");
		return -EIO;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(DP_DPCP_RETRY_SLEEP_NS * 2);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			debug("dp: aux read transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				debug("dp: aux read retry (0x%x) -- %d\n",
				      *aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;	/* retry */
			} else {
				debug("dp: aux read got error (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				debug("dp: aux read defer (0x%x) -- %d\n",
				      *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux read defer exceeds max retries (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			int i;
			u32 temp_data[4];

			for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
				temp_data[i] = tegra_dpaux_readl(dp,
						DPAUX_DP_AUXDATA_READ_W(i));

			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			memcpy(data, temp_data, *size);

			return 0;
		} else {
			debug("dp: aux read failed (0x%x)\n", *aux_stat);
			return -EIO;
		}
	}

	/* Should never get here */
	debug("%s: can't\n", __func__);
	return -EIO;
}

static int tegra_dc_dpaux_read(struct tegra_dp_priv *dp, u32 cmd, u32 addr,
			       u8 *data, u32 *size, u32 *aux_stat)
{
	u32 finished = 0;
	u32 cur_size;
	int ret = 0;

	do {
		cur_size = *size - finished;
		if (cur_size > DP_AUX_MAX_BYTES)
			cur_size = DP_AUX_MAX_BYTES;

		ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
						data, &cur_size, aux_stat);
		if (ret)
			break;

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;
	} while (*size > finished);

	*size = finished;

	return ret;
}

static int tegra_dc_dp_dpcd_read(struct tegra_dp_priv *dp, u32 cmd,
				 u8 *data_ptr)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
					cmd, data_ptr, &size, &status);
	if (ret) {
		debug("dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
		      cmd, status);
	}

	return ret;
}

static int tegra_dc_dp_dpcd_write(struct tegra_dp_priv *dp, u32 cmd,
				  u8 data)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
					 cmd, &data, &size, &status);
	if (ret) {
		debug("dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
		      cmd, status);
	}

	return ret;
}

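/*
 * I2C-over-AUX read, as used for EDID: each chunk first writes the register
 * offset with a Middle-Of-Transaction (MOTWR) transaction, then reads up to
 * DP_AUX_MAX_BYTES back with an I2C read. Returns the number of bytes read
 * on success or a negative error code.
 */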
static int tegra_dc_i2c_aux_read(struct tegra_dp_priv *dp, u32 i2c_addr,
				 u8 addr, u8 *data, u32 size, u32 *aux_stat)
{
	u32 finished = 0;
	int ret = 0;

	do {
		u32 cur_size = min((u32)DP_AUX_MAX_BYTES, size - finished);
		u32 len = 1;

		ret = tegra_dc_dpaux_write_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
				&addr, &len, aux_stat);
		if (ret) {
			debug("%s: error sending address to read.\n",
			      __func__);
			return ret;
		}

		ret = tegra_dc_dpaux_read_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
				data, &cur_size, aux_stat);
		if (ret) {
			debug("%s: error reading data.\n", __func__);
			return ret;
		}

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;
	} while (size > finished);

	return finished;
}

static void tegra_dc_dpaux_enable(struct tegra_dp_priv *dp)
{
	/* clear interrupt */
	tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
	/* do not enable interrupts for now; enable them when the ISR is in place */
	tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
			   DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
			   DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
			   0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
			   DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
			   DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
}

#ifdef DEBUG
static void tegra_dc_dp_dump_link_cfg(struct tegra_dp_priv *dp,
				      const struct tegra_dp_link_config *link_cfg)
{
	debug("DP config: cfg_name               cfg_value\n");
	debug("           Lane Count             %d\n",
	      link_cfg->max_lane_count);
	debug("           SupportEnhancedFraming %s\n",
	      link_cfg->support_enhanced_framing ? "Y" : "N");
	debug("           Bandwidth              %d\n",
	      link_cfg->max_link_bw);
	debug("           bpp                    %d\n",
	      link_cfg->bits_per_pixel);
	debug("           EnhancedFraming        %s\n",
	      link_cfg->enhanced_framing ? "Y" : "N");
	debug("           Scramble_enabled       %s\n",
	      link_cfg->scramble_ena ? "Y" : "N");
	debug("           LinkBW                 %d\n",
	      link_cfg->link_bw);
	debug("           lane_count             %d\n",
	      link_cfg->lane_count);
	debug("           activepolarity         %d\n",
	      link_cfg->activepolarity);
	debug("           active_count           %d\n",
	      link_cfg->active_count);
	debug("           tu_size                %d\n",
	      link_cfg->tu_size);
	debug("           active_frac            %d\n",
	      link_cfg->active_frac);
	debug("           watermark              %d\n",
	      link_cfg->watermark);
	debug("           hblank_sym             %d\n",
	      link_cfg->hblank_sym);
	debug("           vblank_sym             %d\n",
	      link_cfg->vblank_sym);
}
#endif

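/*
 * Step down to the next link configuration to try when training fails,
 * trading link rate (1.62/2.7/5.4 Gbps per lane) against lane count as
 * encoded in the switch below. Returns -ENOLINK once no usable
 * combination is left.
 */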
static int _tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
				       struct tegra_dp_link_config *cfg)
{
	switch (cfg->link_bw) {
	case SOR_LINK_SPEED_G1_62:
		if (cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
			cfg->link_bw = SOR_LINK_SPEED_G2_7;
		cfg->lane_count /= 2;
		break;
	case SOR_LINK_SPEED_G2_7:
		cfg->link_bw = SOR_LINK_SPEED_G1_62;
		break;
	case SOR_LINK_SPEED_G5_4:
		if (cfg->lane_count == 1) {
			cfg->link_bw = SOR_LINK_SPEED_G2_7;
			cfg->lane_count = cfg->max_lane_count;
		} else {
			cfg->lane_count /= 2;
		}
		break;
	default:
		debug("dp: Error link rate %d\n", cfg->link_bw);
		return -ENOLINK;
	}

	return (cfg->lane_count > 0) ? 0 : -ENOLINK;
}

/*
 * Calculate if the given cfg can meet the mode request.
 * Return 0 if the mode is possible, -1 otherwise.
 */
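/*
 * For illustration: a 1920x1080@60 mode (148.5MHz pixel clock) at 24bpp
 * needs 148.5M * 24 ~= 3.56Gbit/s of payload. A 2.7Gbps-per-lane link has
 * a 270MHz link symbol clock, so with four lanes the bandwidth check below
 * allows up to 270M * 8 * 4 = 8.64Gbit/s and the mode fits; the loop then
 * searches TU sizes 64..32 for the smallest accumulated timing error.
 */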
static int tegra_dc_dp_calc_config(struct tegra_dp_priv *dp,
				   const struct display_timing *timing,
				   struct tegra_dp_link_config *link_cfg)
{
	const u32 link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
	const u64 f = 100000;	/* precision factor */
	u32 num_linkclk_line;	/* Number of link clocks per line */
	u64 ratio_f;		/* Ratio of incoming to outgoing data rate */
	u64 frac_f;
	u64 activesym_f;	/* Activesym per TU */
	u64 activecount_f;
	u32 activecount;
	u32 activepolarity;
	u64 approx_value_f;
	u32 activefrac = 0;
	u64 accumulated_error_f = 0;
	u32 lowest_neg_activecount = 0;
	u32 lowest_neg_activepolarity = 0;
	u32 lowest_neg_tusize = 64;
	u32 num_symbols_per_line;
	u64 lowest_neg_activefrac = 0;
	u64 lowest_neg_error_f = 64 * f;
	u64 watermark_f;
	int i;
	int neg;

	if (!link_rate || !link_cfg->lane_count || !timing->pixelclock.typ ||
	    !link_cfg->bits_per_pixel)
		return -1;

	if ((u64)timing->pixelclock.typ * link_cfg->bits_per_pixel >=
		(u64)link_rate * 8 * link_cfg->lane_count)
		return -1;

	num_linkclk_line = (u32)(lldiv(link_rate * timing->hactive.typ,
				       timing->pixelclock.typ));

	ratio_f = (u64)timing->pixelclock.typ * link_cfg->bits_per_pixel * f;
	ratio_f /= 8;
	do_div(ratio_f, link_rate * link_cfg->lane_count);

	for (i = 64; i >= 32; --i) {
		activesym_f = ratio_f * i;
		activecount_f = lldiv(activesym_f, (u32)f) * f;
		frac_f = activesym_f - activecount_f;
		activecount = (u32)(lldiv(activecount_f, (u32)f));

		if (frac_f < (lldiv(f, 2)))	/* fraction < 0.5 */
			activepolarity = 0;
		else {
			activepolarity = 1;
			frac_f = f - frac_f;
		}

		if (frac_f != 0) {
			/* warning: frac_f should be 64-bit */
			frac_f = lldiv(f * f, frac_f);	/* 1 / fraction */
			if (frac_f > (15 * f))
				activefrac = activepolarity ? 1 : 15;
			else
				activefrac = activepolarity ?
					(u32)lldiv(frac_f, (u32)f) + 1 :
					(u32)lldiv(frac_f, (u32)f);
		}

		if (activefrac == 1)
			activepolarity = 0;

		if (activepolarity == 1)
			approx_value_f = activefrac ? lldiv(
				(activecount_f + (activefrac * f - f) * f),
				(activefrac * f)) :
				activecount_f + f;
		else
			approx_value_f = activefrac ?
				activecount_f + lldiv(f, activefrac) :
				activecount_f;

		if (activesym_f < approx_value_f) {
			accumulated_error_f = num_linkclk_line *
				lldiv(approx_value_f - activesym_f, i);
			neg = 1;
		} else {
			accumulated_error_f = num_linkclk_line *
				lldiv(activesym_f - approx_value_f, i);
			neg = 0;
		}

		if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
		    (accumulated_error_f == 0)) {
			lowest_neg_error_f = accumulated_error_f;
			lowest_neg_tusize = i;
			lowest_neg_activecount = activecount;
			lowest_neg_activepolarity = activepolarity;
			lowest_neg_activefrac = activefrac;

			if (accumulated_error_f == 0)
				break;
		}
	}

	if (lowest_neg_activefrac == 0) {
		link_cfg->activepolarity = 0;
		link_cfg->active_count = lowest_neg_activepolarity ?
			lowest_neg_activecount : lowest_neg_activecount - 1;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = 1;
	} else {
		link_cfg->activepolarity = lowest_neg_activepolarity;
		link_cfg->active_count = (u32)lowest_neg_activecount;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = (u32)lowest_neg_activefrac;
	}

	watermark_f = lldiv(ratio_f * link_cfg->tu_size * (f - ratio_f), f);
	link_cfg->watermark = (u32)(lldiv(watermark_f + lowest_neg_error_f,
					  f)) + link_cfg->bits_per_pixel / 4 - 1;
	num_symbols_per_line = (timing->hactive.typ *
				link_cfg->bits_per_pixel) /
			       (8 * link_cfg->lane_count);

	if (link_cfg->watermark > 30) {
		debug("dp: sor setting: unable to get a good tusize, force watermark to 30\n");
		link_cfg->watermark = 30;
		return -1;
	} else if (link_cfg->watermark > num_symbols_per_line) {
		debug("dp: sor setting: force watermark to the number of symbols in the line\n");
		link_cfg->watermark = num_symbols_per_line;
		return -1;
	}

	/*
	 * Refer to dev_disp.ref for more information.
	 * # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width -
	 *                      SetRasterBlankStart.X - 7) * link_clk / pclk)
	 *                    - 3 * enhanced_framing - Y
	 * where Y = (# lanes == 4) ? 3 : (# lanes == 2) ? 6 : 12
	 */
	link_cfg->hblank_sym = (int)lldiv(((uint64_t)timing->hback_porch.typ +
			timing->hfront_porch.typ + timing->hsync_len.typ - 7) *
			link_rate, timing->pixelclock.typ) -
			3 * link_cfg->enhanced_framing -
			(12 / link_cfg->lane_count);

	if (link_cfg->hblank_sym < 0)
		link_cfg->hblank_sym = 0;

	/*
	 * Refer to dev_disp.ref for more information.
	 * # symbols/vblank = ((SetRasterBlankStart.X -
	 *                      SetRasterBlankEnd.X - 25) * link_clk / pclk)
	 *                    - Y - 1;
	 * where Y = (# lanes == 4) ? 12 : (# lanes == 2) ? 21 : 39
	 */
	link_cfg->vblank_sym = (int)lldiv(((uint64_t)timing->hactive.typ - 25)
			* link_rate, timing->pixelclock.typ) - (36 /
			link_cfg->lane_count) - 4;

	if (link_cfg->vblank_sym < 0)
		link_cfg->vblank_sym = 0;

	link_cfg->is_valid = 1;
#ifdef DEBUG
	tegra_dc_dp_dump_link_cfg(dp, link_cfg);
#endif

	return 0;
}

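/*
 * Read the sink capabilities from the DPCD (max lane count, TPS3 and
 * enhanced-framing support, downspread, training AUX read interval, max
 * link rate, eDP alternate scrambler reset) and seed the link config with
 * the maximum values before computing the timing-dependent parameters.
 */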
static int tegra_dc_dp_init_max_link_cfg(
			const struct display_timing *timing,
			struct tegra_dp_priv *dp,
			struct tegra_dp_link_config *link_cfg)
{
	const int drive_current = 0x40404040;
	const int preemphasis = 0x0f0f0f0f;
	const int postcursor = 0;
	u8 dpcd_data;
	int ret;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LANE_COUNT, &dpcd_data);
	if (ret)
		return ret;
	link_cfg->max_lane_count = dpcd_data & DP_MAX_LANE_COUNT_MASK;
	link_cfg->tps3_supported = (dpcd_data &
			DP_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;
	link_cfg->support_enhanced_framing =
		(dpcd_data & DP_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
		1 : 0;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_DOWNSPREAD, &dpcd_data);
	if (ret)
		return ret;
	link_cfg->downspread = (dpcd_data & DP_MAX_DOWNSPREAD_VAL_0_5_PCT) ?
				1 : 0;

	ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
				    &link_cfg->aux_rd_interval);
	if (ret)
		return ret;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LINK_RATE,
				    &link_cfg->max_link_bw);
	if (ret)
		return ret;

	/*
	 * Set to a high value for link training and attach.
	 * Will be re-programmed when dp is enabled.
	 */
	link_cfg->drive_current = drive_current;
	link_cfg->preemphasis = preemphasis;
	link_cfg->postcursor = postcursor;

	ret = tegra_dc_dp_dpcd_read(dp, DP_EDP_CONFIGURATION_CAP, &dpcd_data);
	if (ret)
		return ret;

	link_cfg->alt_scramber_reset_cap =
		(dpcd_data & DP_EDP_CONFIGURATION_CAP_ASC_RESET_YES) ?
		1 : 0;
	link_cfg->only_enhanced_framing =
		(dpcd_data & DP_EDP_CONFIGURATION_CAP_FRAMING_CHANGE_YES) ?
		1 : 0;

	link_cfg->lane_count = link_cfg->max_lane_count;
	link_cfg->link_bw = link_cfg->max_link_bw;
	link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;
	link_cfg->frame_in_ms = (1000 / 60) + 1;

	tegra_dc_dp_calc_config(dp, timing, link_cfg);

	return 0;
}

static int tegra_dc_dp_set_assr(struct tegra_dp_priv *dp,
				struct tegra_dc_sor_data *sor, int ena)
{
	int ret;

	u8 dpcd_data = ena ?
		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_ENABLE :
		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_DISABLE;

	ret = tegra_dc_dp_dpcd_write(dp, DP_EDP_CONFIGURATION_SET,
				     dpcd_data);
	if (ret)
		return ret;

	/* Also reset the scrambler to 0xfffe */
	tegra_dc_sor_set_internal_panel(sor, ena);
	return 0;
}

static int tegra_dp_set_link_bandwidth(struct tegra_dp_priv *dp,
				       struct tegra_dc_sor_data *sor,
				       u8 link_bw)
{
	tegra_dc_sor_set_link_bandwidth(sor, link_bw);

	/* Sink side */
	return tegra_dc_dp_dpcd_write(dp, DP_LINK_BW_SET, link_bw);
}

static int tegra_dp_set_lane_count(struct tegra_dp_priv *dp,
				   const struct tegra_dp_link_config *link_cfg,
				   struct tegra_dc_sor_data *sor)
{
	u8 dpcd_data;
	int ret;

	/* check if the panel supports enhanced framing */
	dpcd_data = link_cfg->lane_count;
	if (link_cfg->enhanced_framing)
		dpcd_data |= DP_LANE_COUNT_SET_ENHANCEDFRAMING_T;
	ret = tegra_dc_dp_dpcd_write(dp, DP_LANE_COUNT_SET, dpcd_data);
	if (ret)
		return ret;

	tegra_dc_sor_set_lane_count(sor, link_cfg->lane_count);

	/* Also power down lanes that will not be used */
	return 0;
}

static int tegra_dc_dp_link_trained(struct tegra_dp_priv *dp,
				    const struct tegra_dp_link_config *cfg)
{
	u32 lane;
	u8 mask;
	u8 data;
	int ret;

	for (lane = 0; lane < cfg->lane_count; ++lane) {
		ret = tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
				DP_LANE2_3_STATUS : DP_LANE0_1_STATUS,
				&data);
		if (ret)
			return ret;
		mask = (lane & 1) ?
			NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
			DP_LANE_CR_DONE |
			DP_LANE_CHANNEL_EQ_DONE |
			DP_LANE_SYMBOL_LOCKED;
		if ((data & mask) != mask)
			return -1;
	}
	return 0;
}

static int tegra_dp_channel_eq_status(struct tegra_dp_priv *dp,
				      const struct tegra_dp_link_config *cfg)
{
	u32 cnt;
	u32 n_lanes = cfg->lane_count;
	u8 data;
	u8 ce_done = 1;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, DP_LANE0_1_STATUS + cnt, &data);
		if (ret)
			return ret;

		if (n_lanes == 1) {
			ce_done = (data & (0x1 <<
				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
				(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
			break;
		} else if (!(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
			return -EIO;
	}

	if (ce_done) {
		ret = tegra_dc_dp_dpcd_read(dp,
					    DP_LANE_ALIGN_STATUS_UPDATED,
					    &data);
		if (ret)
			return ret;
		if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
			ce_done = 0;
	}

	return ce_done ? 0 : -EIO;
}

static int tegra_dp_clock_recovery_status(struct tegra_dp_priv *dp,
					  const struct tegra_dp_link_config *cfg)
{
	u32 cnt;
	u32 n_lanes = cfg->lane_count;
	u8 data_ptr;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, (DP_LANE0_1_STATUS + cnt),
					    &data_ptr);
		if (ret)
			return ret;

		if (n_lanes == 1)
			return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ?
				1 : 0;
		else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
			 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
			return 0;
	}

	return 1;
}

static int tegra_dp_lt_adjust(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			      u32 pc[4], u8 pc_supported,
			      const struct tegra_dp_link_config *cfg)
{
	size_t cnt;
	u8 data_ptr;
	u32 n_lanes = cfg->lane_count;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, DP_ADJUST_REQUEST_LANE0_1 + cnt,
					    &data_ptr);
		if (ret)
			return ret;
		pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
		vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
		pe[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
		vs[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
	}
	if (pc_supported) {
		ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_ADJUST_REQ_POST_CURSOR2,
					    &data_ptr);
		if (ret)
			return ret;
		for (cnt = 0; cnt < n_lanes; cnt++) {
			pc[cnt] = (data_ptr >>
			NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
			NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
		}
	}

	return 0;
}

static void tegra_dp_wait_aux_training(struct tegra_dp_priv *dp,
				       bool is_clk_recovery,
				       const struct tegra_dp_link_config *cfg)
{
	if (!cfg->aux_rd_interval)
		udelay(is_clk_recovery ? 200 : 500);
	else
		mdelay(cfg->aux_rd_interval * 4);
}

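/*
 * Program the requested training pattern on both ends of the link: the
 * source via tegra_dc_sor_set_dp_linkctl() and the sink via the
 * DP_TRAINING_PATTERN_SET DPCD register. Scrambling is disabled while a
 * training pattern is active and re-enabled when the pattern is cleared.
 */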
static void tegra_dp_tpg(struct tegra_dp_priv *dp, u32 tp, u32 n_lanes,
			 const struct tegra_dp_link_config *cfg)
{
	u8 data = (tp == training_pattern_disabled)
		? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
		: (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);

	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, tp, cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, data);
}

static int tegra_dp_link_config(struct tegra_dp_priv *dp,
				const struct tegra_dp_link_config *link_cfg)
{
	u8 dpcd_data;
	u32 retry;
	int ret;

	if (link_cfg->lane_count == 0) {
		debug("dp: error: lane count is 0. Can not set link config.\n");
		return -ENOLINK;
	}

	/* Set power state if it is not in normal level */
	ret = tegra_dc_dp_dpcd_read(dp, DP_SET_POWER, &dpcd_data);
	if (ret)
		return ret;

	if (dpcd_data == DP_SET_POWER_D3) {
		dpcd_data = DP_SET_POWER_D0;

		/* DP spec requires 3 retries */
		for (retry = 3; retry > 0; --retry) {
			ret = tegra_dc_dp_dpcd_write(dp, DP_SET_POWER,
						     dpcd_data);
			if (!ret)
				break;
			if (retry == 1) {
				debug("dp: Failed to set DP panel power\n");
				return ret;
			}
		}
	}

	/* Enable ASSR if possible */
	if (link_cfg->alt_scramber_reset_cap) {
		ret = tegra_dc_dp_set_assr(dp, dp->sor, 1);
		if (ret)
			return ret;
	}

	ret = tegra_dp_set_link_bandwidth(dp, dp->sor, link_cfg->link_bw);
	if (ret) {
		debug("dp: Failed to set link bandwidth\n");
		return ret;
	}
	ret = tegra_dp_set_lane_count(dp, link_cfg, dp->sor);
	if (ret) {
		debug("dp: Failed to set lane count\n");
		return ret;
	}
	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, training_pattern_none,
				    link_cfg);

	return 0;
}

static int tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
				      const struct display_timing *timing,
				      struct tegra_dp_link_config *cfg)
{
	struct tegra_dp_link_config tmp_cfg;
	int ret;

	tmp_cfg = *cfg;
	cfg->is_valid = 0;

	ret = _tegra_dp_lower_link_config(dp, cfg);
	if (!ret)
		ret = tegra_dc_dp_calc_config(dp, timing, cfg);
	if (!ret)
		ret = tegra_dp_link_config(dp, cfg);
	if (ret)
		goto fail;

	return 0;

fail:
	*cfg = tmp_cfg;
	tegra_dp_link_config(dp, &tmp_cfg);
	return ret;
}

static int tegra_dp_lt_config(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			      u32 pc[4], const struct tegra_dp_link_config *cfg)
{
	struct tegra_dc_sor_data *sor = dp->sor;
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	u32 cnt;
	u32 val;

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 mask = 0;
		u32 pe_reg, vs_reg, pc_reg;
		u32 shift = 0;

		switch (cnt) {
		case 0:
			mask = PR_LANE2_DP_LANE0_MASK;
			shift = PR_LANE2_DP_LANE0_SHIFT;
			break;
		case 1:
			mask = PR_LANE1_DP_LANE1_MASK;
			shift = PR_LANE1_DP_LANE1_SHIFT;
			break;
		case 2:
			mask = PR_LANE0_DP_LANE2_MASK;
			shift = PR_LANE0_DP_LANE2_SHIFT;
			break;
		case 3:
			mask = PR_LANE3_DP_LANE3_MASK;
			shift = PR_LANE3_DP_LANE3_SHIFT;
			break;
		default:
			debug("dp: incorrect lane cnt\n");
			return -EINVAL;
		}

		pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];

		tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
				      vs_reg << shift, pc_reg << shift,
				      pc_supported);
	}

	tegra_dp_disable_tx_pu(dp->sor);
	udelay(20);

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
		u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);

		val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
			(max_vs_flag ?
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
			(pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
			(max_pe_flag ?
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
		tegra_dc_dp_dpcd_write(dp, (DP_TRAINING_LANE0_SET + cnt), val);
	}

	if (pc_supported) {
		for (cnt = 0; cnt < n_lanes / 2; cnt++) {
			u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
			u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);

			val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
				(max_pc_flag0 ?
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
				(pc[cnt + 1] <<
				NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
				(max_pc_flag1 ?
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
			tegra_dc_dp_dpcd_write(dp,
					       NV_DPCD_TRAINING_LANE0_1_SET2 +
					       cnt, val);
		}
	}

	return 0;
}

static int _tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4],
				u32 vs[4], u32 pc[4], u8 pc_supported,
				u32 n_lanes,
				const struct tegra_dp_link_config *cfg)
{
	u32 retry_cnt;

	for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
		int ret;

		if (retry_cnt) {
			ret = tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported,
						 cfg);
			if (ret)
				return ret;
			tegra_dp_lt_config(dp, pe, vs, pc, cfg);
		}

		tegra_dp_wait_aux_training(dp, false, cfg);

		if (!tegra_dp_clock_recovery_status(dp, cfg)) {
			debug("dp: CR failed in channel EQ sequence!\n");
			break;
		}

		if (!tegra_dp_channel_eq_status(dp, cfg))
			return 0;
	}

	return -EIO;
}

static int tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			       u32 pc[4],
			       const struct tegra_dp_link_config *cfg)
{
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	int ret;
	u32 tp_src = training_pattern_2;

	if (pc_supported)
		tp_src = training_pattern_3;

	tegra_dp_tpg(dp, tp_src, n_lanes, cfg);

	ret = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes, cfg);

	tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);

	return ret;
}

static int _tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
				  u32 vs[4], u32 pc[4], u8 pc_supported,
				  u32 n_lanes,
				  const struct tegra_dp_link_config *cfg)
{
	u32 vs_temp[4];
	u32 retry_cnt = 0;

	do {
		tegra_dp_lt_config(dp, pe, vs, pc, cfg);
		tegra_dp_wait_aux_training(dp, true, cfg);

		if (tegra_dp_clock_recovery_status(dp, cfg))
			return 0;

		memcpy(vs_temp, vs, sizeof(vs_temp));
		tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported, cfg);

		if (memcmp(vs_temp, vs, sizeof(vs_temp)))
			retry_cnt = 0;
		else
			++retry_cnt;
	} while (retry_cnt < 5);

	return -EIO;
}

static int tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
				 u32 vs[4], u32 pc[4],
				 const struct tegra_dp_link_config *cfg)
{
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	int err;

	tegra_dp_tpg(dp, training_pattern_1, n_lanes, cfg);

	err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes,
				     cfg);
	if (err < 0)
		tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);

	return err;
}

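/*
 * Full link training: run clock recovery (TP1), then channel equalization
 * (TP2, or TP3 when the sink supports it), re-reading the sink's requested
 * drive settings between attempts. If either phase fails, drop to a lower
 * link configuration and restart from clock recovery.
 */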
static int tegra_dc_dp_full_link_training(struct tegra_dp_priv *dp,
					  const struct display_timing *timing,
					  struct tegra_dp_link_config *cfg)
{
	struct tegra_dc_sor_data *sor = dp->sor;
	int err;
	u32 pe[4], vs[4], pc[4];

	tegra_sor_precharge_lanes(sor, cfg);

retry_cr:
	memset(pe, PREEMPHASIS_DISABLED, sizeof(pe));
	memset(vs, DRIVECURRENT_LEVEL0, sizeof(vs));
	memset(pc, POSTCURSOR2_LEVEL0, sizeof(pc));

	err = tegra_dp_clk_recovery(dp, pe, vs, pc, cfg);
	if (err) {
		if (!tegra_dp_lower_link_config(dp, timing, cfg))
			goto retry_cr;

		debug("dp: clk recovery failed\n");
		goto fail;
	}

	err = tegra_dp_channel_eq(dp, pe, vs, pc, cfg);
	if (err) {
		if (!tegra_dp_lower_link_config(dp, timing, cfg))
			goto retry_cr;

		debug("dp: channel equalization failed\n");
		goto fail;
	}
#ifdef DEBUG
	tegra_dc_dp_dump_link_cfg(dp, cfg);
#endif
	return 0;

fail:
	return err;
}

/*
 * All link training functions are ported from the kernel dc driver.
 * See more details at drivers/video/tegra/dc/dp.c
 */
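/*
 * Fast link training skips the adjust/retry handshake: it drives fixed
 * lane settings, sends TP1 and then TP2/TP3 for a fixed delay, and only
 * checks the per-lane status afterwards. It is only expected to succeed
 * on sinks already known to train with these settings; otherwise the
 * caller falls back to the full sequence above.
 */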
static int tegra_dc_dp_fast_link_training(struct tegra_dp_priv *dp,
		const struct tegra_dp_link_config *link_cfg,
		struct tegra_dc_sor_data *sor)
{
	u8 link_bw;
	u8 lane_count;
	u16 data16;
	u32 data32;
	u32 size;
	u32 status;
	int j;
	u32 mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);

	tegra_dc_sor_set_lane_parm(sor, link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_MAIN_LINK_CHANNEL_CODING_SET,
			       DP_SET_ANSI_8B10B);

	/* Send TP1 */
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
			       DP_TRAINING_PATTERN_1);

	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
	udelay(520);

	size = sizeof(data16);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
			    DP_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
	status = mask & 0x1111;
	if ((data16 & status) != status) {
		debug("dp: Link training error for TP1 (%#x, status %#x)\n",
		      data16, status);
		return -EFAULT;
	}

	/* enable ASSR */
	tegra_dc_dp_set_assr(dp, sor, link_cfg->scramble_ena);
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);

	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
			       link_cfg->link_bw == 20 ? 0x23 : 0x22);
	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
	udelay(520);

	size = sizeof(data32);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD, DP_LANE0_1_STATUS,
			    (u8 *)&data32, &size, &status);
	if ((data32 & mask) != (0x7777 & mask)) {
		debug("dp: Link training error for TP2/3 (0x%x)\n", data32);
		return -EFAULT;
	}

	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
				    link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, 0);

	if (tegra_dc_dp_link_trained(dp, link_cfg)) {
		tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
		debug("Fast link training failed, link bw %d, lane # %d\n",
		      link_bw, lane_count);
		return -EFAULT;
	}

	debug("Fast link training succeeded, link bw %d, lane %d\n",
	      link_cfg->link_bw, link_cfg->lane_count);

	return 0;
}

static int tegra_dp_do_link_training(struct tegra_dp_priv *dp,
		struct tegra_dp_link_config *link_cfg,
		const struct display_timing *timing,
		struct tegra_dc_sor_data *sor)
{
	u8 link_bw;
	u8 lane_count;
	int ret;

	if (DO_FAST_LINK_TRAINING) {
		ret = tegra_dc_dp_fast_link_training(dp, link_cfg, sor);
		if (ret) {
			debug("dp: fast link training failed\n");
		} else {
			/*
			 * set to a known-good drive setting if fast link
			 * training succeeded. Ignore any error.
			 */
			ret = tegra_dc_sor_set_voltage_swing(dp->sor, link_cfg);
			if (ret)
				debug("Failed to set voltage swing\n");
		}
	} else {
		ret = -ENOSYS;
	}
	if (ret) {
		/* Try full link training then */
		ret = tegra_dc_dp_full_link_training(dp, timing, link_cfg);
		if (ret) {
			debug("dp: full link training failed\n");
			return ret;
		}
	}

	/* Everything is good; double check the link config */
	tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);

	if ((link_cfg->link_bw == link_bw) &&
	    (link_cfg->lane_count == lane_count))
		return 0;
	else
		return -EFAULT;
}

static int tegra_dc_dp_explore_link_cfg(struct tegra_dp_priv *dp,
			struct tegra_dp_link_config *link_cfg,
			struct tegra_dc_sor_data *sor,
			const struct display_timing *timing)
{
	struct tegra_dp_link_config temp_cfg;

	if (!timing->pixelclock.typ || !timing->hactive.typ ||
	    !timing->vactive.typ) {
		debug("dp: error mode configuration\n");
		return -EINVAL;
	}
	if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
		debug("dp: error link configuration\n");
		return -EINVAL;
	}

	link_cfg->is_valid = 0;

	memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));

	temp_cfg.link_bw = temp_cfg.max_link_bw;
	temp_cfg.lane_count = temp_cfg.max_lane_count;

	/*
	 * set to max link config
	 */
	if ((!tegra_dc_dp_calc_config(dp, timing, &temp_cfg)) &&
	    (!tegra_dp_link_config(dp, &temp_cfg)) &&
	    (!tegra_dp_do_link_training(dp, &temp_cfg, timing, sor)))
		/* the max link cfg is doable */
		memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));

	return link_cfg->is_valid ? 0 : -EFAULT;
}

static int tegra_dp_hpd_plug(struct tegra_dp_priv *dp)
{
	const int vdd_to_hpd_delay_ms = 200;
	u32 val;
	ulong start;

	start = get_timer(0);
	do {
		val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
		if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
			return 0;
		udelay(100);
	} while (get_timer(start) < vdd_to_hpd_delay_ms);

	return -EIO;
}

static int tegra_dc_dp_sink_out_of_sync(struct tegra_dp_priv *dp, u32 delay_ms)
{
	u8 dpcd_data;
	int out_of_sync;
	int ret;

	debug("%s: delay=%d\n", __func__, delay_ms);
	mdelay(delay_ms);
	ret = tegra_dc_dp_dpcd_read(dp, DP_SINK_STATUS, &dpcd_data);
	if (ret)
		return ret;

	out_of_sync = !(dpcd_data & DP_SINK_STATUS_PORT0_IN_SYNC);
	if (out_of_sync)
		debug("SINK receive port 0 out of sync, data=%x\n", dpcd_data);
	else
		debug("SINK is in synchronization\n");

	return out_of_sync;
}

static int tegra_dc_dp_check_sink(struct tegra_dp_priv *dp,
				  struct tegra_dp_link_config *link_cfg,
				  const struct display_timing *timing)
{
	const int max_retry = 5;
	int delay_frame;
	int retries;

	/*
	 * The DP TCON may skip some main stream frames, thus we need to wait
	 * some delay before reading the DPCD SINK STATUS register, starting
	 * from 5
	 */
	delay_frame = 5;

	retries = max_retry;
	do {
		int ret;

		if (!tegra_dc_dp_sink_out_of_sync(dp, link_cfg->frame_in_ms *
						  delay_frame))
			return 0;

		debug("%s: retries left %d\n", __func__, retries);
		if (!retries--) {
			printf("DP: Out of sync after %d retries\n", max_retry);
			return -EIO;
		}

		ret = tegra_dc_sor_detach(dp->sor);
		if (ret)
			return ret;
		if (tegra_dc_dp_explore_link_cfg(dp, link_cfg, dp->sor,
						 timing)) {
			debug("dp: %s: error configuring link\n", __func__);
			continue;
		}

		tegra_dc_sor_set_power_state(dp->sor, 1);
		tegra_dc_sor_attach(dp->sor, link_cfg, timing);

		/*
		 * Increase delay_frame for the next try in case the sink is
		 * skipping more frames
		 */
		delay_frame += 10;
	} while (1);
}

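/*
 * Bring-up sequence for the DisplayPort link: enable the DPAUX pads, wait
 * for HPD, read the sink capabilities, initialise and power up the SOR,
 * set the sink power state via DPCD, train the link (fast path first, then
 * the full sequence), attach the SOR to the display controller and finally
 * power down any unused lanes.
 */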
int tegra_dp_enable(struct udevice *dev, int panel_bpp,
		    const struct display_timing *timing)
{
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	struct tegra_dp_link_config slink_cfg, *link_cfg = &slink_cfg;
	struct tegra_dc_sor_data *sor;
	int data;
	int retry;
	int ret;

	memset(link_cfg, '\0', sizeof(*link_cfg));
	link_cfg->is_valid = 0;
	link_cfg->scramble_ena = 1;

	tegra_dc_dpaux_enable(priv);

	if (tegra_dp_hpd_plug(priv) < 0) {
		debug("dp: hpd plug failed\n");
		return -EIO;
	}

	link_cfg->bits_per_pixel = panel_bpp;
	if (tegra_dc_dp_init_max_link_cfg(timing, priv, link_cfg)) {
		debug("dp: failed to init link configuration\n");
		return -ENOLINK;
	}

	ret = tegra_dc_sor_init(&sor);
	if (ret)
		return ret;
	priv->sor = sor;
	ret = tegra_dc_sor_enable_dp(sor, link_cfg);
	if (ret)
		return ret;

	tegra_dc_sor_set_panel_power(sor, 1);

	/* Write power on to DPCD */
	data = DP_SET_POWER_D0;
	retry = 0;
	do {
		ret = tegra_dc_dp_dpcd_write(priv, DP_SET_POWER, data);
	} while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);

	if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
		debug("dp: failed to power on panel (0x%x)\n", ret);
		return -ENETUNREACH;
		goto error_enable;
	}

	/* Confirm DP plugging status */
	if (!(tegra_dpaux_readl(priv, DPAUX_DP_AUXSTAT) &
			DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		debug("dp: could not detect HPD\n");
		return -ENXIO;
	}

	/* Check DP version */
	if (tegra_dc_dp_dpcd_read(priv, DP_DPCD_REV, &priv->revision)) {
		debug("dp: failed to read the revision number from sink\n");
		return -EIO;
	}

	if (tegra_dc_dp_explore_link_cfg(priv, link_cfg, sor, timing)) {
		debug("dp: error configuring link\n");
		return -ENOMEDIUM;
	}

	tegra_dc_sor_set_power_state(sor, 1);
	ret = tegra_dc_sor_attach(sor, link_cfg, timing);
	if (ret && ret != -EEXIST)
		return ret;

	/*
	 * This takes a long time, but can apparently resolve a failure to
	 * bring up the display correctly.
	 */
	if (0) {
		ret = tegra_dc_dp_check_sink(priv, link_cfg, timing);
		if (ret)
			return ret;
	}

	/* Power down the unused lanes to save power - a few hundred mW */
	tegra_dc_sor_power_down_unused_lanes(sor, link_cfg);

	priv->enabled = true;
error_enable:
	return 0;
}

static int tegra_dp_ofdata_to_platdata(struct udevice *dev)
{
	struct tegra_dp_plat *plat = dev_get_platdata(dev);
	const void *blob = gd->fdt_blob;

	plat->base = fdtdec_get_addr(blob, dev->of_offset, "reg");

	return 0;
}

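/*
 * Read the panel's EDID over the AUX channel using I2C-over-AUX
 * transactions to the standard EDID slave address 0x50.
 */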
static int tegra_dp_read_edid(struct udevice *dev, u8 *buf, int buf_size)
{
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	const int tegra_edid_i2c_address = 0x50;
	u32 aux_stat = 0;

	tegra_dc_dpaux_enable(priv);

	return tegra_dc_i2c_aux_read(priv, tegra_edid_i2c_address, 0, buf,
				     buf_size, &aux_stat);
}

static const struct dm_display_port_ops dp_tegra_ops = {
	.read_edid = tegra_dp_read_edid,
	.enable = tegra_dp_enable,
};

static int dp_tegra_probe(struct udevice *dev)
{
	struct tegra_dp_plat *plat = dev_get_platdata(dev);
	struct tegra_dp_priv *priv = dev_get_priv(dev);

	priv->regs = (struct dpaux_ctlr *)plat->base;
	priv->enabled = false;

	return 0;
}

static const struct udevice_id tegra_dp_ids[] = {
	{ .compatible = "nvidia,tegra124-dpaux" },
	{ }
};

U_BOOT_DRIVER(dp_tegra) = {
	.name	= "dpaux_tegra",
	.id	= UCLASS_DISPLAY_PORT,
	.of_match = tegra_dp_ids,
	.ofdata_to_platdata = tegra_dp_ofdata_to_platdata,
	.probe	= dp_tegra_probe,
	.ops	= &dp_tegra_ops,
	.priv_auto_alloc_size = sizeof(struct tegra_dp_priv),
	.platdata_auto_alloc_size = sizeof(struct tegra_dp_plat),
};