// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2013, NVIDIA Corporation.
 * Copyright 2014 Google Inc.
 */

#include <common.h>
#include <display.h>
#include <dm.h>
#include <div64.h>
#include <errno.h>
#include <video_bridge.h>
#include <asm/io.h>
#include <asm/arch-tegra/dc.h>
#include "display.h"
#include "edid.h"
#include "sor.h"
#include "displayport.h"

#define DO_FAST_LINK_TRAINING	1

struct tegra_dp_plat {
	ulong base;
};

/**
 * struct tegra_dp_priv - private displayport driver info
 *
 * @dc_dev:	Display controller device that is sending the video feed
 */
struct tegra_dp_priv {
	struct udevice *sor;
	struct udevice *dc_dev;
	struct dpaux_ctlr *regs;
	u8 revision;
	int enabled;
};

struct tegra_dp_priv dp_data;

static inline u32 tegra_dpaux_readl(struct tegra_dp_priv *dp, u32 reg)
{
	return readl((u32 *)dp->regs + reg);
}

static inline void tegra_dpaux_writel(struct tegra_dp_priv *dp, u32 reg,
				      u32 val)
{
	writel(val, (u32 *)dp->regs + reg);
}

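/*
 * Poll a DPAUX register until the bits in @mask read back as @exp_val.
 * Returns 0 on success, or the original timeout value (non-zero) if the
 * register never matched within @timeout_us.
 */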
static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dp_priv *dp,
					       u32 reg, u32 mask, u32 exp_val,
					       u32 poll_interval_us,
					       u32 timeout_us)
{
	u32 reg_val = 0;
	u32 temp = timeout_us;

	do {
		udelay(poll_interval_us);
		reg_val = tegra_dpaux_readl(dp, reg);
		if (timeout_us > poll_interval_us)
			timeout_us -= poll_interval_us;
		else
			break;
	} while ((reg_val & mask) != exp_val);

	if ((reg_val & mask) == exp_val)
		return 0;	/* success */
	debug("dpaux_poll_register 0x%x: timeout: (reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
	      reg, reg_val, mask, exp_val);

	return temp;
}

static inline int tegra_dpaux_wait_transaction(struct tegra_dp_priv *dp)
{
	/* According to DP spec, each aux transaction needs to finish
	   within 40ms. */
	if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
					 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
		debug("dp: DPAUX transaction timeout\n");
		return -1;
	}
	return 0;
}

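/*
 * Write one chunk (up to DP_AUX_MAX_BYTES) over the AUX channel, retrying
 * on timeout/error and defer replies. On an ACK reply *size is updated to
 * the number of bytes the sink accepted.
 */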
static int tegra_dc_dpaux_write_chunk(struct tegra_dp_priv *dp, u32 cmd,
				      u32 addr, u8 *data, u32 *size,
				      u32 *aux_stat)
{
	int i;
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
	u32 temp_data;

	if (*size > DP_AUX_MAX_BYTES)
		return -1;	/* only write one chunk of data */

	/* Make sure the command is write command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CWR:
	case DPAUX_DP_AUXCTL_CMD_MOTWR:
	case DPAUX_DP_AUXCTL_CMD_AUXWR:
		break;
	default:
		debug("dp: aux write cmd 0x%x is invalid\n", cmd);
		return -EINVAL;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
	for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
		memcpy(&temp_data, data, 4);
		tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
		data += 4;
	}

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(1);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			debug("dp: aux write transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				debug("dp: aux write retry (0x%x) -- %d\n",
				      *aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux write got error (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				debug("dp: aux write defer (0x%x) -- %d\n",
				      *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux write defer exceeds max retries (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			return 0;
		} else {
			debug("dp: aux write failed (0x%x)\n", *aux_stat);
			return -EIO;
		}
	}

	/* Should never come to here */
	return -EIO;
}

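/*
 * Read one chunk (up to DP_AUX_MAX_BYTES) over the AUX channel. Requires
 * HPD to be asserted; retries on timeout/error and defer replies and
 * updates *size with the number of bytes actually returned.
 */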
static int tegra_dc_dpaux_read_chunk(struct tegra_dp_priv *dp, u32 cmd,
				     u32 addr, u8 *data, u32 *size,
				     u32 *aux_stat)
{
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;

	if (*size > DP_AUX_MAX_BYTES) {
		debug("only read one chunk\n");
		return -EIO;	/* only read one chunk */
	}

	/* Check to make sure the command is read command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CRD:
	case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
	case DPAUX_DP_AUXCTL_CMD_MOTRD:
	case DPAUX_DP_AUXCTL_CMD_AUXRD:
		break;
	default:
		debug("dp: aux read cmd 0x%x is invalid\n", cmd);
		return -EIO;
	}

	*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
	if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		debug("dp: HPD is not detected\n");
		return -EIO;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(DP_DPCP_RETRY_SLEEP_NS * 2);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			debug("dp: aux read transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				debug("dp: aux read retry (0x%x) -- %d\n",
				      *aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;	/* retry */
			} else {
				debug("dp: aux read got error (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				debug("dp: aux read defer (0x%x) -- %d\n",
				      *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux read defer exceeds max retries (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			int i;
			u32 temp_data[4];

			for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
				temp_data[i] = tegra_dpaux_readl(dp,
						DPAUX_DP_AUXDATA_READ_W(i));

			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			memcpy(data, temp_data, *size);

			return 0;
		} else {
			debug("dp: aux read failed (0x%x)\n", *aux_stat);
			return -EIO;
		}
	}

	/* Should never come to here */
	debug("%s: can't\n", __func__);

	return -EIO;
}

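/*
 * Read an arbitrary number of bytes by issuing read_chunk() calls of at
 * most DP_AUX_MAX_BYTES each; *size is updated with the total read.
 */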
static int tegra_dc_dpaux_read(struct tegra_dp_priv *dp, u32 cmd, u32 addr,
			       u8 *data, u32 *size, u32 *aux_stat)
{
	u32 finished = 0;
	u32 cur_size;
	int ret = 0;

	do {
		cur_size = *size - finished;
		if (cur_size > DP_AUX_MAX_BYTES)
			cur_size = DP_AUX_MAX_BYTES;

		ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
						data, &cur_size, aux_stat);
		if (ret)
			break;

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;
	} while (*size > finished);

	*size = finished;

	return ret;
}

static int tegra_dc_dp_dpcd_read(struct tegra_dp_priv *dp, u32 cmd,
				 u8 *data_ptr)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
					cmd, data_ptr, &size, &status);
	if (ret) {
		debug("dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
		      cmd, status);
	}

	return ret;
}

static int tegra_dc_dp_dpcd_write(struct tegra_dp_priv *dp, u32 cmd,
				  u8 data)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
					 cmd, &data, &size, &status);
	if (ret) {
		debug("dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
		      cmd, status);
	}

	return ret;
}

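/*
 * I2C-over-AUX read (used for EDID): send the register address with the
 * MOT bit set, then read the data back in DP_AUX_MAX_BYTES chunks.
 * Returns the number of bytes read, or a negative error code.
 */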
static int tegra_dc_i2c_aux_read(struct tegra_dp_priv *dp, u32 i2c_addr,
				 u8 addr, u8 *data, u32 size, u32 *aux_stat)
{
	u32 finished = 0;
	int ret = 0;

	do {
		u32 cur_size = min((u32)DP_AUX_MAX_BYTES, size - finished);
		u32 len = 1;

		ret = tegra_dc_dpaux_write_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
				&addr, &len, aux_stat);
		if (ret) {
			debug("%s: error sending address to read.\n",
			      __func__);
			return ret;
		}

		ret = tegra_dc_dpaux_read_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
				data, &cur_size, aux_stat);
		if (ret) {
			debug("%s: error reading data.\n", __func__);
			return ret;
		}

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;
	} while (size > finished);

	return finished;
}

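/*
 * Power up the DPAUX hybrid pads and clear/mask their interrupts so the
 * AUX channel can be used for DPCD and I2C-over-AUX transfers.
 */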
static void tegra_dc_dpaux_enable(struct tegra_dp_priv *dp)
{
	/* clear interrupt */
	tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
	/* do not enable interrupt for now. Enable them when Isr in place */
	tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
			   DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
			   DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
			   0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
			   DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
			   DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
}

#ifdef DEBUG
static void tegra_dc_dp_dump_link_cfg(struct tegra_dp_priv *dp,
				      const struct tegra_dp_link_config *link_cfg)
{
	debug("DP config: cfg_name cfg_value\n");
	debug(" Lane Count %d\n",
	      link_cfg->max_lane_count);
	debug(" SupportEnhancedFraming %s\n",
	      link_cfg->support_enhanced_framing ? "Y" : "N");
	debug(" Bandwidth %d\n",
	      link_cfg->max_link_bw);
	debug(" bpp %d\n",
	      link_cfg->bits_per_pixel);
	debug(" EnhancedFraming %s\n",
	      link_cfg->enhanced_framing ? "Y" : "N");
	debug(" Scramble_enabled %s\n",
	      link_cfg->scramble_ena ? "Y" : "N");
	debug(" LinkBW %d\n",
	      link_cfg->link_bw);
	debug(" lane_count %d\n",
	      link_cfg->lane_count);
	debug(" activespolarity %d\n",
	      link_cfg->activepolarity);
	debug(" active_count %d\n",
	      link_cfg->active_count);
	debug(" tu_size %d\n",
	      link_cfg->tu_size);
	debug(" active_frac %d\n",
	      link_cfg->active_frac);
	debug(" watermark %d\n",
	      link_cfg->watermark);
	debug(" hblank_sym %d\n",
	      link_cfg->hblank_sym);
	debug(" vblank_sym %d\n",
	      link_cfg->vblank_sym);
}
#endif

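/*
 * Fall back to the next lower link configuration: drop the link rate a
 * step or halve the lane count, so training can be retried.
 */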
static int _tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
				       struct tegra_dp_link_config *cfg)
{
	switch (cfg->link_bw) {
	case SOR_LINK_SPEED_G1_62:
		if (cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
			cfg->link_bw = SOR_LINK_SPEED_G2_7;
		cfg->lane_count /= 2;
		break;
	case SOR_LINK_SPEED_G2_7:
		cfg->link_bw = SOR_LINK_SPEED_G1_62;
		break;
	case SOR_LINK_SPEED_G5_4:
		if (cfg->lane_count == 1) {
			cfg->link_bw = SOR_LINK_SPEED_G2_7;
			cfg->lane_count = cfg->max_lane_count;
		} else {
			cfg->lane_count /= 2;
		}
		break;
	default:
		debug("dp: Error link rate %d\n", cfg->link_bw);
		return -ENOLINK;
	}

	return (cfg->lane_count > 0) ? 0 : -ENOLINK;
}

/*
 * Calculate if the given cfg can meet the mode request.
 * Return 0 if the mode is possible, -1 otherwise.
 */
static int tegra_dc_dp_calc_config(struct tegra_dp_priv *dp,
				   const struct display_timing *timing,
				   struct tegra_dp_link_config *link_cfg)
{
	const u32 link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
	const u64 f = 100000;	/* precision factor */
	u32 num_linkclk_line;	/* Number of link clocks per line */
	u64 ratio_f;		/* Ratio of incoming to outgoing data rate */
	u64 frac_f;
	u64 activesym_f;	/* Activesym per TU */
	u64 activecount_f;
	u32 activecount;
	u32 activepolarity;
	u64 approx_value_f;
	u32 activefrac = 0;
	u64 accumulated_error_f = 0;
	u32 lowest_neg_activecount = 0;
	u32 lowest_neg_activepolarity = 0;
	u32 lowest_neg_tusize = 64;
	u32 num_symbols_per_line;
	u64 lowest_neg_activefrac = 0;
	u64 lowest_neg_error_f = 64 * f;
	u64 watermark_f;
	int i;
	int neg;

	if (!link_rate || !link_cfg->lane_count || !timing->pixelclock.typ ||
	    !link_cfg->bits_per_pixel)
		return -1;

	if ((u64)timing->pixelclock.typ * link_cfg->bits_per_pixel >=
	    (u64)link_rate * 8 * link_cfg->lane_count)
		return -1;

	num_linkclk_line = (u32)(lldiv(link_rate * timing->hactive.typ,
				       timing->pixelclock.typ));

	ratio_f = (u64)timing->pixelclock.typ * link_cfg->bits_per_pixel * f;
	ratio_f /= 8;
	do_div(ratio_f, link_rate * link_cfg->lane_count);

	for (i = 64; i >= 32; --i) {
		activesym_f = ratio_f * i;
		activecount_f = lldiv(activesym_f, (u32)f) * f;
		frac_f = activesym_f - activecount_f;
		activecount = (u32)(lldiv(activecount_f, (u32)f));

		if (frac_f < (lldiv(f, 2))) /* fraction < 0.5 */
			activepolarity = 0;
		else {
			activepolarity = 1;
			frac_f = f - frac_f;
		}

		if (frac_f != 0) {
			/* warning: frac_f should be 64-bit */
			frac_f = lldiv(f * f, frac_f); /* 1 / fraction */
			if (frac_f > (15 * f))
				activefrac = activepolarity ? 1 : 15;
			else
				activefrac = activepolarity ?
					(u32)lldiv(frac_f, (u32)f) + 1 :
					(u32)lldiv(frac_f, (u32)f);
		}

		if (activefrac == 1)
			activepolarity = 0;

		if (activepolarity == 1)
			approx_value_f = activefrac ? lldiv(
				(activecount_f + (activefrac * f - f) * f),
				(activefrac * f)) :
				activecount_f + f;
		else
			approx_value_f = activefrac ?
				activecount_f + lldiv(f, activefrac) :
				activecount_f;

		if (activesym_f < approx_value_f) {
			accumulated_error_f = num_linkclk_line *
				lldiv(approx_value_f - activesym_f, i);
			neg = 1;
		} else {
			accumulated_error_f = num_linkclk_line *
				lldiv(activesym_f - approx_value_f, i);
			neg = 0;
		}

		if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
		    (accumulated_error_f == 0)) {
			lowest_neg_error_f = accumulated_error_f;
			lowest_neg_tusize = i;
			lowest_neg_activecount = activecount;
			lowest_neg_activepolarity = activepolarity;
			lowest_neg_activefrac = activefrac;

			if (accumulated_error_f == 0)
				break;
		}
	}

	if (lowest_neg_activefrac == 0) {
		link_cfg->activepolarity = 0;
		link_cfg->active_count = lowest_neg_activepolarity ?
			lowest_neg_activecount : lowest_neg_activecount - 1;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = 1;
	} else {
		link_cfg->activepolarity = lowest_neg_activepolarity;
		link_cfg->active_count = (u32)lowest_neg_activecount;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = (u32)lowest_neg_activefrac;
	}

	watermark_f = lldiv(ratio_f * link_cfg->tu_size * (f - ratio_f), f);
	link_cfg->watermark = (u32)(lldiv(watermark_f + lowest_neg_error_f,
					  f)) + link_cfg->bits_per_pixel / 4 - 1;
	num_symbols_per_line = (timing->hactive.typ *
				link_cfg->bits_per_pixel) /
			       (8 * link_cfg->lane_count);

	if (link_cfg->watermark > 30) {
		debug("dp: sor setting: unable to get a good tusize, force watermark to 30\n");
		link_cfg->watermark = 30;
		return -1;
	} else if (link_cfg->watermark > num_symbols_per_line) {
		debug("dp: sor setting: force watermark to the number of symbols in the line\n");
		link_cfg->watermark = num_symbols_per_line;
		return -1;
	}

	/*
	 * Refer to dev_disp.ref for more information.
	 * # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width -
	 *                      SetRasterBlankStart.X - 7) * link_clk / pclk)
	 *                      - 3 * enhanced_framing - Y
	 * where Y = (# lanes == 4) 3 : (# lanes == 2) ? 6 : 12
	 */
	link_cfg->hblank_sym = (int)lldiv(((uint64_t)timing->hback_porch.typ +
			timing->hfront_porch.typ + timing->hsync_len.typ - 7) *
			link_rate, timing->pixelclock.typ) -
			3 * link_cfg->enhanced_framing -
			(12 / link_cfg->lane_count);

	if (link_cfg->hblank_sym < 0)
		link_cfg->hblank_sym = 0;

	/*
	 * Refer to dev_disp.ref for more information.
	 * # symbols/vblank = ((SetRasterBlankStart.X -
	 *                      SetRasterBlankEnd.X - 25) * link_clk / pclk)
	 *                      - Y - 1;
	 * where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39
	 */
	link_cfg->vblank_sym = (int)lldiv(((uint64_t)timing->hactive.typ - 25)
			* link_rate, timing->pixelclock.typ) - (36 /
			link_cfg->lane_count) - 4;

	if (link_cfg->vblank_sym < 0)
		link_cfg->vblank_sym = 0;

	link_cfg->is_valid = 1;
#ifdef DEBUG
	tegra_dc_dp_dump_link_cfg(dp, link_cfg);
#endif

	return 0;
}

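/*
 * Read the sink's capabilities from the DPCD and initialise link_cfg with
 * the maximum supported lane count and link rate, ready for training.
 */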
static int tegra_dc_dp_init_max_link_cfg(
			const struct display_timing *timing,
			struct tegra_dp_priv *dp,
			struct tegra_dp_link_config *link_cfg)
{
	const int drive_current = 0x40404040;
	const int preemphasis = 0x0f0f0f0f;
	const int postcursor = 0;
	u8 dpcd_data;
	int ret;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LANE_COUNT, &dpcd_data);
	if (ret)
		return ret;
	link_cfg->max_lane_count = dpcd_data & DP_MAX_LANE_COUNT_MASK;
	link_cfg->tps3_supported = (dpcd_data &
			DP_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;
	link_cfg->support_enhanced_framing =
		(dpcd_data & DP_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
		1 : 0;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_DOWNSPREAD, &dpcd_data);
	if (ret)
		return ret;
	link_cfg->downspread = (dpcd_data & DP_MAX_DOWNSPREAD_VAL_0_5_PCT) ?
				1 : 0;

	ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
				    &link_cfg->aux_rd_interval);
	if (ret)
		return ret;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LINK_RATE,
				    &link_cfg->max_link_bw);
	if (ret)
		return ret;

	/*
	 * Set to a high value for link training and attach.
	 * Will be re-programmed when dp is enabled.
	 */
	link_cfg->drive_current = drive_current;
	link_cfg->preemphasis = preemphasis;
	link_cfg->postcursor = postcursor;

	ret = tegra_dc_dp_dpcd_read(dp, DP_EDP_CONFIGURATION_CAP, &dpcd_data);
	if (ret)
		return ret;

	link_cfg->alt_scramber_reset_cap =
		(dpcd_data & DP_EDP_CONFIGURATION_CAP_ASC_RESET_YES) ?
		1 : 0;
	link_cfg->only_enhanced_framing =
		(dpcd_data & DP_EDP_CONFIGURATION_CAP_FRAMING_CHANGE_YES) ?
		1 : 0;

	link_cfg->lane_count = link_cfg->max_lane_count;
	link_cfg->link_bw = link_cfg->max_link_bw;
	link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;
	link_cfg->frame_in_ms = (1000 / 60) + 1;

	tegra_dc_dp_calc_config(dp, timing, link_cfg);

	return 0;
}

static int tegra_dc_dp_set_assr(struct tegra_dp_priv *priv,
				struct udevice *sor, int ena)
{
	int ret;

	u8 dpcd_data = ena ?
		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_ENABLE :
		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_DISABLE;

	ret = tegra_dc_dp_dpcd_write(priv, DP_EDP_CONFIGURATION_SET,
				     dpcd_data);
	if (ret)
		return ret;

	/* Also reset the scrambler to 0xfffe */
	tegra_dc_sor_set_internal_panel(sor, ena);

	return 0;
}

static int tegra_dp_set_link_bandwidth(struct tegra_dp_priv *dp,
				       struct udevice *sor,
				       u8 link_bw)
{
	tegra_dc_sor_set_link_bandwidth(sor, link_bw);

	/* Sink side */
	return tegra_dc_dp_dpcd_write(dp, DP_LINK_BW_SET, link_bw);
}

static int tegra_dp_set_lane_count(struct tegra_dp_priv *dp,
				   const struct tegra_dp_link_config *link_cfg,
				   struct udevice *sor)
{
	u8 dpcd_data;
	int ret;

	/* check if the panel supports enhanced_framing */
	dpcd_data = link_cfg->lane_count;
	if (link_cfg->enhanced_framing)
		dpcd_data |= DP_LANE_COUNT_SET_ENHANCEDFRAMING_T;
	ret = tegra_dc_dp_dpcd_write(dp, DP_LANE_COUNT_SET, dpcd_data);
	if (ret)
		return ret;

	tegra_dc_sor_set_lane_count(sor, link_cfg->lane_count);

	/* Also power down lanes that will not be used */

	return 0;
}

static int tegra_dc_dp_link_trained(struct tegra_dp_priv *dp,
				    const struct tegra_dp_link_config *cfg)
{
	u32 lane;
	u8 mask;
	u8 data;
	int ret;

	for (lane = 0; lane < cfg->lane_count; ++lane) {
		ret = tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
				DP_LANE2_3_STATUS : DP_LANE0_1_STATUS,
				&data);
		if (ret)
			return ret;
		mask = (lane & 1) ?
			NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
			DP_LANE_CR_DONE |
			DP_LANE_CHANNEL_EQ_DONE |
			DP_LANE_SYMBOL_LOCKED;
		if ((data & mask) != mask)
			return -1;
	}
	return 0;
}

static int tegra_dp_channel_eq_status(struct tegra_dp_priv *dp,
				      const struct tegra_dp_link_config *cfg)
{
	u32 cnt;
	u32 n_lanes = cfg->lane_count;
	u8 data;
	u8 ce_done = 1;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, DP_LANE0_1_STATUS + cnt, &data);
		if (ret)
			return ret;

		if (n_lanes == 1) {
			ce_done = (data & (0x1 <<
				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
				(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
			break;
		} else if (!(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
			return -EIO;
	}

	if (ce_done) {
		ret = tegra_dc_dp_dpcd_read(dp,
					    DP_LANE_ALIGN_STATUS_UPDATED,
					    &data);
		if (ret)
			return ret;
		if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
			ce_done = 0;
	}

	return ce_done ? 0 : -EIO;
}

static int tegra_dp_clock_recovery_status(struct tegra_dp_priv *dp,
					  const struct tegra_dp_link_config *cfg)
{
	u32 cnt;
	u32 n_lanes = cfg->lane_count;
	u8 data_ptr;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, (DP_LANE0_1_STATUS + cnt),
					    &data_ptr);
		if (ret)
			return ret;

		if (n_lanes == 1)
			return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ?
				1 : 0;
		else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
			 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
			return 0;
	}

	return 1;
}

static int tegra_dp_lt_adjust(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			      u32 pc[4], u8 pc_supported,
			      const struct tegra_dp_link_config *cfg)
{
	size_t cnt;
	u8 data_ptr;
	u32 n_lanes = cfg->lane_count;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, DP_ADJUST_REQUEST_LANE0_1 + cnt,
					    &data_ptr);
		if (ret)
			return ret;
		pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
				NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
		vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
				NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
		pe[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
				NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
		vs[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
				NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
	}
	if (pc_supported) {
		ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_ADJUST_REQ_POST_CURSOR2,
					    &data_ptr);
		if (ret)
			return ret;
		for (cnt = 0; cnt < n_lanes; cnt++) {
			pc[cnt] = (data_ptr >>
				NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
				NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
		}
	}

	return 0;
}

static void tegra_dp_wait_aux_training(struct tegra_dp_priv *dp,
				       bool is_clk_recovery,
				       const struct tegra_dp_link_config *cfg)
{
	if (!cfg->aux_rd_interval)
		udelay(is_clk_recovery ? 200 : 500);
	else
		mdelay(cfg->aux_rd_interval * 4);
}

static void tegra_dp_tpg(struct tegra_dp_priv *dp, u32 tp, u32 n_lanes,
			 const struct tegra_dp_link_config *cfg)
{
	u8 data = (tp == training_pattern_disabled)
		? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
		: (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);

	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, tp, cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, data);
}

static int tegra_dp_link_config(struct tegra_dp_priv *dp,
				const struct tegra_dp_link_config *link_cfg)
{
	u8 dpcd_data;
	u32 retry;
	int ret;

	if (link_cfg->lane_count == 0) {
		debug("dp: error: lane count is 0. Can not set link config.\n");
		return -ENOLINK;
	}

	/* Set power state if it is not in normal level */
	ret = tegra_dc_dp_dpcd_read(dp, DP_SET_POWER, &dpcd_data);
	if (ret)
		return ret;

	if (dpcd_data == DP_SET_POWER_D3) {
		dpcd_data = DP_SET_POWER_D0;

		/* DP spec requires 3 retries */
		for (retry = 3; retry > 0; --retry) {
			ret = tegra_dc_dp_dpcd_write(dp, DP_SET_POWER,
						     dpcd_data);
			if (!ret)
				break;
			if (retry == 1) {
				debug("dp: Failed to set DP panel power\n");
				return ret;
			}
		}
	}

	/* Enable ASSR if possible */
	if (link_cfg->alt_scramber_reset_cap) {
		ret = tegra_dc_dp_set_assr(dp, dp->sor, 1);
		if (ret)
			return ret;
	}

	ret = tegra_dp_set_link_bandwidth(dp, dp->sor, link_cfg->link_bw);
	if (ret) {
		debug("dp: Failed to set link bandwidth\n");
		return ret;
	}
	ret = tegra_dp_set_lane_count(dp, link_cfg, dp->sor);
	if (ret) {
		debug("dp: Failed to set lane count\n");
		return ret;
	}
	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, training_pattern_none,
				    link_cfg);

	return 0;
}

static int tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
				      const struct display_timing *timing,
				      struct tegra_dp_link_config *cfg)
{
	struct tegra_dp_link_config tmp_cfg;
	int ret;

	tmp_cfg = *cfg;
	cfg->is_valid = 0;

	ret = _tegra_dp_lower_link_config(dp, cfg);
	if (!ret)
		ret = tegra_dc_dp_calc_config(dp, timing, cfg);
	if (!ret)
		ret = tegra_dp_link_config(dp, cfg);
	if (ret)
		goto fail;

	return 0;

fail:
	*cfg = tmp_cfg;
	tegra_dp_link_config(dp, &tmp_cfg);
	return ret;
}

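/*
 * Program the requested pre-emphasis, voltage-swing and post-cursor values
 * into the SOR lane registers and mirror them in the sink's DPCD
 * TRAINING_LANEx_SET registers.
 */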
static int tegra_dp_lt_config(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			      u32 pc[4], const struct tegra_dp_link_config *cfg)
{
	struct udevice *sor = dp->sor;
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	u32 cnt;
	u32 val;

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 mask = 0;
		u32 pe_reg, vs_reg, pc_reg;
		u32 shift = 0;

		switch (cnt) {
		case 0:
			mask = PR_LANE2_DP_LANE0_MASK;
			shift = PR_LANE2_DP_LANE0_SHIFT;
			break;
		case 1:
			mask = PR_LANE1_DP_LANE1_MASK;
			shift = PR_LANE1_DP_LANE1_SHIFT;
			break;
		case 2:
			mask = PR_LANE0_DP_LANE2_MASK;
			shift = PR_LANE0_DP_LANE2_SHIFT;
			break;
		case 3:
			mask = PR_LANE3_DP_LANE3_MASK;
			shift = PR_LANE3_DP_LANE3_SHIFT;
			break;
		default:
			debug("dp: incorrect lane cnt\n");
			return -EINVAL;
		}

		pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];

		tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
				      vs_reg << shift, pc_reg << shift,
				      pc_supported);
	}

	tegra_dp_disable_tx_pu(dp->sor);
	udelay(20);

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
		u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);

		val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
			(max_vs_flag ?
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
			(pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
			(max_pe_flag ?
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
		tegra_dc_dp_dpcd_write(dp, (DP_TRAINING_LANE0_SET + cnt), val);
	}

	if (pc_supported) {
		for (cnt = 0; cnt < n_lanes / 2; cnt++) {
			u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
			u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);

			val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
				(max_pc_flag0 ?
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
				(pc[cnt + 1] <<
				NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
				(max_pc_flag1 ?
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
			tegra_dc_dp_dpcd_write(dp,
					       NV_DPCD_TRAINING_LANE0_1_SET2 +
					       cnt, val);
		}
	}

	return 0;
}

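/*
 * Channel-equalization phase of link training: retry up to four times,
 * re-reading the sink's adjust requests between attempts, until EQ,
 * symbol lock and inter-lane alignment are all reported.
 */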
static int _tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4],
				u32 vs[4], u32 pc[4], u8 pc_supported,
				u32 n_lanes,
				const struct tegra_dp_link_config *cfg)
{
	u32 retry_cnt;

	for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
		int ret;

		if (retry_cnt) {
			ret = tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported,
						 cfg);
			if (ret)
				return ret;
			tegra_dp_lt_config(dp, pe, vs, pc, cfg);
		}

		tegra_dp_wait_aux_training(dp, false, cfg);

		if (!tegra_dp_clock_recovery_status(dp, cfg)) {
			debug("dp: CR failed in channel EQ sequence!\n");
			break;
		}

		if (!tegra_dp_channel_eq_status(dp, cfg))
			return 0;
	}

	return -EIO;
}

static int tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			       u32 pc[4],
			       const struct tegra_dp_link_config *cfg)
{
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	int ret;
	u32 tp_src = training_pattern_2;

	if (pc_supported)
		tp_src = training_pattern_3;

	tegra_dp_tpg(dp, tp_src, n_lanes, cfg);

	ret = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes, cfg);

	tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);

	return ret;
}

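/*
 * Clock-recovery phase of link training: apply the current drive settings,
 * poll CR status and keep following the sink's adjust requests until CR
 * locks or the same voltage swing has been requested five times in a row.
 */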
static int _tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
				  u32 vs[4], u32 pc[4], u8 pc_supported,
				  u32 n_lanes,
				  const struct tegra_dp_link_config *cfg)
{
	u32 vs_temp[4];
	u32 retry_cnt = 0;

	do {
		tegra_dp_lt_config(dp, pe, vs, pc, cfg);
		tegra_dp_wait_aux_training(dp, true, cfg);

		if (tegra_dp_clock_recovery_status(dp, cfg))
			return 0;

		memcpy(vs_temp, vs, sizeof(vs_temp));
		tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported, cfg);

		if (memcmp(vs_temp, vs, sizeof(vs_temp)))
			retry_cnt = 0;
		else
			++retry_cnt;
	} while (retry_cnt < 5);

	return -EIO;
}

static int tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
				 u32 vs[4], u32 pc[4],
				 const struct tegra_dp_link_config *cfg)
{
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	int err;

	tegra_dp_tpg(dp, training_pattern_1, n_lanes, cfg);

	err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes,
				     cfg);
	if (err < 0)
		tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);

	return err;
}

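/*
 * Full link training: run clock recovery then channel equalization,
 * dropping to a lower link configuration and restarting whenever a
 * phase fails.
 */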
static int tegra_dc_dp_full_link_training(struct tegra_dp_priv *dp,
					  const struct display_timing *timing,
					  struct tegra_dp_link_config *cfg)
{
	struct udevice *sor = dp->sor;
	int err;
	u32 pe[4], vs[4], pc[4];

	tegra_sor_precharge_lanes(sor, cfg);

retry_cr:
	memset(pe, PREEMPHASIS_DISABLED, sizeof(pe));
	memset(vs, DRIVECURRENT_LEVEL0, sizeof(vs));
	memset(pc, POSTCURSOR2_LEVEL0, sizeof(pc));

	err = tegra_dp_clk_recovery(dp, pe, vs, pc, cfg);
	if (err) {
		if (!tegra_dp_lower_link_config(dp, timing, cfg))
			goto retry_cr;
		debug("dp: clk recovery failed\n");
		goto fail;
	}

	err = tegra_dp_channel_eq(dp, pe, vs, pc, cfg);
	if (err) {
		if (!tegra_dp_lower_link_config(dp, timing, cfg))
			goto retry_cr;
		debug("dp: channel equalization failed\n");
		goto fail;
	}
#ifdef DEBUG
	tegra_dc_dp_dump_link_cfg(dp, cfg);
#endif

	return 0;

fail:
	return err;
}

/*
 * All link training functions are ported from kernel dc driver.
 * See more details at drivers/video/tegra/dc/dp.c
 */
static int tegra_dc_dp_fast_link_training(struct tegra_dp_priv *dp,
					  const struct tegra_dp_link_config *link_cfg,
					  struct udevice *sor)
{
	u8 link_bw;
	u8 lane_count;
	u16 data16;
	u32 data32;
	u32 size;
	u32 status;
	int j;
	u32 mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);

	tegra_dc_sor_set_lane_parm(sor, link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_MAIN_LINK_CHANNEL_CODING_SET,
			       DP_SET_ANSI_8B10B);

	/* Send TP1 */
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
			       DP_TRAINING_PATTERN_1);

	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
	udelay(520);

	size = sizeof(data16);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
			    DP_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
	status = mask & 0x1111;
	if ((data16 & status) != status) {
		debug("dp: Link training error for TP1 (%#x, status %#x)\n",
		      data16, status);
		return -EFAULT;
	}

	/* enable ASSR */
	tegra_dc_dp_set_assr(dp, sor, link_cfg->scramble_ena);
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);

	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
			       link_cfg->link_bw == 20 ? 0x23 : 0x22);
	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
	udelay(520);

	size = sizeof(data32);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD, DP_LANE0_1_STATUS,
			    (u8 *)&data32, &size, &status);
	if ((data32 & mask) != (0x7777 & mask)) {
		debug("dp: Link training error for TP2/3 (0x%x)\n", data32);
		return -EFAULT;
	}

	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
				    link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, 0);

	if (tegra_dc_dp_link_trained(dp, link_cfg)) {
		tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
		debug("Fast link training failed, link bw %d, lane # %d\n",
		      link_bw, lane_count);
		return -EFAULT;
	}

	debug("Fast link training succeeded, link bw %d, lane %d\n",
	      link_cfg->link_bw, link_cfg->lane_count);

	return 0;
}

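/*
 * Try fast link training first (when enabled); fall back to the full
 * training sequence if it fails, then verify that the SOR and the
 * requested configuration agree on link rate and lane count.
 */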
static int tegra_dp_do_link_training(struct tegra_dp_priv *dp,
				     struct tegra_dp_link_config *link_cfg,
				     const struct display_timing *timing,
				     struct udevice *sor)
{
	u8 link_bw;
	u8 lane_count;
	int ret;

	if (DO_FAST_LINK_TRAINING) {
		ret = tegra_dc_dp_fast_link_training(dp, link_cfg, sor);
		if (ret) {
			debug("dp: fast link training failed\n");
		} else {
			/*
			 * set to a known-good drive setting if fast link
			 * succeeded. Ignore any error.
			 */
			ret = tegra_dc_sor_set_voltage_swing(dp->sor, link_cfg);
			if (ret)
				debug("Failed to set voltage swing\n");
		}
	} else {
		ret = -ENOSYS;
	}
	if (ret) {
		/* Try full link training then */
		ret = tegra_dc_dp_full_link_training(dp, timing, link_cfg);
		if (ret) {
			debug("dp: full link training failed\n");
			return ret;
		}
	}

	/* Everything is good; double check the link config */
	tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);

	if ((link_cfg->link_bw == link_bw) &&
	    (link_cfg->lane_count == lane_count))
		return 0;
	else
		return -EFAULT;
}

static int tegra_dc_dp_explore_link_cfg(struct tegra_dp_priv *dp,
					struct tegra_dp_link_config *link_cfg,
					struct udevice *sor,
					const struct display_timing *timing)
{
	struct tegra_dp_link_config temp_cfg;

	if (!timing->pixelclock.typ || !timing->hactive.typ ||
	    !timing->vactive.typ) {
		debug("dp: error mode configuration");
		return -EINVAL;
	}
	if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
		debug("dp: error link configuration");
		return -EINVAL;
	}

	link_cfg->is_valid = 0;

	memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));

	temp_cfg.link_bw = temp_cfg.max_link_bw;
	temp_cfg.lane_count = temp_cfg.max_lane_count;

	/*
	 * set to max link config
	 */
	if ((!tegra_dc_dp_calc_config(dp, timing, &temp_cfg)) &&
	    (!tegra_dp_link_config(dp, &temp_cfg)) &&
	    (!tegra_dp_do_link_training(dp, &temp_cfg, timing, sor)))
		/* the max link cfg is doable */
		memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));

	return link_cfg->is_valid ? 0 : -EFAULT;
}

static int tegra_dp_hpd_plug(struct tegra_dp_priv *dp)
{
	const int vdd_to_hpd_delay_ms = 200;
	u32 val;
	ulong start;

	start = get_timer(0);
	do {
		val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
		if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
			return 0;
		udelay(100);
	} while (get_timer(start) < vdd_to_hpd_delay_ms);

	return -EIO;
}

static int tegra_dc_dp_sink_out_of_sync(struct tegra_dp_priv *dp, u32 delay_ms)
{
	u8 dpcd_data;
	int out_of_sync;
	int ret;

	debug("%s: delay=%d\n", __func__, delay_ms);
	mdelay(delay_ms);
	ret = tegra_dc_dp_dpcd_read(dp, DP_SINK_STATUS, &dpcd_data);
	if (ret)
		return ret;
	out_of_sync = !(dpcd_data & DP_SINK_STATUS_PORT0_IN_SYNC);
	if (out_of_sync)
		debug("SINK receive port 0 out of sync, data=%x\n", dpcd_data);
	else
		debug("SINK is in synchronization\n");

	return out_of_sync;
}

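/*
 * After attaching, make sure the sink stays in sync with the main stream.
 * If it does not, detach, retrain the link and re-attach, waiting longer
 * before each check, for up to max_retry attempts.
 */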
static int tegra_dc_dp_check_sink(struct tegra_dp_priv *dp,
				  struct tegra_dp_link_config *link_cfg,
				  const struct display_timing *timing)
{
	const int max_retry = 5;
	int delay_frame;
	int retries;

	/*
	 * DP TCON may skip some main stream frames, thus we need to wait
	 * some delay before reading the DPCD SINK STATUS register, starting
	 * from 5
	 */
	delay_frame = 5;

	retries = max_retry;
	do {
		int ret;

		if (!tegra_dc_dp_sink_out_of_sync(dp, link_cfg->frame_in_ms *
						  delay_frame))
			return 0;

		debug("%s: retries left %d\n", __func__, retries);
		if (!retries--) {
			printf("DP: Out of sync after %d retries\n", max_retry);
			return -EIO;
		}
		ret = tegra_dc_sor_detach(dp->dc_dev, dp->sor);
		if (ret)
			return ret;
		if (tegra_dc_dp_explore_link_cfg(dp, link_cfg, dp->sor,
						 timing)) {
			debug("dp: %s: error to configure link\n", __func__);
			continue;
		}

		tegra_dc_sor_set_power_state(dp->sor, 1);
		tegra_dc_sor_attach(dp->dc_dev, dp->sor, link_cfg, timing);

		/* Increase delay_frame for next try in case the sink is
		   skipping more frames */
		delay_frame += 10;
	} while (1);
}

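/*
 * Main enable path: power up the AUX pads, wait for HPD, read the sink's
 * capabilities, train the link and attach the display controller, then
 * turn on the backlight through the SOR video bridge.
 */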
int tegra_dp_enable(struct udevice *dev, int panel_bpp,
		    const struct display_timing *timing)
{
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	struct tegra_dp_link_config slink_cfg, *link_cfg = &slink_cfg;
	struct udevice *sor;
	int data;
	int retry;
	int ret;

	memset(link_cfg, '\0', sizeof(*link_cfg));
	link_cfg->is_valid = 0;
	link_cfg->scramble_ena = 1;

	tegra_dc_dpaux_enable(priv);

	if (tegra_dp_hpd_plug(priv) < 0) {
		debug("dp: hpd plug failed\n");
		return -EIO;
	}

	link_cfg->bits_per_pixel = panel_bpp;
	if (tegra_dc_dp_init_max_link_cfg(timing, priv, link_cfg)) {
		debug("dp: failed to init link configuration\n");
		return -ENOLINK;
	}

	ret = uclass_first_device(UCLASS_VIDEO_BRIDGE, &sor);
	if (ret || !sor) {
		debug("dp: failed to find SOR device: ret=%d\n", ret);
		return ret;
	}
	priv->sor = sor;
	ret = tegra_dc_sor_enable_dp(sor, link_cfg);
	if (ret)
		return ret;

	tegra_dc_sor_set_panel_power(sor, 1);

	/* Write power on to DPCD */
	data = DP_SET_POWER_D0;
	retry = 0;
	do {
		ret = tegra_dc_dp_dpcd_write(priv, DP_SET_POWER, data);
	} while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);

	if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
		debug("dp: failed to power on panel (0x%x)\n", ret);
		return -ENETUNREACH;
		goto error_enable;
	}

	/* Confirm DP plugging status */
	if (!(tegra_dpaux_readl(priv, DPAUX_DP_AUXSTAT) &
	      DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		debug("dp: could not detect HPD\n");
		return -ENXIO;
	}

	/* Check DP version */
	if (tegra_dc_dp_dpcd_read(priv, DP_DPCD_REV, &priv->revision)) {
		debug("dp: failed to read the revision number from sink\n");
		return -EIO;
	}

	if (tegra_dc_dp_explore_link_cfg(priv, link_cfg, sor, timing)) {
		debug("dp: error configuring link\n");
		return -ENOMEDIUM;
	}

	tegra_dc_sor_set_power_state(sor, 1);
	ret = tegra_dc_sor_attach(priv->dc_dev, sor, link_cfg, timing);
	if (ret && ret != -EEXIST)
		return ret;

	/*
	 * This takes a long time, but can apparently resolve a failure to
	 * bring up the display correctly.
	 */
	if (0) {
		ret = tegra_dc_dp_check_sink(priv, link_cfg, timing);
		if (ret)
			return ret;
	}

	/* Power down the unused lanes to save power - a few hundred mW */
	tegra_dc_sor_power_down_unused_lanes(sor, link_cfg);

	ret = video_bridge_set_backlight(sor, 80);
	if (ret) {
		debug("dp: failed to set backlight\n");
		return ret;
	}

	priv->enabled = true;
error_enable:
	return 0;
}

static int tegra_dp_ofdata_to_platdata(struct udevice *dev)
{
	struct tegra_dp_plat *plat = dev_get_platdata(dev);

	plat->base = dev_read_addr(dev);

	return 0;
}

static int tegra_dp_read_edid(struct udevice *dev, u8 *buf, int buf_size)
{
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	const int tegra_edid_i2c_address = 0x50;
	u32 aux_stat = 0;

	tegra_dc_dpaux_enable(priv);

	return tegra_dc_i2c_aux_read(priv, tegra_edid_i2c_address, 0, buf,
				     buf_size, &aux_stat);
}

static const struct dm_display_ops dp_tegra_ops = {
	.read_edid = tegra_dp_read_edid,
	.enable = tegra_dp_enable,
};

static int dp_tegra_probe(struct udevice *dev)
{
	struct tegra_dp_plat *plat = dev_get_platdata(dev);
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	struct display_plat *disp_uc_plat = dev_get_uclass_platdata(dev);

	priv->regs = (struct dpaux_ctlr *)plat->base;
	priv->enabled = false;

	/* Remember the display controller that is sending us video */
	priv->dc_dev = disp_uc_plat->src_dev;

	return 0;
}

static const struct udevice_id tegra_dp_ids[] = {
	{ .compatible = "nvidia,tegra124-dpaux" },
	{ }
};

U_BOOT_DRIVER(dp_tegra) = {
	.name = "dpaux_tegra",
	.id = UCLASS_DISPLAY,
	.of_match = tegra_dp_ids,
	.ofdata_to_platdata = tegra_dp_ofdata_to_platdata,
	.probe = dp_tegra_probe,
	.ops = &dp_tegra_ops,
	.priv_auto_alloc_size = sizeof(struct tegra_dp_priv),
	.platdata_auto_alloc_size = sizeof(struct tegra_dp_plat),
};