dp.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2013, NVIDIA Corporation.
 * Copyright 2014 Google Inc.
 */

#include <common.h>
#include <display.h>
#include <dm.h>
#include <div64.h>
#include <errno.h>
#include <log.h>
#include <video_bridge.h>
#include <asm/io.h>
#include <asm/arch-tegra/dc.h>
#include <linux/delay.h>
#include "display.h"
#include "edid.h"
#include "sor.h"
#include "displayport.h"

#define DO_FAST_LINK_TRAINING	1

struct tegra_dp_plat {
	ulong base;
};

/**
 * struct tegra_dp_priv - private displayport driver info
 *
 * @dc_dev: Display controller device that is sending the video feed
 */
struct tegra_dp_priv {
	struct udevice *sor;
	struct udevice *dc_dev;
	struct dpaux_ctlr *regs;
	u8 revision;
	int enabled;
};

struct tegra_dp_priv dp_data;

static inline u32 tegra_dpaux_readl(struct tegra_dp_priv *dp, u32 reg)
{
	return readl((u32 *)dp->regs + reg);
}

static inline void tegra_dpaux_writel(struct tegra_dp_priv *dp, u32 reg,
				      u32 val)
{
	writel(val, (u32 *)dp->regs + reg);
}

static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dp_priv *dp,
					       u32 reg, u32 mask, u32 exp_val,
					       u32 poll_interval_us,
					       u32 timeout_us)
{
	u32 reg_val = 0;
	u32 temp = timeout_us;

	do {
		udelay(poll_interval_us);
		reg_val = tegra_dpaux_readl(dp, reg);
		if (timeout_us > poll_interval_us)
			timeout_us -= poll_interval_us;
		else
			break;
	} while ((reg_val & mask) != exp_val);

	if ((reg_val & mask) == exp_val)
		return 0;	/* success */
	debug("dpaux_poll_register 0x%x: timeout: (reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
	      reg, reg_val, mask, exp_val);

	return temp;
}

static inline int tegra_dpaux_wait_transaction(struct tegra_dp_priv *dp)
{
	/*
	 * According to the DP spec, each AUX transaction needs to finish
	 * within 40ms.
	 */
	if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
					 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
		debug("dp: DPAUX transaction timeout\n");
		return -1;
	}

	return 0;
}

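/*
 * tegra_dc_dpaux_write_chunk() - write one AUX chunk (up to DP_AUX_MAX_BYTES)
 *
 * Loads the data into the AUXDATA_WRITE registers, kicks off the transaction
 * and retries on timeout/RX errors and on DEFER replies, up to
 * DP_AUX_TIMEOUT_MAX_TRIES / DP_AUX_DEFER_MAX_TRIES attempts. On an ACK
 * reply, *size is updated with the number of bytes the sink reported.
 *
 * Return: 0 on ACK, negative error code otherwise
 */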
static int tegra_dc_dpaux_write_chunk(struct tegra_dp_priv *dp, u32 cmd,
				      u32 addr, u8 *data, u32 *size,
				      u32 *aux_stat)
{
	int i;
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
	u32 temp_data;

	if (*size > DP_AUX_MAX_BYTES)
		return -1;	/* only write one chunk of data */

	/* Make sure the command is a write command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CWR:
	case DPAUX_DP_AUXCTL_CMD_MOTWR:
	case DPAUX_DP_AUXCTL_CMD_AUXWR:
		break;
	default:
		debug("dp: aux write cmd 0x%x is invalid\n", cmd);
		return -EINVAL;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
	for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
		memcpy(&temp_data, data, 4);
		tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
		data += 4;
	}

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(1);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			debug("dp: aux write transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				debug("dp: aux write retry (0x%x) -- %d\n",
				      *aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux write got error (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				debug("dp: aux write defer (0x%x) -- %d\n",
				      *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux write defer exceeds max retries (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
		    DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			return 0;
		} else {
			debug("dp: aux write failed (0x%x)\n", *aux_stat);
			return -EIO;
		}
	}

	/* Should never get here */
	return -EIO;
}

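/*
 * tegra_dc_dpaux_read_chunk() - read one AUX chunk (up to DP_AUX_MAX_BYTES)
 *
 * Checks HPD, programs the address and command, then runs the transaction
 * with the same timeout/defer retry policy as the write path. On an ACK
 * reply the data FIFO is copied into @data and *size is updated with the
 * number of bytes actually returned by the sink.
 *
 * Return: 0 on ACK, negative error code otherwise
 */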
static int tegra_dc_dpaux_read_chunk(struct tegra_dp_priv *dp, u32 cmd,
				     u32 addr, u8 *data, u32 *size,
				     u32 *aux_stat)
{
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;

	if (*size > DP_AUX_MAX_BYTES) {
		debug("only read one chunk\n");
		return -EIO;	/* only read one chunk */
	}

	/* Check to make sure the command is a read command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CRD:
	case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
	case DPAUX_DP_AUXCTL_CMD_MOTRD:
	case DPAUX_DP_AUXCTL_CMD_AUXRD:
		break;
	default:
		debug("dp: aux read cmd 0x%x is invalid\n", cmd);
		return -EIO;
	}

	*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
	if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		debug("dp: HPD is not detected\n");
		return -EIO;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(DP_DPCP_RETRY_SLEEP_NS * 2);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			debug("dp: aux read transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				debug("dp: aux read retry (0x%x) -- %d\n",
				      *aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;	/* retry */
			} else {
				debug("dp: aux read got error (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				debug("dp: aux read defer (0x%x) -- %d\n",
				      *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux read defer exceeds max retries (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
		    DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			int i;
			u32 temp_data[4];

			for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
				temp_data[i] = tegra_dpaux_readl(dp,
						DPAUX_DP_AUXDATA_READ_W(i));

			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			memcpy(data, temp_data, *size);

			return 0;
		} else {
			debug("dp: aux read failed (0x%x)\n", *aux_stat);
			return -EIO;
		}
	}

	/* Should never get here */
	debug("%s: should never get here\n", __func__);
	return -EIO;
}

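/*
 * tegra_dc_dpaux_read() - read an arbitrary length by looping over
 * DP_AUX_MAX_BYTES-sized chunks; *size is updated with the number of bytes
 * actually transferred before any error occurred.
 */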
static int tegra_dc_dpaux_read(struct tegra_dp_priv *dp, u32 cmd, u32 addr,
			       u8 *data, u32 *size, u32 *aux_stat)
{
	u32 finished = 0;
	u32 cur_size;
	int ret = 0;

	do {
		cur_size = *size - finished;
		if (cur_size > DP_AUX_MAX_BYTES)
			cur_size = DP_AUX_MAX_BYTES;

		ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
						data, &cur_size, aux_stat);
		if (ret)
			break;

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;
	} while (*size > finished);

	*size = finished;

	return ret;
}

static int tegra_dc_dp_dpcd_read(struct tegra_dp_priv *dp, u32 cmd,
				 u8 *data_ptr)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
					cmd, data_ptr, &size, &status);
	if (ret) {
		debug("dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
		      cmd, status);
	}

	return ret;
}

static int tegra_dc_dp_dpcd_write(struct tegra_dp_priv *dp, u32 cmd,
				  u8 data)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
					 cmd, &data, &size, &status);
	if (ret) {
		debug("dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
		      cmd, status);
	}

	return ret;
}

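/*
 * tegra_dc_i2c_aux_read() - I2C-over-AUX read, used for fetching the EDID
 *
 * For each chunk the register address is first sent with an I2C write that
 * keeps the transaction open (MOTWR), then the data is fetched with an I2C
 * read (I2CRD). Returns the number of bytes read, or a negative error code
 * on failure.
 */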
static int tegra_dc_i2c_aux_read(struct tegra_dp_priv *dp, u32 i2c_addr,
				 u8 addr, u8 *data, u32 size, u32 *aux_stat)
{
	u32 finished = 0;
	int ret = 0;

	do {
		u32 cur_size = min((u32)DP_AUX_MAX_BYTES, size - finished);
		u32 len = 1;

		ret = tegra_dc_dpaux_write_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
				&addr, &len, aux_stat);
		if (ret) {
			debug("%s: error sending address to read.\n",
			      __func__);
			return ret;
		}

		ret = tegra_dc_dpaux_read_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
				data, &cur_size, aux_stat);
		if (ret) {
			debug("%s: error reading data.\n", __func__);
			return ret;
		}

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;
	} while (size > finished);

	return finished;
}

static void tegra_dc_dpaux_enable(struct tegra_dp_priv *dp)
{
	/* clear interrupt */
	tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
	/* do not enable interrupts for now; enable them once an ISR is in place */
	tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
			   DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
			   DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
			   0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
			   DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
			   DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
}

#ifdef DEBUG
static void tegra_dc_dp_dump_link_cfg(struct tegra_dp_priv *dp,
				      const struct tegra_dp_link_config *link_cfg)
{
	debug("DP config: cfg_name cfg_value\n");
	debug(" Lane Count %d\n",
	      link_cfg->max_lane_count);
	debug(" SupportEnhancedFraming %s\n",
	      link_cfg->support_enhanced_framing ? "Y" : "N");
	debug(" Bandwidth %d\n",
	      link_cfg->max_link_bw);
	debug(" bpp %d\n",
	      link_cfg->bits_per_pixel);
	debug(" EnhancedFraming %s\n",
	      link_cfg->enhanced_framing ? "Y" : "N");
	debug(" Scramble_enabled %s\n",
	      link_cfg->scramble_ena ? "Y" : "N");
	debug(" LinkBW %d\n",
	      link_cfg->link_bw);
	debug(" lane_count %d\n",
	      link_cfg->lane_count);
	debug(" activepolarity %d\n",
	      link_cfg->activepolarity);
	debug(" active_count %d\n",
	      link_cfg->active_count);
	debug(" tu_size %d\n",
	      link_cfg->tu_size);
	debug(" active_frac %d\n",
	      link_cfg->active_frac);
	debug(" watermark %d\n",
	      link_cfg->watermark);
	debug(" hblank_sym %d\n",
	      link_cfg->hblank_sym);
	debug(" vblank_sym %d\n",
	      link_cfg->vblank_sym);
}
#endif

static int _tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
				       struct tegra_dp_link_config *cfg)
{
	switch (cfg->link_bw) {
	case SOR_LINK_SPEED_G1_62:
		if (cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
			cfg->link_bw = SOR_LINK_SPEED_G2_7;
		cfg->lane_count /= 2;
		break;
	case SOR_LINK_SPEED_G2_7:
		cfg->link_bw = SOR_LINK_SPEED_G1_62;
		break;
	case SOR_LINK_SPEED_G5_4:
		if (cfg->lane_count == 1) {
			cfg->link_bw = SOR_LINK_SPEED_G2_7;
			cfg->lane_count = cfg->max_lane_count;
		} else {
			cfg->lane_count /= 2;
		}
		break;
	default:
		debug("dp: Error link rate %d\n", cfg->link_bw);
		return -ENOLINK;
	}

	return (cfg->lane_count > 0) ? 0 : -ENOLINK;
}

/*
 * Calculate whether the given cfg can meet the mode request.
 * Return 0 if the mode is possible, -1 otherwise.
 */
static int tegra_dc_dp_calc_config(struct tegra_dp_priv *dp,
				   const struct display_timing *timing,
				   struct tegra_dp_link_config *link_cfg)
{
	const u32 link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
	const u64 f = 100000;	/* precision factor */
	u32 num_linkclk_line;	/* Number of link clocks per line */
	u64 ratio_f;		/* Ratio of incoming to outgoing data rate */
	u64 frac_f;
	u64 activesym_f;	/* Activesym per TU */
	u64 activecount_f;
	u32 activecount;
	u32 activepolarity;
	u64 approx_value_f;
	u32 activefrac = 0;
	u64 accumulated_error_f = 0;
	u32 lowest_neg_activecount = 0;
	u32 lowest_neg_activepolarity = 0;
	u32 lowest_neg_tusize = 64;
	u32 num_symbols_per_line;
	u64 lowest_neg_activefrac = 0;
	u64 lowest_neg_error_f = 64 * f;
	u64 watermark_f;
	int i;
	int neg;

	if (!link_rate || !link_cfg->lane_count || !timing->pixelclock.typ ||
	    !link_cfg->bits_per_pixel)
		return -1;

	if ((u64)timing->pixelclock.typ * link_cfg->bits_per_pixel >=
	    (u64)link_rate * 8 * link_cfg->lane_count)
		return -1;

	num_linkclk_line = (u32)(lldiv(link_rate * timing->hactive.typ,
				       timing->pixelclock.typ));

	ratio_f = (u64)timing->pixelclock.typ * link_cfg->bits_per_pixel * f;
	ratio_f /= 8;
	do_div(ratio_f, link_rate * link_cfg->lane_count);

	for (i = 64; i >= 32; --i) {
		activesym_f = ratio_f * i;
		activecount_f = lldiv(activesym_f, (u32)f) * f;
		frac_f = activesym_f - activecount_f;
		activecount = (u32)(lldiv(activecount_f, (u32)f));

		if (frac_f < (lldiv(f, 2))) /* fraction < 0.5 */
			activepolarity = 0;
		else {
			activepolarity = 1;
			frac_f = f - frac_f;
		}

		if (frac_f != 0) {
			/* warning: frac_f should be 64-bit */
			frac_f = lldiv(f * f, frac_f); /* 1 / fraction */
			if (frac_f > (15 * f))
				activefrac = activepolarity ? 1 : 15;
			else
				activefrac = activepolarity ?
					(u32)lldiv(frac_f, (u32)f) + 1 :
					(u32)lldiv(frac_f, (u32)f);
		}

		if (activefrac == 1)
			activepolarity = 0;

		if (activepolarity == 1)
			approx_value_f = activefrac ? lldiv(
				(activecount_f + (activefrac * f - f) * f),
				(activefrac * f)) :
				activecount_f + f;
		else
			approx_value_f = activefrac ?
				activecount_f + lldiv(f, activefrac) :
				activecount_f;

		if (activesym_f < approx_value_f) {
			accumulated_error_f = num_linkclk_line *
				lldiv(approx_value_f - activesym_f, i);
			neg = 1;
		} else {
			accumulated_error_f = num_linkclk_line *
				lldiv(activesym_f - approx_value_f, i);
			neg = 0;
		}

		if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
		    (accumulated_error_f == 0)) {
			lowest_neg_error_f = accumulated_error_f;
			lowest_neg_tusize = i;
			lowest_neg_activecount = activecount;
			lowest_neg_activepolarity = activepolarity;
			lowest_neg_activefrac = activefrac;

			if (accumulated_error_f == 0)
				break;
		}
	}

	if (lowest_neg_activefrac == 0) {
		link_cfg->activepolarity = 0;
		link_cfg->active_count = lowest_neg_activepolarity ?
			lowest_neg_activecount : lowest_neg_activecount - 1;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = 1;
	} else {
		link_cfg->activepolarity = lowest_neg_activepolarity;
		link_cfg->active_count = (u32)lowest_neg_activecount;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = (u32)lowest_neg_activefrac;
	}

	watermark_f = lldiv(ratio_f * link_cfg->tu_size * (f - ratio_f), f);
	link_cfg->watermark = (u32)(lldiv(watermark_f + lowest_neg_error_f,
					  f)) + link_cfg->bits_per_pixel / 4 - 1;
	num_symbols_per_line = (timing->hactive.typ *
				link_cfg->bits_per_pixel) /
			       (8 * link_cfg->lane_count);

	if (link_cfg->watermark > 30) {
		debug("dp: sor setting: unable to get a good tusize, force watermark to 30\n");
		link_cfg->watermark = 30;
		return -1;
	} else if (link_cfg->watermark > num_symbols_per_line) {
		debug("dp: sor setting: force watermark to the number of symbols in the line\n");
		link_cfg->watermark = num_symbols_per_line;
		return -1;
	}

	/*
	 * Refer to dev_disp.ref for more information.
	 * # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width -
	 *                      SetRasterBlankStart.X - 7) * link_clk / pclk)
	 *                     - 3 * enhanced_framing - Y
	 * where Y = (# lanes == 4) ? 3 : (# lanes == 2) ? 6 : 12
	 */
	link_cfg->hblank_sym = (int)lldiv(((uint64_t)timing->hback_porch.typ +
			timing->hfront_porch.typ + timing->hsync_len.typ - 7) *
			link_rate, timing->pixelclock.typ) -
			3 * link_cfg->enhanced_framing -
			(12 / link_cfg->lane_count);

	if (link_cfg->hblank_sym < 0)
		link_cfg->hblank_sym = 0;

	/*
	 * Refer to dev_disp.ref for more information.
	 * # symbols/vblank = ((SetRasterBlankStart.X -
	 *                      SetRasterBlankEnd.X - 25) * link_clk / pclk)
	 *                     - Y - 1;
	 * where Y = (# lanes == 4) ? 12 : (# lanes == 2) ? 21 : 39
	 */
	link_cfg->vblank_sym = (int)lldiv(((uint64_t)timing->hactive.typ - 25)
			* link_rate, timing->pixelclock.typ) - (36 /
			link_cfg->lane_count) - 4;

	if (link_cfg->vblank_sym < 0)
		link_cfg->vblank_sym = 0;

	link_cfg->is_valid = 1;
#ifdef DEBUG
	tegra_dc_dp_dump_link_cfg(dp, link_cfg);
#endif

	return 0;
}

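/*
 * tegra_dc_dp_init_max_link_cfg() - build the initial link configuration
 *
 * Reads the sink capabilities (lane count, enhanced framing, downspread,
 * AUX read interval, maximum link rate, eDP ASSR support) from the DPCD,
 * programs @link_cfg for the maximum advertised lane count and bandwidth,
 * then calls tegra_dc_dp_calc_config() to check that the requested mode fits.
 */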
static int tegra_dc_dp_init_max_link_cfg(const struct display_timing *timing,
					 struct tegra_dp_priv *dp,
					 struct tegra_dp_link_config *link_cfg)
{
	const int drive_current = 0x40404040;
	const int preemphasis = 0x0f0f0f0f;
	const int postcursor = 0;
	u8 dpcd_data;
	int ret;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LANE_COUNT, &dpcd_data);
	if (ret)
		return ret;
	link_cfg->max_lane_count = dpcd_data & DP_MAX_LANE_COUNT_MASK;

	link_cfg->tps3_supported = (dpcd_data &
			DP_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;

	link_cfg->support_enhanced_framing =
		(dpcd_data & DP_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
		1 : 0;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_DOWNSPREAD, &dpcd_data);
	if (ret)
		return ret;
	link_cfg->downspread = (dpcd_data & DP_MAX_DOWNSPREAD_VAL_0_5_PCT) ?
			       1 : 0;

	ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
				    &link_cfg->aux_rd_interval);
	if (ret)
		return ret;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LINK_RATE,
				    &link_cfg->max_link_bw);
	if (ret)
		return ret;

	/*
	 * Set to a high value for link training and attach.
	 * Will be re-programmed when dp is enabled.
	 */
	link_cfg->drive_current = drive_current;
	link_cfg->preemphasis = preemphasis;
	link_cfg->postcursor = postcursor;

	ret = tegra_dc_dp_dpcd_read(dp, DP_EDP_CONFIGURATION_CAP, &dpcd_data);
	if (ret)
		return ret;

	link_cfg->alt_scramber_reset_cap =
		(dpcd_data & DP_EDP_CONFIGURATION_CAP_ASC_RESET_YES) ?
		1 : 0;
	link_cfg->only_enhanced_framing =
		(dpcd_data & DP_EDP_CONFIGURATION_CAP_FRAMING_CHANGE_YES) ?
		1 : 0;

	link_cfg->lane_count = link_cfg->max_lane_count;
	link_cfg->link_bw = link_cfg->max_link_bw;
	link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;
	link_cfg->frame_in_ms = (1000 / 60) + 1;

	tegra_dc_dp_calc_config(dp, timing, link_cfg);

	return 0;
}

static int tegra_dc_dp_set_assr(struct tegra_dp_priv *priv,
				struct udevice *sor, int ena)
{
	int ret;
	u8 dpcd_data = ena ?
		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_ENABLE :
		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_DISABLE;

	ret = tegra_dc_dp_dpcd_write(priv, DP_EDP_CONFIGURATION_SET,
				     dpcd_data);
	if (ret)
		return ret;

	/* Also reset the scrambler to 0xfffe */
	tegra_dc_sor_set_internal_panel(sor, ena);

	return 0;
}

static int tegra_dp_set_link_bandwidth(struct tegra_dp_priv *dp,
				       struct udevice *sor,
				       u8 link_bw)
{
	tegra_dc_sor_set_link_bandwidth(sor, link_bw);

	/* Sink side */
	return tegra_dc_dp_dpcd_write(dp, DP_LINK_BW_SET, link_bw);
}

static int tegra_dp_set_lane_count(struct tegra_dp_priv *dp,
				   const struct tegra_dp_link_config *link_cfg,
				   struct udevice *sor)
{
	u8 dpcd_data;
	int ret;

	/* check if the panel supports enhanced framing */
	dpcd_data = link_cfg->lane_count;
	if (link_cfg->enhanced_framing)
		dpcd_data |= DP_LANE_COUNT_SET_ENHANCEDFRAMING_T;
	ret = tegra_dc_dp_dpcd_write(dp, DP_LANE_COUNT_SET, dpcd_data);
	if (ret)
		return ret;

	tegra_dc_sor_set_lane_count(sor, link_cfg->lane_count);

	/* Also power down lanes that will not be used */
	return 0;
}

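/*
 * tegra_dc_dp_link_trained() - check per-lane training status
 *
 * Reads DP_LANE0_1_STATUS / DP_LANE2_3_STATUS and verifies that clock
 * recovery, channel equalization and symbol lock are reported for every
 * active lane. Returns 0 if the link is fully trained, negative otherwise.
 */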
static int tegra_dc_dp_link_trained(struct tegra_dp_priv *dp,
				    const struct tegra_dp_link_config *cfg)
{
	u32 lane;
	u8 mask;
	u8 data;
	int ret;

	for (lane = 0; lane < cfg->lane_count; ++lane) {
		ret = tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
				DP_LANE2_3_STATUS : DP_LANE0_1_STATUS,
				&data);
		if (ret)
			return ret;
		mask = (lane & 1) ?
			NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
			DP_LANE_CR_DONE |
			DP_LANE_CHANNEL_EQ_DONE |
			DP_LANE_SYMBOL_LOCKED;
		if ((data & mask) != mask)
			return -1;
	}

	return 0;
}

static int tegra_dp_channel_eq_status(struct tegra_dp_priv *dp,
				      const struct tegra_dp_link_config *cfg)
{
	u32 cnt;
	u32 n_lanes = cfg->lane_count;
	u8 data;
	u8 ce_done = 1;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, DP_LANE0_1_STATUS + cnt, &data);
		if (ret)
			return ret;

		if (n_lanes == 1) {
			ce_done = (data & (0x1 <<
				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
				(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
			break;
		} else if (!(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
			return -EIO;
	}

	if (ce_done) {
		ret = tegra_dc_dp_dpcd_read(dp,
					    DP_LANE_ALIGN_STATUS_UPDATED,
					    &data);
		if (ret)
			return ret;
		if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
			ce_done = 0;
	}

	return ce_done ? 0 : -EIO;
}

static int tegra_dp_clock_recovery_status(struct tegra_dp_priv *dp,
					  const struct tegra_dp_link_config *cfg)
{
	u32 cnt;
	u32 n_lanes = cfg->lane_count;
	u8 data_ptr;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, (DP_LANE0_1_STATUS + cnt),
					    &data_ptr);
		if (ret)
			return ret;

		if (n_lanes == 1)
			return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ?
				1 : 0;
		else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
			 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
			return 0;
	}

	return 1;
}

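/*
 * tegra_dp_lt_adjust() - read the sink's requested drive settings
 *
 * Fills @pe (pre-emphasis), @vs (voltage swing) and, when TPS3/post-cursor
 * is supported, @pc (post-cursor2) with the per-lane adjustment requests
 * from the DPCD ADJUST_REQUEST registers.
 */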
static int tegra_dp_lt_adjust(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			      u32 pc[4], u8 pc_supported,
			      const struct tegra_dp_link_config *cfg)
{
	size_t cnt;
	u8 data_ptr;
	u32 n_lanes = cfg->lane_count;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, DP_ADJUST_REQUEST_LANE0_1 + cnt,
					    &data_ptr);
		if (ret)
			return ret;
		pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
		vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
		pe[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
		vs[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
	}

	if (pc_supported) {
		ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_ADJUST_REQ_POST_CURSOR2,
					    &data_ptr);
		if (ret)
			return ret;
		for (cnt = 0; cnt < n_lanes; cnt++) {
			pc[cnt] = (data_ptr >>
			NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
			NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
		}
	}

	return 0;
}

static void tegra_dp_wait_aux_training(struct tegra_dp_priv *dp,
				       bool is_clk_recovery,
				       const struct tegra_dp_link_config *cfg)
{
	if (!cfg->aux_rd_interval)
		udelay(is_clk_recovery ? 200 : 500);
	else
		mdelay(cfg->aux_rd_interval * 4);
}

static void tegra_dp_tpg(struct tegra_dp_priv *dp, u32 tp, u32 n_lanes,
			 const struct tegra_dp_link_config *cfg)
{
	u8 data = (tp == training_pattern_disabled)
		? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
		: (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);

	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, tp, cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, data);
}

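/*
 * tegra_dp_link_config() - program the source and sink for a link config
 *
 * Wakes the sink (DP_SET_POWER_D0, with the retries the DP spec asks for),
 * enables ASSR when the panel supports it, then sets the link bandwidth and
 * lane count on both the SOR and the sink.
 */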
static int tegra_dp_link_config(struct tegra_dp_priv *dp,
				const struct tegra_dp_link_config *link_cfg)
{
	u8 dpcd_data;
	u32 retry;
	int ret;

	if (link_cfg->lane_count == 0) {
		debug("dp: error: lane count is 0. Can not set link config.\n");
		return -ENOLINK;
	}

	/* Set power state if it is not in normal level */
	ret = tegra_dc_dp_dpcd_read(dp, DP_SET_POWER, &dpcd_data);
	if (ret)
		return ret;

	if (dpcd_data == DP_SET_POWER_D3) {
		dpcd_data = DP_SET_POWER_D0;

		/* DP spec requires 3 retries */
		for (retry = 3; retry > 0; --retry) {
			ret = tegra_dc_dp_dpcd_write(dp, DP_SET_POWER,
						     dpcd_data);
			if (!ret)
				break;
			if (retry == 1) {
				debug("dp: Failed to set DP panel power\n");
				return ret;
			}
		}
	}

	/* Enable ASSR if possible */
	if (link_cfg->alt_scramber_reset_cap) {
		ret = tegra_dc_dp_set_assr(dp, dp->sor, 1);
		if (ret)
			return ret;
	}

	ret = tegra_dp_set_link_bandwidth(dp, dp->sor, link_cfg->link_bw);
	if (ret) {
		debug("dp: Failed to set link bandwidth\n");
		return ret;
	}

	ret = tegra_dp_set_lane_count(dp, link_cfg, dp->sor);
	if (ret) {
		debug("dp: Failed to set lane count\n");
		return ret;
	}

	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, training_pattern_none,
				    link_cfg);

	return 0;
}

static int tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
				      const struct display_timing *timing,
				      struct tegra_dp_link_config *cfg)
{
	struct tegra_dp_link_config tmp_cfg;
	int ret;

	tmp_cfg = *cfg;
	cfg->is_valid = 0;

	ret = _tegra_dp_lower_link_config(dp, cfg);
	if (!ret)
		ret = tegra_dc_dp_calc_config(dp, timing, cfg);
	if (!ret)
		ret = tegra_dp_link_config(dp, cfg);
	if (ret)
		goto fail;

	return 0;

fail:
	*cfg = tmp_cfg;
	tegra_dp_link_config(dp, &tmp_cfg);

	return ret;
}

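/*
 * tegra_dp_lt_config() - apply drive settings for the current training step
 *
 * Looks up the pre-emphasis/voltage-swing/post-cursor register values for
 * each lane, programs them into the SOR, and mirrors the same levels (with
 * the MAX_REACHED flags) into the sink's TRAINING_LANEx_SET registers.
 */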
static int tegra_dp_lt_config(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			      u32 pc[4], const struct tegra_dp_link_config *cfg)
{
	struct udevice *sor = dp->sor;
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	u32 cnt;
	u32 val;

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 mask = 0;
		u32 pe_reg, vs_reg, pc_reg;
		u32 shift = 0;

		switch (cnt) {
		case 0:
			mask = PR_LANE2_DP_LANE0_MASK;
			shift = PR_LANE2_DP_LANE0_SHIFT;
			break;
		case 1:
			mask = PR_LANE1_DP_LANE1_MASK;
			shift = PR_LANE1_DP_LANE1_SHIFT;
			break;
		case 2:
			mask = PR_LANE0_DP_LANE2_MASK;
			shift = PR_LANE0_DP_LANE2_SHIFT;
			break;
		case 3:
			mask = PR_LANE3_DP_LANE3_MASK;
			shift = PR_LANE3_DP_LANE3_SHIFT;
			break;
		default:
			debug("dp: incorrect lane cnt\n");
			return -EINVAL;
		}

		pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];

		tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
				      vs_reg << shift, pc_reg << shift,
				      pc_supported);
	}

	tegra_dp_disable_tx_pu(dp->sor);
	udelay(20);

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
		u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);

		val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
			(max_vs_flag ?
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
			(pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
			(max_pe_flag ?
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
		tegra_dc_dp_dpcd_write(dp, (DP_TRAINING_LANE0_SET + cnt), val);
	}

	if (pc_supported) {
		for (cnt = 0; cnt < n_lanes / 2; cnt++) {
			u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
			u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);

			val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
				(max_pc_flag0 ?
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
				(pc[cnt + 1] <<
				NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
				(max_pc_flag1 ?
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
			tegra_dc_dp_dpcd_write(dp,
					       NV_DPCD_TRAINING_LANE0_1_SET2 +
					       cnt, val);
		}
	}

	return 0;
}

static int _tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4],
				u32 vs[4], u32 pc[4], u8 pc_supported,
				u32 n_lanes,
				const struct tegra_dp_link_config *cfg)
{
	u32 retry_cnt;

	for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
		int ret;

		if (retry_cnt) {
			ret = tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported,
						 cfg);
			if (ret)
				return ret;
			tegra_dp_lt_config(dp, pe, vs, pc, cfg);
		}

		tegra_dp_wait_aux_training(dp, false, cfg);

		if (!tegra_dp_clock_recovery_status(dp, cfg)) {
			debug("dp: CR failed in channel EQ sequence!\n");
			break;
		}

		if (!tegra_dp_channel_eq_status(dp, cfg))
			return 0;
	}

	return -EIO;
}

static int tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			       u32 pc[4],
			       const struct tegra_dp_link_config *cfg)
{
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	int ret;
	u32 tp_src = training_pattern_2;

	if (pc_supported)
		tp_src = training_pattern_3;

	tegra_dp_tpg(dp, tp_src, n_lanes, cfg);

	ret = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes, cfg);

	tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);

	return ret;
}

static int _tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
				  u32 vs[4], u32 pc[4], u8 pc_supported,
				  u32 n_lanes,
				  const struct tegra_dp_link_config *cfg)
{
	u32 vs_temp[4];
	u32 retry_cnt = 0;

	do {
		tegra_dp_lt_config(dp, pe, vs, pc, cfg);
		tegra_dp_wait_aux_training(dp, true, cfg);

		if (tegra_dp_clock_recovery_status(dp, cfg))
			return 0;

		memcpy(vs_temp, vs, sizeof(vs_temp));
		tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported, cfg);

		if (memcmp(vs_temp, vs, sizeof(vs_temp)))
			retry_cnt = 0;
		else
			++retry_cnt;
	} while (retry_cnt < 5);

	return -EIO;
}

static int tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
				 u32 vs[4], u32 pc[4],
				 const struct tegra_dp_link_config *cfg)
{
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	int err;

	tegra_dp_tpg(dp, training_pattern_1, n_lanes, cfg);

	err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes,
				     cfg);
	if (err < 0)
		tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);

	return err;
}

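/*
 * tegra_dc_dp_full_link_training() - full (non-fast) link training
 *
 * Runs the clock-recovery phase (TPS1) followed by channel equalization
 * (TPS2/TPS3). If either phase fails, the link configuration is lowered
 * with tegra_dp_lower_link_config() and training restarts from clock
 * recovery.
 */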
static int tegra_dc_dp_full_link_training(struct tegra_dp_priv *dp,
					  const struct display_timing *timing,
					  struct tegra_dp_link_config *cfg)
{
	struct udevice *sor = dp->sor;
	int err;
	u32 pe[4], vs[4], pc[4];

	tegra_sor_precharge_lanes(sor, cfg);

retry_cr:
	memset(pe, PREEMPHASIS_DISABLED, sizeof(pe));
	memset(vs, DRIVECURRENT_LEVEL0, sizeof(vs));
	memset(pc, POSTCURSOR2_LEVEL0, sizeof(pc));

	err = tegra_dp_clk_recovery(dp, pe, vs, pc, cfg);
	if (err) {
		if (!tegra_dp_lower_link_config(dp, timing, cfg))
			goto retry_cr;
		debug("dp: clk recovery failed\n");
		goto fail;
	}

	err = tegra_dp_channel_eq(dp, pe, vs, pc, cfg);
	if (err) {
		if (!tegra_dp_lower_link_config(dp, timing, cfg))
			goto retry_cr;
		debug("dp: channel equalization failed\n");
		goto fail;
	}
#ifdef DEBUG
	tegra_dc_dp_dump_link_cfg(dp, cfg);
#endif

	return 0;

fail:
	return err;
}

/*
 * All link training functions are ported from the kernel dc driver.
 * See drivers/video/tegra/dc/dp.c for more details.
 */
static int tegra_dc_dp_fast_link_training(struct tegra_dp_priv *dp,
					  const struct tegra_dp_link_config *link_cfg,
					  struct udevice *sor)
{
	u8 link_bw;
	u8 lane_count;
	u16 data16;
	u32 data32;
	u32 size;
	u32 status;
	int j;
	u32 mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);

	tegra_dc_sor_set_lane_parm(sor, link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_MAIN_LINK_CHANNEL_CODING_SET,
			       DP_SET_ANSI_8B10B);

	/* Send TP1 */
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
			       DP_TRAINING_PATTERN_1);

	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
	udelay(520);

	size = sizeof(data16);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
			    DP_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
	status = mask & 0x1111;
	if ((data16 & status) != status) {
		debug("dp: Link training error for TP1 (%#x, status %#x)\n",
		      data16, status);
		return -EFAULT;
	}

	/* enable ASSR */
	tegra_dc_dp_set_assr(dp, sor, link_cfg->scramble_ena);
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);

	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
			       link_cfg->link_bw == 20 ? 0x23 : 0x22);
	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
	udelay(520);

	size = sizeof(data32);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD, DP_LANE0_1_STATUS,
			    (u8 *)&data32, &size, &status);
	if ((data32 & mask) != (0x7777 & mask)) {
		debug("dp: Link training error for TP2/3 (0x%x)\n", data32);
		return -EFAULT;
	}

	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
				    link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, 0);

	if (tegra_dc_dp_link_trained(dp, link_cfg)) {
		tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
		debug("Fast link training failed, link bw %d, lane # %d\n",
		      link_bw, lane_count);
		return -EFAULT;
	}

	debug("Fast link training succeeded, link bw %d, lane %d\n",
	      link_cfg->link_bw, link_cfg->lane_count);

	return 0;
}

static int tegra_dp_do_link_training(struct tegra_dp_priv *dp,
				     struct tegra_dp_link_config *link_cfg,
				     const struct display_timing *timing,
				     struct udevice *sor)
{
	u8 link_bw;
	u8 lane_count;
	int ret;

	if (DO_FAST_LINK_TRAINING) {
		ret = tegra_dc_dp_fast_link_training(dp, link_cfg, sor);
		if (ret) {
			debug("dp: fast link training failed\n");
		} else {
			/*
			 * Set to a known-good drive setting if fast link
			 * training succeeded. Ignore any error.
			 */
			ret = tegra_dc_sor_set_voltage_swing(dp->sor, link_cfg);
			if (ret)
				debug("Failed to set voltage swing\n");
		}
	} else {
		ret = -ENOSYS;
	}
	if (ret) {
		/* Try full link training then */
		ret = tegra_dc_dp_full_link_training(dp, timing, link_cfg);
		if (ret) {
			debug("dp: full link training failed\n");
			return ret;
		}
	}

	/* Everything is good; double check the link config */
	tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);

	if ((link_cfg->link_bw == link_bw) &&
	    (link_cfg->lane_count == lane_count))
		return 0;
	else
		return -EFAULT;
}

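/*
 * tegra_dc_dp_explore_link_cfg() - find a working link configuration
 *
 * Starts from the maximum advertised link bandwidth and lane count and runs
 * link training on a temporary copy; the copy is committed to @link_cfg only
 * if training succeeds, so @link_cfg->is_valid reports the result.
 */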
static int tegra_dc_dp_explore_link_cfg(struct tegra_dp_priv *dp,
					struct tegra_dp_link_config *link_cfg,
					struct udevice *sor,
					const struct display_timing *timing)
{
	struct tegra_dp_link_config temp_cfg;

	if (!timing->pixelclock.typ || !timing->hactive.typ ||
	    !timing->vactive.typ) {
		debug("dp: error mode configuration\n");
		return -EINVAL;
	}
	if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
		debug("dp: error link configuration\n");
		return -EINVAL;
	}

	link_cfg->is_valid = 0;

	memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));

	temp_cfg.link_bw = temp_cfg.max_link_bw;
	temp_cfg.lane_count = temp_cfg.max_lane_count;

	/*
	 * set to max link config
	 */
	if ((!tegra_dc_dp_calc_config(dp, timing, &temp_cfg)) &&
	    (!tegra_dp_link_config(dp, &temp_cfg)) &&
	    (!tegra_dp_do_link_training(dp, &temp_cfg, timing, sor)))
		/* the max link cfg is doable */
		memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));

	return link_cfg->is_valid ? 0 : -EFAULT;
}

static int tegra_dp_hpd_plug(struct tegra_dp_priv *dp)
{
	const int vdd_to_hpd_delay_ms = 200;
	u32 val;
	ulong start;

	start = get_timer(0);
	do {
		val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
		if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
			return 0;
		udelay(100);
	} while (get_timer(start) < vdd_to_hpd_delay_ms);

	return -EIO;
}

static int tegra_dc_dp_sink_out_of_sync(struct tegra_dp_priv *dp, u32 delay_ms)
{
	u8 dpcd_data;
	int out_of_sync;
	int ret;

	debug("%s: delay=%d\n", __func__, delay_ms);
	mdelay(delay_ms);
	ret = tegra_dc_dp_dpcd_read(dp, DP_SINK_STATUS, &dpcd_data);
	if (ret)
		return ret;

	out_of_sync = !(dpcd_data & DP_SINK_STATUS_PORT0_IN_SYNC);
	if (out_of_sync)
		debug("SINK receive port 0 out of sync, data=%x\n", dpcd_data);
	else
		debug("SINK is in synchronization\n");

	return out_of_sync;
}

static int tegra_dc_dp_check_sink(struct tegra_dp_priv *dp,
				  struct tegra_dp_link_config *link_cfg,
				  const struct display_timing *timing)
{
	const int max_retry = 5;
	int delay_frame;
	int retries;

	/*
	 * DP TCON may skip some main stream frames, thus we need to wait
	 * some delay before reading the DPCD SINK STATUS register, starting
	 * from 5
	 */
	delay_frame = 5;

	retries = max_retry;
	do {
		int ret;

		if (!tegra_dc_dp_sink_out_of_sync(dp, link_cfg->frame_in_ms *
						  delay_frame))
			return 0;

		debug("%s: retries left %d\n", __func__, retries);
		if (!retries--) {
			printf("DP: Out of sync after %d retries\n", max_retry);
			return -EIO;
		}

		ret = tegra_dc_sor_detach(dp->dc_dev, dp->sor);
		if (ret)
			return ret;
		if (tegra_dc_dp_explore_link_cfg(dp, link_cfg, dp->sor,
						 timing)) {
			debug("dp: %s: failed to configure link\n", __func__);
			continue;
		}

		tegra_dc_sor_set_power_state(dp->sor, 1);
		tegra_dc_sor_attach(dp->dc_dev, dp->sor, link_cfg, timing);

		/*
		 * Increase delay_frame for the next try in case the sink is
		 * skipping more frames.
		 */
		delay_frame += 10;
	} while (1);
}

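/*
 * tegra_dp_enable() - bring up the DisplayPort link for a given mode
 *
 * Sequence: enable the DPAUX pads, wait for HPD, read the sink capabilities
 * and build the initial link configuration, enable the SOR, power up the
 * panel through DPCD, train the link, attach the display controller and
 * finally power down any unused lanes and turn on the backlight.
 */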
int tegra_dp_enable(struct udevice *dev, int panel_bpp,
		    const struct display_timing *timing)
{
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	struct tegra_dp_link_config slink_cfg, *link_cfg = &slink_cfg;
	struct udevice *sor;
	int data;
	int retry;
	int ret;

	memset(link_cfg, '\0', sizeof(*link_cfg));
	link_cfg->is_valid = 0;
	link_cfg->scramble_ena = 1;

	tegra_dc_dpaux_enable(priv);

	if (tegra_dp_hpd_plug(priv) < 0) {
		debug("dp: hpd plug failed\n");
		return -EIO;
	}

	link_cfg->bits_per_pixel = panel_bpp;
	if (tegra_dc_dp_init_max_link_cfg(timing, priv, link_cfg)) {
		debug("dp: failed to init link configuration\n");
		return -ENOLINK;
	}

	ret = uclass_first_device(UCLASS_VIDEO_BRIDGE, &sor);
	if (ret || !sor) {
		debug("dp: failed to find SOR device: ret=%d\n", ret);
		return ret;
	}
	priv->sor = sor;
	ret = tegra_dc_sor_enable_dp(sor, link_cfg);
	if (ret)
		return ret;

	tegra_dc_sor_set_panel_power(sor, 1);

	/* Write power on to DPCD */
	data = DP_SET_POWER_D0;
	retry = 0;
	do {
		ret = tegra_dc_dp_dpcd_write(priv, DP_SET_POWER, data);
	} while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);

	if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
		debug("dp: failed to power on panel (0x%x)\n", ret);
		return -ENETUNREACH;
	}

	/* Confirm DP plugging status */
	if (!(tegra_dpaux_readl(priv, DPAUX_DP_AUXSTAT) &
	      DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		debug("dp: could not detect HPD\n");
		return -ENXIO;
	}

	/* Check DP version */
	if (tegra_dc_dp_dpcd_read(priv, DP_DPCD_REV, &priv->revision)) {
		debug("dp: failed to read the revision number from sink\n");
		return -EIO;
	}

	if (tegra_dc_dp_explore_link_cfg(priv, link_cfg, sor, timing)) {
		debug("dp: error configuring link\n");
		return -ENOMEDIUM;
	}

	tegra_dc_sor_set_power_state(sor, 1);
	ret = tegra_dc_sor_attach(priv->dc_dev, sor, link_cfg, timing);
	if (ret && ret != -EEXIST)
		return ret;

	/*
	 * This takes a long time, but can apparently resolve a failure to
	 * bring up the display correctly.
	 */
	if (0) {
		ret = tegra_dc_dp_check_sink(priv, link_cfg, timing);
		if (ret)
			return ret;
	}

	/* Power down the unused lanes to save power - a few hundred mW */
	tegra_dc_sor_power_down_unused_lanes(sor, link_cfg);

	ret = video_bridge_set_backlight(sor, 80);
	if (ret) {
		debug("dp: failed to set backlight\n");
		return ret;
	}

	priv->enabled = true;

	return 0;
}

static int tegra_dp_ofdata_to_platdata(struct udevice *dev)
{
	struct tegra_dp_plat *plat = dev_get_platdata(dev);

	plat->base = dev_read_addr(dev);

	return 0;
}

static int tegra_dp_read_edid(struct udevice *dev, u8 *buf, int buf_size)
{
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	const int tegra_edid_i2c_address = 0x50;
	u32 aux_stat = 0;

	tegra_dc_dpaux_enable(priv);

	return tegra_dc_i2c_aux_read(priv, tegra_edid_i2c_address, 0, buf,
				     buf_size, &aux_stat);
}

static const struct dm_display_ops dp_tegra_ops = {
	.read_edid = tegra_dp_read_edid,
	.enable = tegra_dp_enable,
};

static int dp_tegra_probe(struct udevice *dev)
{
	struct tegra_dp_plat *plat = dev_get_platdata(dev);
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	struct display_plat *disp_uc_plat = dev_get_uclass_platdata(dev);

	priv->regs = (struct dpaux_ctlr *)plat->base;
	priv->enabled = false;

	/* Remember the display controller that is sending us video */
	priv->dc_dev = disp_uc_plat->src_dev;

	return 0;
}

static const struct udevice_id tegra_dp_ids[] = {
	{ .compatible = "nvidia,tegra124-dpaux" },
	{ }
};

U_BOOT_DRIVER(dp_tegra) = {
	.name = "dpaux_tegra",
	.id = UCLASS_DISPLAY,
	.of_match = tegra_dp_ids,
	.ofdata_to_platdata = tegra_dp_ofdata_to_platdata,
	.probe = dp_tegra_probe,
	.ops = &dp_tegra_ops,
	.priv_auto_alloc_size = sizeof(struct tegra_dp_priv),
	.platdata_auto_alloc_size = sizeof(struct tegra_dp_plat),
};