cdn-dp-reg.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/reset.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"

#define CDN_DP_SPDIF_CLK		200000000
#define FW_ALIVE_TIMEOUT_US		1000000
#define MAILBOX_RETRY_US		1000
#define MAILBOX_TIMEOUT_US		5000000
#define LINK_TRAINING_RETRY_MS		20
#define LINK_TRAINING_TIMEOUT_MS	500

void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, unsigned long clk)
{
	writel(clk / 1000000, dp->regs + SW_CLK_H);
}

void cdn_dp_clock_reset(struct cdn_dp_device *dp)
{
	u32 val;

	val = DPTX_FRMR_DATA_CLK_RSTN_EN |
	      DPTX_FRMR_DATA_CLK_EN |
	      DPTX_PHY_DATA_RSTN_EN |
	      DPTX_PHY_DATA_CLK_EN |
	      DPTX_PHY_CHAR_RSTN_EN |
	      DPTX_PHY_CHAR_CLK_EN |
	      SOURCE_AUX_SYS_CLK_RSTN_EN |
	      SOURCE_AUX_SYS_CLK_EN |
	      DPTX_SYS_CLK_RSTN_EN |
	      DPTX_SYS_CLK_EN |
	      CFG_DPTX_VIF_CLK_RSTN_EN |
	      CFG_DPTX_VIF_CLK_EN;
	writel(val, dp->regs + SOURCE_DPTX_CAR);

	val = SOURCE_PHY_RSTN_EN | SOURCE_PHY_CLK_EN;
	writel(val, dp->regs + SOURCE_PHY_CAR);

	val = SOURCE_PKT_SYS_RSTN_EN |
	      SOURCE_PKT_SYS_CLK_EN |
	      SOURCE_PKT_DATA_RSTN_EN |
	      SOURCE_PKT_DATA_CLK_EN;
	writel(val, dp->regs + SOURCE_PKT_CAR);

	val = SPDIF_CDR_CLK_RSTN_EN |
	      SPDIF_CDR_CLK_EN |
	      SOURCE_AIF_SYS_RSTN_EN |
	      SOURCE_AIF_SYS_CLK_EN |
	      SOURCE_AIF_CLK_RSTN_EN |
	      SOURCE_AIF_CLK_EN;
	writel(val, dp->regs + SOURCE_AIF_CAR);

	val = SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN |
	      SOURCE_CIPHER_SYS_CLK_EN |
	      SOURCE_CIPHER_CHAR_CLK_RSTN_EN |
	      SOURCE_CIPHER_CHAR_CLK_EN;
	writel(val, dp->regs + SOURCE_CIPHER_CAR);

	val = SOURCE_CRYPTO_SYS_CLK_RSTN_EN |
	      SOURCE_CRYPTO_SYS_CLK_EN;
	writel(val, dp->regs + SOURCE_CRYPTO_CAR);

	/* enable Mailbox and PIF interrupt */
	writel(0, dp->regs + APB_INT_MASK);
}
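
/*
 * Mailbox transactions with the embedded firmware use a 4-byte header
 * (opcode, module ID, 16-bit big-endian payload length) followed by the
 * payload bytes. Responses are framed the same way; the helpers below
 * validate the response header before the payload is read back.
 */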

static int cdn_dp_mailbox_read(struct cdn_dp_device *dp)
{
	int val, ret;

	ret = readx_poll_timeout(readl, dp->regs + MAILBOX_EMPTY_ADDR,
				 val, !val, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	return readl(dp->regs + MAILBOX0_RD_DATA) & 0xff;
}

static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val)
{
	int ret, full;

	ret = readx_poll_timeout(readl, dp->regs + MAILBOX_FULL_ADDR,
				 full, !full, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	writel(val, dp->regs + MAILBOX0_WR_DATA);

	return 0;
}

static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
					   u8 module_id, u8 opcode,
					   u16 req_size)
{
	u32 mbox_size, i;
	u8 header[4];
	int ret;

	/* read the header of the message */
	for (i = 0; i < 4; i++) {
		ret = cdn_dp_mailbox_read(dp);
		if (ret < 0)
			return ret;

		header[i] = ret;
	}

	mbox_size = (header[2] << 8) | header[3];

	if (opcode != header[0] || module_id != header[1] ||
	    req_size != mbox_size) {
		/*
		 * If the message in the mailbox is not the one we expect,
		 * clear the mailbox by reading out its contents.
		 */
		for (i = 0; i < mbox_size; i++)
			if (cdn_dp_mailbox_read(dp) < 0)
				break;
		return -EINVAL;
	}

	return 0;
}

static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
				       u8 *buff, u16 buff_size)
{
	u32 i;
	int ret;

	for (i = 0; i < buff_size; i++) {
		ret = cdn_dp_mailbox_read(dp);
		if (ret < 0)
			return ret;

		buff[i] = ret;
	}

	return 0;
}

static int cdn_dp_mailbox_send(struct cdn_dp_device *dp, u8 module_id,
			       u8 opcode, u16 size, u8 *message)
{
	u8 header[4];
	int ret, i;

	header[0] = opcode;
	header[1] = module_id;
	header[2] = (size >> 8) & 0xff;
	header[3] = size & 0xff;

	for (i = 0; i < 4; i++) {
		ret = cdp_dp_mailbox_write(dp, header[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < size; i++) {
		ret = cdp_dp_mailbox_write(dp, message[i]);
		if (ret)
			return ret;
	}

	return 0;
}
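
/*
 * Write a 32-bit controller register through the firmware mailbox: the
 * payload carries the 16-bit register address followed by the value,
 * both in big-endian byte order.
 */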
static int cdn_dp_reg_write(struct cdn_dp_device *dp, u16 addr, u32 val)
{
	u8 msg[6];

	msg[0] = (addr >> 8) & 0xff;
	msg[1] = addr & 0xff;
	msg[2] = (val >> 24) & 0xff;
	msg[3] = (val >> 16) & 0xff;
	msg[4] = (val >> 8) & 0xff;
	msg[5] = val & 0xff;
	return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_REGISTER,
				   sizeof(msg), msg);
}

static int cdn_dp_reg_write_bit(struct cdn_dp_device *dp, u16 addr,
				u8 start_bit, u8 bits_no, u32 val)
{
	u8 field[8];

	field[0] = (addr >> 8) & 0xff;
	field[1] = addr & 0xff;
	field[2] = start_bit;
	field[3] = bits_no;
	field[4] = (val >> 24) & 0xff;
	field[5] = (val >> 16) & 0xff;
	field[6] = (val >> 8) & 0xff;
	field[7] = val & 0xff;

	return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_FIELD,
				   sizeof(field), field);
}

int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len)
{
	u8 msg[5], reg[5];
	int ret;

	msg[0] = (len >> 8) & 0xff;
	msg[1] = len & 0xff;
	msg[2] = (addr >> 16) & 0xff;
	msg[3] = (addr >> 8) & 0xff;
	msg[4] = addr & 0xff;
	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_DPCD,
				  sizeof(msg), msg);
	if (ret)
		goto err_dpcd_read;

	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
					      DPTX_READ_DPCD,
					      sizeof(reg) + len);
	if (ret)
		goto err_dpcd_read;

	ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
	if (ret)
		goto err_dpcd_read;

	ret = cdn_dp_mailbox_read_receive(dp, data, len);

err_dpcd_read:
	return ret;
}

int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value)
{
	u8 msg[6], reg[5];
	int ret;

	msg[0] = 0;
	msg[1] = 1;
	msg[2] = (addr >> 16) & 0xff;
	msg[3] = (addr >> 8) & 0xff;
	msg[4] = addr & 0xff;
	msg[5] = value;
	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_DPCD,
				  sizeof(msg), msg);
	if (ret)
		goto err_dpcd_write;

	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
					      DPTX_WRITE_DPCD, sizeof(reg));
	if (ret)
		goto err_dpcd_write;

	ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
	if (ret)
		goto err_dpcd_write;

	if (addr != (reg[2] << 16 | reg[3] << 8 | reg[4]))
		ret = -EINVAL;

err_dpcd_write:
	if (ret)
		DRM_DEV_ERROR(dp->dev, "dpcd write failed: %d\n", ret);
	return ret;
}
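
/*
 * Load the firmware image: hold the embedded microcontroller (ucpu) in
 * reset, copy the instruction and data memory images word by word into
 * IRAM/DRAM (i_size and d_size are byte counts), release the reset and
 * then poll KEEP_ALIVE until the firmware signals that it is running.
 */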
int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
			 u32 i_size, const u32 *d_mem, u32 d_size)
{
	u32 reg;
	int i, ret;

	/* reset ucpu before loading the firmware */
	writel(APB_IRAM_PATH | APB_DRAM_PATH | APB_XT_RESET,
	       dp->regs + APB_CTRL);

	for (i = 0; i < i_size; i += 4)
		writel(*i_mem++, dp->regs + ADDR_IMEM + i);

	for (i = 0; i < d_size; i += 4)
		writel(*d_mem++, dp->regs + ADDR_DMEM + i);

	/* un-reset ucpu */
	writel(0, dp->regs + APB_CTRL);

	/* check the keep alive register to make sure the fw is working */
	ret = readx_poll_timeout(readl, dp->regs + KEEP_ALIVE,
				 reg, reg, 2000, FW_ALIVE_TIMEOUT_US);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "failed to load the FW reg = %x\n",
			      reg);
		return -EINVAL;
	}

	reg = readl(dp->regs + VER_L) & 0xff;
	dp->fw_version = reg;
	reg = readl(dp->regs + VER_H) & 0xff;
	dp->fw_version |= reg << 8;
	reg = readl(dp->regs + VER_LIB_L_ADDR) & 0xff;
	dp->fw_version |= reg << 16;
	reg = readl(dp->regs + VER_LIB_H_ADDR) & 0xff;
	dp->fw_version |= reg << 24;

	DRM_DEV_DEBUG(dp->dev, "firmware version: %x\n", dp->fw_version);

	return 0;
}

int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable)
{
	u8 msg[5];
	int ret, i;

	msg[0] = GENERAL_MAIN_CONTROL;
	msg[1] = MB_MODULE_ID_GENERAL;
	msg[2] = 0;
	msg[3] = 1;
	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;

	for (i = 0; i < sizeof(msg); i++) {
		ret = cdp_dp_mailbox_write(dp, msg[i]);
		if (ret)
			goto err_set_firmware_active;
	}

	/* read the firmware state */
	for (i = 0; i < sizeof(msg); i++) {
		ret = cdn_dp_mailbox_read(dp);
		if (ret < 0)
			goto err_set_firmware_active;

		msg[i] = ret;
	}

	ret = 0;

err_set_firmware_active:
	if (ret < 0)
		DRM_DEV_ERROR(dp->dev, "set firmware active failed\n");
	return ret;
}

int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip)
{
	u8 msg[8];
	int ret;

	msg[0] = CDN_DP_MAX_LINK_RATE;
	msg[1] = lanes | SCRAMBLER_EN;
	msg[2] = VOLTAGE_LEVEL_2;
	msg[3] = PRE_EMPHASIS_LEVEL_3;
	msg[4] = PTS1 | PTS2 | PTS3 | PTS4;
	msg[5] = FAST_LT_NOT_SUPPORT;
	msg[6] = flip ? LANE_MAPPING_FLIPPED : LANE_MAPPING_NORMAL;
	msg[7] = ENHANCED;

	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
				  DPTX_SET_HOST_CAPABILITIES,
				  sizeof(msg), msg);
	if (ret)
		goto err_set_host_cap;

	ret = cdn_dp_reg_write(dp, DP_AUX_SWAP_INVERSION_CONTROL,
			       AUX_HOST_INVERT);

err_set_host_cap:
	if (ret)
		DRM_DEV_ERROR(dp->dev, "set host cap failed: %d\n", ret);
	return ret;
}

int cdn_dp_event_config(struct cdn_dp_device *dp)
{
	u8 msg[5];
	int ret;

	memset(msg, 0, sizeof(msg));

	msg[0] = DPTX_EVENT_ENABLE_HPD | DPTX_EVENT_ENABLE_TRAINING;

	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_ENABLE_EVENT,
				  sizeof(msg), msg);
	if (ret)
		DRM_DEV_ERROR(dp->dev, "set event config failed: %d\n", ret);

	return ret;
}

u32 cdn_dp_get_event(struct cdn_dp_device *dp)
{
	return readl(dp->regs + SW_EVENTS0);
}

int cdn_dp_get_hpd_status(struct cdn_dp_device *dp)
{
	u8 status;
	int ret;

	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_HPD_STATE,
				  0, NULL);
	if (ret)
		goto err_get_hpd;

	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
					      DPTX_HPD_STATE, sizeof(status));
	if (ret)
		goto err_get_hpd;

	ret = cdn_dp_mailbox_read_receive(dp, &status, sizeof(status));
	if (ret)
		goto err_get_hpd;

	return status;

err_get_hpd:
	DRM_DEV_ERROR(dp->dev, "get hpd status failed: %d\n", ret);
	return ret;
}

int cdn_dp_get_edid_block(void *data, u8 *edid,
			  unsigned int block, size_t length)
{
	struct cdn_dp_device *dp = data;
	u8 msg[2], reg[2], i;
	int ret;

	for (i = 0; i < 4; i++) {
		msg[0] = block / 2;
		msg[1] = block % 2;

		ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_GET_EDID,
					  sizeof(msg), msg);
		if (ret)
			continue;

		ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
						      DPTX_GET_EDID,
						      sizeof(reg) + length);
		if (ret)
			continue;

		ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
		if (ret)
			continue;

		ret = cdn_dp_mailbox_read_receive(dp, edid, length);
		if (ret)
			continue;

		if (reg[0] == length && reg[1] == block / 2)
			break;
	}

	if (ret)
		DRM_DEV_ERROR(dp->dev, "get block[%d] edid failed: %d\n", block,
			      ret);

	return ret;
}

static int cdn_dp_training_start(struct cdn_dp_device *dp)
{
	unsigned long timeout;
	u8 msg, event[2];
	int ret;

	msg = LINK_TRAINING_RUN;

	/* start training */
	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_TRAINING_CONTROL,
				  sizeof(msg), &msg);
	if (ret)
		goto err_training_start;

	timeout = jiffies + msecs_to_jiffies(LINK_TRAINING_TIMEOUT_MS);
	while (time_before(jiffies, timeout)) {
		msleep(LINK_TRAINING_RETRY_MS);
		ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
					  DPTX_READ_EVENT, 0, NULL);
		if (ret)
			goto err_training_start;

		ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
						      DPTX_READ_EVENT,
						      sizeof(event));
		if (ret)
			goto err_training_start;

		ret = cdn_dp_mailbox_read_receive(dp, event, sizeof(event));
		if (ret)
			goto err_training_start;

		if (event[1] & EQ_PHASE_FINISHED)
			return 0;
	}

	ret = -ETIMEDOUT;

err_training_start:
	DRM_DEV_ERROR(dp->dev, "training failed: %d\n", ret);
	return ret;
}

static int cdn_dp_get_training_status(struct cdn_dp_device *dp)
{
	u8 status[10];
	int ret;

	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_LINK_STAT,
				  0, NULL);
	if (ret)
		goto err_get_training_status;

	ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
					      DPTX_READ_LINK_STAT,
					      sizeof(status));
	if (ret)
		goto err_get_training_status;

	ret = cdn_dp_mailbox_read_receive(dp, status, sizeof(status));
	if (ret)
		goto err_get_training_status;

	dp->max_rate = drm_dp_bw_code_to_link_rate(status[0]);
	dp->max_lanes = status[1];

err_get_training_status:
	if (ret)
		DRM_DEV_ERROR(dp->dev, "get training status failed: %d\n", ret);
	return ret;
}

int cdn_dp_train_link(struct cdn_dp_device *dp)
{
	int ret;

	ret = cdn_dp_training_start(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to start training %d\n", ret);
		return ret;
	}

	ret = cdn_dp_get_training_status(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get training stat %d\n", ret);
		return ret;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->max_rate,
			  dp->max_lanes);
	return ret;
}

int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active)
{
	u8 msg;
	int ret;

	msg = !!active;

	ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_SET_VIDEO,
				  sizeof(msg), &msg);
	if (ret)
		DRM_DEV_ERROR(dp->dev, "set video status failed: %d\n", ret);

	return ret;
}

static int cdn_dp_get_msa_misc(struct video_info *video,
			       struct drm_display_mode *mode)
{
	u32 msa_misc;
	u8 val[2] = {0};

	switch (video->color_fmt) {
	case PXL_RGB:
	case Y_ONLY:
		val[0] = 0;
		break;
	/* set YUV default color space conversion to BT601 */
	case YCBCR_4_4_4:
		val[0] = 6 + BT_601 * 8;
		break;
	case YCBCR_4_2_2:
		val[0] = 5 + BT_601 * 8;
		break;
	case YCBCR_4_2_0:
		val[0] = 5;
		break;
	}

	switch (video->color_depth) {
	case 6:
		val[1] = 0;
		break;
	case 8:
		val[1] = 1;
		break;
	case 10:
		val[1] = 2;
		break;
	case 12:
		val[1] = 3;
		break;
	case 16:
		val[1] = 4;
		break;
	}
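
	/*
	 * Pack the codes selected above into the MSA MISC word:
	 * 2 * val[0] places the color format/colorimetry code in bits [4:1]
	 * and 32 * val[1] places the bit-depth code in bits [7:5]; bit 14
	 * flags Y-only video.
	 */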
	msa_misc = 2 * val[0] + 32 * val[1] +
		   ((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);

	return msa_misc;
}

int cdn_dp_config_video(struct cdn_dp_device *dp)
{
	struct video_info *video = &dp->video_info;
	struct drm_display_mode *mode = &dp->mode;
	u64 symbol;
	u32 val, link_rate, rem;
	u8 bit_per_pix, tu_size_reg = TU_SIZE;
	int ret;

	bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
		      (video->color_depth * 2) : (video->color_depth * 3);

	link_rate = dp->max_rate / 1000;

	ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
	if (ret)
		goto err_config_video;

	ret = cdn_dp_reg_write(dp, HSYNC2VSYNC_POL_CTRL, 0);
	if (ret)
		goto err_config_video;

	/*
	 * Find a suitable TU size and valid symbol count:
	 * 1. choose the Lclk frequency (162 MHz, 270 MHz or 540 MHz) and
	 *    start with a TU of 32 symbols
	 * 2. calculate VS (valid symbols) = TU * Pclk * Bpp / (Lclk * Lanes)
	 * 3. if the fractional part of VS is above .85 or below .1, or
	 *    VS < 2, or TU < VS + 4, then set TU += 2 and repeat step 2.
	 */
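	/*
	 * Example with hypothetical numbers: a 148.5 MHz pixel clock at
	 * 24 bpp over 4 lanes at HBR2 (link_rate = 540) gives, for a
	 * 32-symbol TU, 32 * 148500 * 24 / (4 * 540 * 8) = 6600, i.e.
	 * VS = 6.6; the integer part (6) and fractional part (600) pass
	 * every check above, so the loop below exits on its first pass.
	 */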
	do {
		tu_size_reg += 2;
		symbol = (u64)tu_size_reg * mode->clock * bit_per_pix;
		do_div(symbol, dp->max_lanes * link_rate * 8);
		rem = do_div(symbol, 1000);
		if (tu_size_reg > 64) {
			ret = -EINVAL;
			DRM_DEV_ERROR(dp->dev,
				      "tu error, clk:%d, lanes:%d, rate:%d\n",
				      mode->clock, dp->max_lanes, link_rate);
			goto err_config_video;
		}
	} while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
		 (rem > 850) || (rem < 100));

	val = symbol + (tu_size_reg << 8);
	val |= TU_CNT_RST_EN;
	ret = cdn_dp_reg_write(dp, DP_FRAMER_TU, val);
	if (ret)
		goto err_config_video;

	/* set the FIFO Buffer size */
	val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
	val /= (dp->max_lanes * link_rate);
	val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
	val += 2;
	ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val);

	switch (video->color_depth) {
	case 6:
		val = BCS_6;
		break;
	case 8:
		val = BCS_8;
		break;
	case 10:
		val = BCS_10;
		break;
	case 12:
		val = BCS_12;
		break;
	case 16:
		val = BCS_16;
		break;
	}

	val += video->color_fmt << 8;
	ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val);
	if (ret)
		goto err_config_video;

	val = video->h_sync_polarity ? DP_FRAMER_SP_HSP : 0;
	val |= video->v_sync_polarity ? DP_FRAMER_SP_VSP : 0;
	ret = cdn_dp_reg_write(dp, DP_FRAMER_SP, val);
	if (ret)
		goto err_config_video;

	val = (mode->hsync_start - mode->hdisplay) << 16;
	val |= mode->htotal - mode->hsync_end;
	ret = cdn_dp_reg_write(dp, DP_FRONT_BACK_PORCH, val);
	if (ret)
		goto err_config_video;

	val = mode->hdisplay * bit_per_pix / 8;
	ret = cdn_dp_reg_write(dp, DP_BYTE_COUNT, val);
	if (ret)
		goto err_config_video;

	val = mode->htotal | ((mode->htotal - mode->hsync_start) << 16);
	ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_0, val);
	if (ret)
		goto err_config_video;

	val = mode->hsync_end - mode->hsync_start;
	val |= (mode->hdisplay << 16) | (video->h_sync_polarity << 15);
	ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_1, val);
	if (ret)
		goto err_config_video;

	val = mode->vtotal;
	val |= (mode->vtotal - mode->vsync_start) << 16;
	ret = cdn_dp_reg_write(dp, MSA_VERTICAL_0, val);
	if (ret)
		goto err_config_video;

	val = mode->vsync_end - mode->vsync_start;
	val |= (mode->vdisplay << 16) | (video->v_sync_polarity << 15);
	ret = cdn_dp_reg_write(dp, MSA_VERTICAL_1, val);
	if (ret)
		goto err_config_video;

	val = cdn_dp_get_msa_misc(video, mode);
	ret = cdn_dp_reg_write(dp, MSA_MISC, val);
	if (ret)
		goto err_config_video;

	ret = cdn_dp_reg_write(dp, STREAM_CONFIG, 1);
	if (ret)
		goto err_config_video;

	val = mode->hsync_end - mode->hsync_start;
	val |= mode->hdisplay << 16;
	ret = cdn_dp_reg_write(dp, DP_HORIZONTAL, val);
	if (ret)
		goto err_config_video;

	val = mode->vdisplay;
	val |= (mode->vtotal - mode->vsync_start) << 16;
	ret = cdn_dp_reg_write(dp, DP_VERTICAL_0, val);
	if (ret)
		goto err_config_video;

	val = mode->vtotal;
	ret = cdn_dp_reg_write(dp, DP_VERTICAL_1, val);
	if (ret)
		goto err_config_video;

	ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 2, 1, 0);

err_config_video:
	if (ret)
		DRM_DEV_ERROR(dp->dev, "config video failed: %d\n", ret);
	return ret;
}

int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
{
	int ret;

	ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "audio stop failed: %d\n", ret);
		return ret;
	}

	writel(0, dp->regs + SPDIF_CTRL_ADDR);

	/* clear the audio config and reset */
	writel(0, dp->regs + AUDIO_SRC_CNTL);
	writel(0, dp->regs + AUDIO_SRC_CNFG);
	writel(AUDIO_SW_RST, dp->regs + AUDIO_SRC_CNTL);
	writel(0, dp->regs + AUDIO_SRC_CNTL);

	/* reset smpl2pckt component */
	writel(0, dp->regs + SMPL2PKT_CNTL);
	writel(AUDIO_SW_RST, dp->regs + SMPL2PKT_CNTL);
	writel(0, dp->regs + SMPL2PKT_CNTL);

	/* reset FIFO */
	writel(AUDIO_SW_RST, dp->regs + FIFO_CNTL);
	writel(0, dp->regs + FIFO_CNTL);

	if (audio->format == AFMT_SPDIF)
		clk_disable_unprepare(dp->spdif_clk);

	return 0;
}

int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable)
{
	int ret;

	ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 4, 1, enable);
	if (ret)
		DRM_DEV_ERROR(dp->dev, "audio mute failed: %d\n", ret);

	return ret;
}

static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp,
				    struct audio_info *audio)
{
	int sub_pckt_num = 1, i2s_port_en_val = 0xf, i;
	u32 val;

	if (audio->channels == 2) {
		if (dp->max_lanes == 1)
			sub_pckt_num = 2;
		else
			sub_pckt_num = 4;

		i2s_port_en_val = 1;
	} else if (audio->channels == 4) {
		i2s_port_en_val = 3;
	}

	writel(0x0, dp->regs + SPDIF_CTRL_ADDR);

	writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);

	val = MAX_NUM_CH(audio->channels);
	val |= NUM_OF_I2S_PORTS(audio->channels);
	val |= AUDIO_TYPE_LPCM;
	val |= CFG_SUB_PCKT_NUM(sub_pckt_num);
	writel(val, dp->regs + SMPL2PKT_CNFG);

	if (audio->sample_width == 16)
		val = 0;
	else if (audio->sample_width == 24)
		val = 1 << 9;
	else
		val = 2 << 9;

	val |= AUDIO_CH_NUM(audio->channels);
	val |= I2S_DEC_PORT_EN(i2s_port_en_val);
	val |= TRANS_SMPL_WIDTH_32;
	writel(val, dp->regs + AUDIO_SRC_CNFG);

	for (i = 0; i < (audio->channels + 1) / 2; i++) {
		if (audio->sample_width == 16)
			val = (0x02 << 8) | (0x02 << 20);
		else if (audio->sample_width == 24)
			val = (0x0b << 8) | (0x0b << 20);

		val |= ((2 * i) << 4) | ((2 * i + 1) << 16);
		writel(val, dp->regs + STTS_BIT_CH(i));
	}
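
	/*
	 * The sampling-frequency and original-sampling-frequency codes
	 * written to COM_CH_STTS_BITS below appear to follow the IEC 60958
	 * channel-status encoding for each supported rate.
	 */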
	switch (audio->sample_rate) {
	case 32000:
		val = SAMPLING_FREQ(3) |
		      ORIGINAL_SAMP_FREQ(0xc);
		break;
	case 44100:
		val = SAMPLING_FREQ(0) |
		      ORIGINAL_SAMP_FREQ(0xf);
		break;
	case 48000:
		val = SAMPLING_FREQ(2) |
		      ORIGINAL_SAMP_FREQ(0xd);
		break;
	case 88200:
		val = SAMPLING_FREQ(8) |
		      ORIGINAL_SAMP_FREQ(0x7);
		break;
	case 96000:
		val = SAMPLING_FREQ(0xa) |
		      ORIGINAL_SAMP_FREQ(5);
		break;
	case 176400:
		val = SAMPLING_FREQ(0xc) |
		      ORIGINAL_SAMP_FREQ(3);
		break;
	case 192000:
		val = SAMPLING_FREQ(0xe) |
		      ORIGINAL_SAMP_FREQ(1);
		break;
	}
	val |= 4;
	writel(val, dp->regs + COM_CH_STTS_BITS);

	writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
	writel(I2S_DEC_START, dp->regs + AUDIO_SRC_CNTL);
}

static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
{
	u32 val;

	writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);

	val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
	writel(val, dp->regs + SMPL2PKT_CNFG);
	writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);

	val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
	writel(val, dp->regs + SPDIF_CTRL_ADDR);

	clk_prepare_enable(dp->spdif_clk);
	clk_set_rate(dp->spdif_clk, CDN_DP_SPDIF_CLK);
}

int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio)
{
	int ret;

	/* reset the spdif clk before config */
	if (audio->format == AFMT_SPDIF) {
		reset_control_assert(dp->spdif_rst);
		reset_control_deassert(dp->spdif_rst);
	}

	ret = cdn_dp_reg_write(dp, CM_LANE_CTRL, LANE_REF_CYC);
	if (ret)
		goto err_audio_config;

	ret = cdn_dp_reg_write(dp, CM_CTRL, 0);
	if (ret)
		goto err_audio_config;

	if (audio->format == AFMT_I2S)
		cdn_dp_audio_config_i2s(dp, audio);
	else if (audio->format == AFMT_SPDIF)
		cdn_dp_audio_config_spdif(dp);

	ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, AUDIO_PACK_EN);

err_audio_config:
	if (ret)
		DRM_DEV_ERROR(dp->dev, "audio config failed: %d\n", ret);
	return ret;
}