// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <sound/hdmi-codec.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"

struct cdn_dp_data {
	u8 max_phy;
};

struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
		.data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);

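/*
 * Write a GRF (general register file) register. The GRF clock has to be
 * running for the regmap write to take effect, so it is enabled around
 * the access and released again afterwards.
 */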
static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}

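/*
 * Power up the controller: enable pclk and core_clk, take a runtime PM
 * reference, pulse the core/dptx/apb resets, then tell the firmware the
 * core clock rate (cdn_dp_set_fw_clk()) and reset the controller clocks
 * (cdn_dp_clock_reset()).
 */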
static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}

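/*
 * Number of DP lanes available on a port, derived from the extcon state:
 * 0 if no DP device is attached, 2 if the cable also carries USB
 * SuperSpeed (EXTCON_PROP_USB_SS set), 4 otherwise.
 */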
static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}
	return NULL;
}

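/*
 * Poll DP_SINK_COUNT on the active port until a sink is reported or
 * CDN_DPCD_TIMEOUT_MS expires. Gives up early if the extcon reports the
 * cable has gone away.
 */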
static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);

		ret = drm_add_edid_modes(connector, edid);
		if (ret)
			drm_connector_update_edid_property(connector,
							   edid);
	}
	mutex_unlock(&dp->lock);

	return ret;
}

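/*
 * A mode is only valid if its required bandwidth (pixel clock * 3 * bpc)
 * fits within roughly 80% of the link bandwidth negotiated between the
 * source (CDN_DP_MAX_LINK_RATE, dp->lanes) and the sink (DPCD caps).
 */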
static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
				       struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.mode_valid = cdn_dp_connector_mode_valid,
};

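/*
 * Sanity-check the firmware image against its header, load the IRAM and
 * DRAM sections into the controller, start the firmware (ucpu) and
 * configure which events it should report.
 */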
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}

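/*
 * Power on the PHY for @port, route HPD from this port to the DP
 * controller via GRF_SOC_CON26, verify HPD is asserted, then hand the
 * lane count and Type-C plug orientation to the firmware. On failure the
 * PHY is powered back off and the HPD mux is released.
 */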
static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->max_lanes = 0;
	dp->max_rate = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}

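/*
 * Enable the controller for the first usable connected port: bring up
 * clocks, load and start the firmware, then try each connected port in
 * turn and keep the first one whose PHY powers up and whose sink
 * capabilities can be read.
 */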
static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* only enable the port that is connected to a downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}

static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	memcpy(&dp->mode, adjusted, sizeof(*mode));
}

static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->max_rate || !dp->max_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}

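/*
 * Select which VOP feeds the DP controller (GRF_SOC_CON9), make sure the
 * controller is enabled and the link is trained, then reconfigure the
 * video path: idle the stream, apply the new timing, and mark it valid.
 */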
static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}

out:
	mutex_unlock(&dp->lock);
}

static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following 2 cases, we need to run the event_work to re-enable
	 * the DP:
	 *
	 * 1. If more than one port device is connected and one of them is
	 *    removed, the DP will be disabled here; run the event_work to
	 *    re-open the DP for the other port.
	 * 2. If re-training or re-configuration failed, the DP will be
	 *    disabled here; run the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}

static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}

static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_mute_stream(struct device *dev, void *data,
				    bool enable, int direction)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.mute_stream = cdn_dp_audio_mute_stream,
	.get_eld = cdn_dp_audio_get_eld,
	.no_capture_mute = 1,
};

static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}

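/*
 * Fetch the dptx firmware, retrying with exponential backoff for up to
 * CDN_FW_TIMEOUT_MS while the file is still missing (-ENOENT), e.g. when
 * the filesystem holding it is not mounted yet. dp->lock is dropped while
 * waiting so other work is not blocked.
 */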
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}

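/*
 * Hotplug/extcon worker: re-evaluates the port state and either disables
 * the controller, enables it, drops the connected flag when no sink is
 * present, or re-trains the link. Finishes by re-running detection and
 * sending a hotplug event if the connector status changed.
 */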
static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;
	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->max_rate;
		unsigned int lanes = dp->max_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->max_rate || lanes != dp->max_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}

static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

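/*
 * Component bind: parse DT resources, register the encoder and connector
 * with the DRM device, register an extcon notifier for each port and kick
 * the event worker once to pick up the initial cable state.
 */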
static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_simple_encoder_init(drm_dev, encoder,
				      DRM_MODE_ENCODER_TMDS);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};

static int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

static __maybe_unused int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}

static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	return component_add(dev, &cdn_dp_component_ops);
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		   .name = "cdn-dp",
		   .owner = THIS_MODULE,
		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
		   .pm = &cdn_dp_pm_ops,
	},
};