sun4i_backend.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Free Electrons
 * Copyright (C) 2015 NextThing Co
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/component.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>

#include "sun4i_backend.h"
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"

struct sun4i_backend_quirks {
	/* backend <-> TCON muxing selection done in backend */
	bool needs_output_muxing;

	/* alpha at the lowest z position is not always supported */
	bool supports_lowest_plane_alpha;
};
static const u32 sunxi_rgb2yuv_coef[12] = {
	0x00000107, 0x00000204, 0x00000064, 0x00000108,
	0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
	0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};
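
/*
 * Output color correction: when enabled, the backend runs its output
 * through a colorspace conversion using the 12 coefficients above,
 * presumably needed when the downstream encoder (e.g. the TV encoder)
 * expects YUV rather than RGB.
 */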
static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
	int i;

	DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");

	/* Set color correction */
	regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG,
		     SUN4I_BACKEND_OCCTL_ENABLE);

	for (i = 0; i < 12; i++)
		regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
			     sunxi_rgb2yuv_coef[i]);
}
static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Disabling color correction\n");

	/* Disable color correction */
	regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG,
			   SUN4I_BACKEND_OCCTL_ENABLE, 0);
}
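
/*
 * The backend registers are double-buffered: writing REGBUFFCTL_LOADCTL
 * (with automatic loading disabled) asks the hardware to latch the shadow
 * registers into the active configuration, which it fetches during the
 * next vblank and then clears the bit again.
 */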
static void sun4i_backend_commit(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Committing changes\n");

	regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
		     SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
}
void sun4i_backend_layer_enable(struct sun4i_backend *backend,
				int layer, bool enable)
{
	u32 val;

	DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis",
			 layer);

	if (enable)
		val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
	else
		val = 0;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
}
static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
		break;

	case DRM_FORMAT_ARGB4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
		break;

	case DRM_FORMAT_ARGB1555:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
		break;

	case DRM_FORMAT_RGBA5551:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
		break;

	case DRM_FORMAT_RGBA4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
		break;

	case DRM_FORMAT_XRGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
		break;

	case DRM_FORMAT_RGB888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
		break;

	case DRM_FORMAT_RGB565:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static const uint32_t sun4i_backend_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
};
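
/*
 * The backend itself only scans out linear buffers in one of the formats
 * above; anything else (e.g. a tiled modifier) has to be routed through
 * the frontend instead, when one is available.
 */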
bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
{
	unsigned int i;

	if (modifier != DRM_FORMAT_MOD_LINEAR)
		return false;

	for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
		if (sun4i_backend_formats[i] == fmt)
			return true;

	return false;
}
int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
				     int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;

	DRM_DEBUG_DRIVER("Updating layer %d\n", layer);

	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
		DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n",
				 state->crtc_w, state->crtc_h);
		regmap_write(backend->engine.regs, SUN4I_BACKEND_DISSIZE_REG,
			     SUN4I_BACKEND_DISSIZE(state->crtc_w,
						   state->crtc_h));
	}

	/* Set height and width */
	DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
			 state->crtc_w, state->crtc_h);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
		     SUN4I_BACKEND_LAYSIZE(state->crtc_w,
					   state->crtc_h));

	/* Set base coordinates */
	DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
			 state->crtc_x, state->crtc_y);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
		     SUN4I_BACKEND_LAYCOOR(state->crtc_x,
					   state->crtc_y));

	return 0;
}
static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
					   int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	const struct drm_format_info *format = fb->format;
	const uint32_t fmt = format->format;
	u32 val = SUN4I_BACKEND_IYUVCTL_EN;
	int i;

	for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
		regmap_write(backend->engine.regs,
			     SUN4I_BACKEND_YGCOEF_REG(i),
			     sunxi_bt601_yuv2rgb_coef[i]);

	/*
	 * We should do that only for a single plane, but the
	 * framebuffer's atomic_check has our back on this.
	 */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);

	/* TODO: Add support for the multi-planar YUV formats */
	if (drm_format_info_is_yuv_packed(format) &&
	    drm_format_info_is_yuv_sampling_422(format))
		val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
	else
		DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);

	/*
	 * Allwinner seems to list the pixel sequence from right to left, while
	 * DRM lists it from left to right.
	 */
	switch (fmt) {
	case DRM_FORMAT_YUYV:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
		break;
	case DRM_FORMAT_YVYU:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
		break;
	case DRM_FORMAT_UYVY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
		break;
	case DRM_FORMAT_VYUY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
				 fmt);
	}

	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);

	return 0;
}
int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
				       int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	bool interlaced = false;
	u32 val;
	int ret;

	/* Clear the YUV mode */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);

	if (plane->state->crtc)
		interlaced = plane->state->crtc->state->adjusted_mode.flags
			& DRM_MODE_FLAG_INTERLACE;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_ITLMOD_EN,
			   interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);

	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
			 interlaced ? "on" : "off");

	val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
		val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
			   val);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_format(backend, layer, plane);

	ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}
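
/*
 * When a plane is fed by the frontend, the layer does not fetch from
 * memory itself: the VDOEN bit set below switches the layer input over
 * to the frontend output, and the format programmed here is the one the
 * frontend has been configured to produce.
 */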
int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
					int layer, uint32_t fmt)
{
	u32 val;
	int ret;

	ret = sun4i_backend_drm_format_to_layer(fmt, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}
static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
					   struct drm_framebuffer *fb,
					   dma_addr_t paddr)
{
	/* TODO: Add support for the multi-planar YUV formats */
	DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);

	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
		     fb->pitches[0] * 8);

	return 0;
}
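
/*
 * RGB layer addresses are programmed in bits, not bytes: the byte address
 * is shifted left by 3, the low 32 bits of that bit address go into the
 * per-layer L32ADD register, and the remaining high bits (byte address
 * >> 29) go into the layer's field of the shared H4ADD register.
 */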
int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
				      int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 lo_paddr, hi_paddr;
	dma_addr_t paddr;

	/* Set the line width */
	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
		     fb->pitches[0] * 8);

	/* Get the start of the displayed memory */
	paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
	DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_buffer(backend, fb, paddr);

	/* Write the 32 lower bits of the address (in bits) */
	lo_paddr = paddr << 3;
	DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
		     lo_paddr);

	/* And the upper bits */
	hi_paddr = paddr >> 29;
	DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
			   SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
			   SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));

	return 0;
}
int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
				    struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
	unsigned int priority = state->normalized_zpos;
	unsigned int pipe = p_state->pipe;

	DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
			 layer, priority, pipe);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));

	return 0;
}
void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
				 int layer)
{
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
}
static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
	u16 src_h = state->src_h >> 16;
	u16 src_w = state->src_w >> 16;

	DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
			 src_w, src_h, state->crtc_w, state->crtc_h);

	if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
		return true;

	return false;
}
static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
	struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
	struct sun4i_backend *backend = layer->backend;
	uint32_t format = state->fb->format->format;
	uint64_t modifier = state->fb->modifier;

	if (IS_ERR(backend->frontend))
		return false;

	if (!sun4i_frontend_format_is_supported(format, modifier))
		return false;

	if (!sun4i_backend_format_is_supported(format, modifier))
		return true;

	/*
	 * TODO: The backend alone allows 2x and 4x integer scaling, including
	 * support for an alpha component (which the frontend doesn't support).
	 * Use the backend directly instead of the frontend in this case, with
	 * another test to return false.
	 */
	if (sun4i_backend_plane_uses_scaler(state))
		return true;

	/*
	 * Here the format is supported by both the frontend and the backend
	 * and no frontend scaling is required, so use the backend directly.
	 */
	return false;
}
static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
					     bool *uses_frontend)
{
	if (sun4i_backend_plane_uses_frontend(state)) {
		*uses_frontend = true;
		return true;
	}

	*uses_frontend = false;

	/* Scaling is not supported without the frontend. */
	if (sun4i_backend_plane_uses_scaler(state))
		return false;

	return true;
}
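
/*
 * Before touching the shadow registers for a new commit, wait for the
 * hardware to finish loading the previous one (LOADCTL clears once the
 * load has happened), so we never modify a configuration that is still
 * being fetched.
 */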
static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
				       struct drm_crtc_state *old_state)
{
	u32 val;

	WARN_ON(regmap_read_poll_timeout(engine->regs,
					 SUN4I_BACKEND_REGBUFFCTL_REG,
					 val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
					 100, 50000));
}
static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
				      struct drm_crtc_state *crtc_state)
{
	struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_device *drm = state->dev;
	struct drm_plane *plane;
	unsigned int num_planes = 0;
	unsigned int num_alpha_planes = 0;
	unsigned int num_frontend_planes = 0;
	unsigned int num_alpha_planes_max = 1;
	unsigned int num_yuv_planes = 0;
	unsigned int current_pipe = 0;
	unsigned int i;

	DRM_DEBUG_DRIVER("Starting checking our planes\n");

	if (!crtc_state->planes_changed)
		return 0;

	drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);
		struct sun4i_layer_state *layer_state =
			state_to_sun4i_layer_state(plane_state);
		struct drm_framebuffer *fb = plane_state->fb;
		struct drm_format_name_buf format_name;

		if (!sun4i_backend_plane_is_supported(plane_state,
						      &layer_state->uses_frontend))
			return -EINVAL;

		if (layer_state->uses_frontend) {
			DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
					 plane->index);
			num_frontend_planes++;
		} else {
			if (fb->format->is_yuv) {
				DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
				num_yuv_planes++;
			}
		}

		DRM_DEBUG_DRIVER("Plane FB format is %s\n",
				 drm_get_format_name(fb->format->format,
						     &format_name));
		if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			num_alpha_planes++;

		DRM_DEBUG_DRIVER("Plane zpos is %d\n",
				 plane_state->normalized_zpos);

		/* Sort our planes by Zpos */
		plane_states[plane_state->normalized_zpos] = plane_state;

		num_planes++;
	}

	/* All our planes were disabled, bail out */
	if (!num_planes)
		return 0;
	/*
	 * The hardware is a bit unusual here.
	 *
	 * Even though it supports 4 layers, it does the composition
	 * in two separate steps.
	 *
	 * The first one is assigning a layer to one of its two
	 * pipes. If more than 1 layer is assigned to the same pipe,
	 * and if pixels overlap, the pipe will take the pixel from
	 * the layer with the highest priority.
	 *
	 * The second step is the actual alpha blending, that takes
	 * the two pipes as input, and uses the potential alpha
	 * component to do the transparency between the two.
	 *
	 * This two-step scenario makes us unable to guarantee a
	 * robust alpha blending between the 4 layers in all
	 * situations, since this means that we need to have one layer
	 * with alpha at the lowest position of our two pipes.
	 *
	 * However, we cannot even do that on every platform, since
	 * the hardware has a bug where the lowest plane of the lowest
	 * pipe (pipe 0, priority 0), if it has any alpha, will
	 * discard the pixel data entirely and just display the pixels
	 * in the background color (black by default).
	 *
	 * This means that on the affected platforms, we effectively
	 * have only three valid configurations with alpha, all of
	 * them with the alpha being on pipe 1 with the lowest
	 * position, which can be 1, 2 or 3 depending on the number of
	 * planes and their zpos.
	 */

	/* For platforms that are not affected by the issue described above. */
	if (backend->quirks->supports_lowest_plane_alpha)
		num_alpha_planes_max++;

	if (num_alpha_planes > num_alpha_planes_max) {
		DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
		return -EINVAL;
	}

	/* We can't have an alpha plane at the lowest position */
	if (!backend->quirks->supports_lowest_plane_alpha &&
	    (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
		return -EINVAL;
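
	/*
	 * Example: with three enabled planes where only the topmost one
	 * has alpha, the planes at zpos 0 and 1 stay on pipe 0 and the
	 * plane at zpos 2 is moved to pipe 1 by the loop below, so the
	 * alpha blending happens between the two pipes.
	 */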
	for (i = 1; i < num_planes; i++) {
		struct drm_plane_state *p_state = plane_states[i];
		struct drm_framebuffer *fb = p_state->fb;
		struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);

		/*
		 * The only alpha position is the lowest plane of the
		 * second pipe.
		 */
		if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			current_pipe++;

		s_state->pipe = current_pipe;
	}

	/* We can only have a single YUV plane at a time */
	if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
		DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
		return -EINVAL;
	}

	if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
		DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
			 num_planes, num_alpha_planes, num_frontend_planes,
			 num_yuv_planes);

	return 0;
}
static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
{
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct sun4i_frontend *frontend = backend->frontend;

	if (!frontend)
		return;

	/*
	 * In a teardown scenario with the frontend involved, we have
	 * to keep the frontend enabled until the next vblank, and
	 * only then disable it.
	 *
	 * This is due to the fact that the backend will not take into
	 * account the new configuration (with the plane that used to
	 * be fed by the frontend now disabled) until we write to the
	 * commit bit and the hardware fetches the new configuration
	 * during the next vblank.
	 *
	 * So we keep the frontend around in order to prevent any
	 * visual artifacts.
	 */
	spin_lock(&backend->frontend_lock);
	if (backend->frontend_teardown) {
		sun4i_frontend_exit(frontend);
		backend->frontend_teardown = false;
	}
	spin_unlock(&backend->frontend_lock);
}
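
/*
 * The A33 variant of the backend has an extra "SAT" block (apparently a
 * saturation enhancement unit) with its own clock and reset line, which
 * we only need to bring up for the rest of the backend to work.
 */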
static int sun4i_backend_init_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);
	int ret;

	backend->sat_reset = devm_reset_control_get(dev, "sat");
	if (IS_ERR(backend->sat_reset)) {
		dev_err(dev, "Couldn't get the SAT reset line\n");
		return PTR_ERR(backend->sat_reset);
	}

	ret = reset_control_deassert(backend->sat_reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert the SAT reset line\n");
		return ret;
	}

	backend->sat_clk = devm_clk_get(dev, "sat");
	if (IS_ERR(backend->sat_clk)) {
		dev_err(dev, "Couldn't get our SAT clock\n");
		ret = PTR_ERR(backend->sat_clk);
		goto err_assert_reset;
	}

	ret = clk_prepare_enable(backend->sat_clk);
	if (ret) {
		dev_err(dev, "Couldn't enable the SAT clock\n");
		goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	reset_control_assert(backend->sat_reset);
	return ret;
}
static int sun4i_backend_free_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	clk_disable_unprepare(backend->sat_clk);
	reset_control_assert(backend->sat_reset);

	return 0;
}
/*
 * The display backend can take video output from the display frontend, or
 * the display enhancement unit on the A80, as input for one of its layers.
 * This relationship within the display pipeline is encoded in the device
 * tree with of_graph, and we use it here to figure out which backend, if
 * there are 2 or more, we are currently probing. The number would be in
 * the "reg" property of the upstream output port endpoint.
 */
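/*
 * Hypothetical example: if this backend's input endpoint has a remote
 * endpoint on the frontend's output port declared as
 * "endpoint@1 { reg = <1>; ... }", sun4i_backend_of_get_id() below
 * returns 1.
 */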
static int sun4i_backend_of_get_id(struct device_node *node)
{
	struct device_node *ep, *remote;
	struct of_endpoint of_ep;

	/* Input port is 0, and we want the first endpoint. */
	ep = of_graph_get_endpoint_by_regs(node, 0, -1);
	if (!ep)
		return -EINVAL;

	remote = of_graph_get_remote_endpoint(ep);
	of_node_put(ep);
	if (!remote)
		return -EINVAL;

	of_graph_parse_endpoint(remote, &of_ep);
	of_node_put(remote);

	return of_ep.id;
}
/* TODO: This needs to take multiple pipelines into account */
static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
							   struct device_node *node)
{
	struct device_node *port, *ep, *remote;
	struct sun4i_frontend *frontend;

	port = of_graph_get_port_by_id(node, 0);
	if (!port)
		return ERR_PTR(-EINVAL);

	for_each_available_child_of_node(port, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote)
			continue;
		of_node_put(remote);

		/* does this node match any registered engines? */
		list_for_each_entry(frontend, &drv->frontend_list, list) {
			if (remote == frontend->node) {
				of_node_put(port);
				of_node_put(ep);
				return frontend;
			}
		}
	}

	of_node_put(port);
	return ERR_PTR(-EINVAL);
}
static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
	.atomic_begin			= sun4i_backend_atomic_begin,
	.atomic_check			= sun4i_backend_atomic_check,
	.commit				= sun4i_backend_commit,
	.layers_init			= sun4i_layers_init,
	.apply_color_correction		= sun4i_backend_apply_color_correction,
	.disable_color_correction	= sun4i_backend_disable_color_correction,
	.vblank_quirk			= sun4i_backend_vblank_quirk,
};
static const struct regmap_config sun4i_backend_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= 0x5800,
};
static int sun4i_backend_bind(struct device *dev, struct device *master,
			      void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = data;
	struct sun4i_drv *drv = drm->dev_private;
	struct sun4i_backend *backend;
	const struct sun4i_backend_quirks *quirks;
	struct resource *res;
	void __iomem *regs;
	int i, ret;

	backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
	if (!backend)
		return -ENOMEM;
	dev_set_drvdata(dev, backend);
	spin_lock_init(&backend->frontend_lock);
	if (of_find_property(dev->of_node, "interconnects", NULL)) {
		/*
		 * This assumes we have the same DMA constraints for all the
		 * devices in our pipeline (all the backends, but also the
		 * frontends). This sounds bad, but it has always been the case
		 * for us, and DRM doesn't do per-device allocation either, so
		 * we would need to fix DRM first...
		 */
		ret = of_dma_configure(drm->dev, dev->of_node, true);
		if (ret)
			return ret;
	} else {
		/*
		 * If we don't have the interconnect property, most likely
		 * because of an old DT, we need to set the DMA offset by hand
		 * on our device since the RAM mapping is at 0 for the DMA bus,
		 * unlike the CPU.
		 *
		 * XXX(hch): this has no business in a driver and needs to move
		 * to the device tree.
		 *
		 * If there are two backends in the system, the second call to
		 * dma_direct_set_offset returns -EINVAL, and the driver would
		 * report an error even though it has been set up properly
		 * before. Ignore EINVAL for now, but this should really be
		 * removed eventually.
		 */
		ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G);
		if (ret && ret != -EINVAL)
			return ret;
	}
	backend->engine.node = dev->of_node;
	backend->engine.ops = &sun4i_backend_engine_ops;
	backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
	if (backend->engine.id < 0)
		return backend->engine.id;

	backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
	if (IS_ERR(backend->frontend))
		dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	backend->reset = devm_reset_control_get(dev, NULL);
	if (IS_ERR(backend->reset)) {
		dev_err(dev, "Couldn't get our reset line\n");
		return PTR_ERR(backend->reset);
	}

	ret = reset_control_deassert(backend->reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert our reset line\n");
		return ret;
	}

	backend->bus_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(backend->bus_clk)) {
		dev_err(dev, "Couldn't get the backend bus clock\n");
		ret = PTR_ERR(backend->bus_clk);
		goto err_assert_reset;
	}
	clk_prepare_enable(backend->bus_clk);

	backend->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(backend->mod_clk)) {
		dev_err(dev, "Couldn't get the backend module clock\n");
		ret = PTR_ERR(backend->mod_clk);
		goto err_disable_bus_clk;
	}

	ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
	if (ret) {
		dev_err(dev, "Couldn't set the module clock frequency\n");
		goto err_disable_bus_clk;
	}

	clk_prepare_enable(backend->mod_clk);

	backend->ram_clk = devm_clk_get(dev, "ram");
	if (IS_ERR(backend->ram_clk)) {
		dev_err(dev, "Couldn't get the backend RAM clock\n");
		ret = PTR_ERR(backend->ram_clk);
		goto err_disable_mod_clk;
	}
	clk_prepare_enable(backend->ram_clk);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend")) {
		ret = sun4i_backend_init_sat(dev);
		if (ret) {
			dev_err(dev, "Couldn't init SAT resources\n");
			goto err_disable_ram_clk;
		}
	}

	backend->engine.regs = devm_regmap_init_mmio(dev, regs,
						     &sun4i_backend_regmap_config);
	if (IS_ERR(backend->engine.regs)) {
		dev_err(dev, "Couldn't create the backend regmap\n");
		return PTR_ERR(backend->engine.regs);
	}

	list_add_tail(&backend->engine.list, &drv->engine_list);

	/*
	 * Many of the backend's layer configuration registers have
	 * undefined default values. This poses a risk as we use
	 * regmap_update_bits in some places, and don't overwrite
	 * the whole register.
	 *
	 * Clear the registers here to have something predictable.
	 */
	for (i = 0x800; i < 0x1000; i += 4)
		regmap_write(backend->engine.regs, i, 0);

	/* Disable registers autoloading */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);

	/* Enable the backend */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
		     SUN4I_BACKEND_MODCTL_DEBE_EN |
		     SUN4I_BACKEND_MODCTL_START_CTL);
	/* Set output selection if needed */
	quirks = of_device_get_match_data(dev);
	if (quirks->needs_output_muxing) {
		/*
		 * We assume there is no dynamic muxing of backends
		 * and TCONs, so we select the backend with the same ID.
		 *
		 * While dynamic selection might be interesting, the
		 * CRTC is tied to the TCON while the layers are tied
		 * to the backends, so we would need to switch between
		 * groups of layers. There might not be a way to
		 * represent this constraint in DRM.
		 */
		regmap_update_bits(backend->engine.regs,
				   SUN4I_BACKEND_MODCTL_REG,
				   SUN4I_BACKEND_MODCTL_OUT_SEL,
				   (backend->engine.id
				    ? SUN4I_BACKEND_MODCTL_OUT_LCD1
				    : SUN4I_BACKEND_MODCTL_OUT_LCD0));
	}

	backend->quirks = quirks;

	return 0;
err_disable_ram_clk:
	clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
	clk_disable_unprepare(backend->bus_clk);
err_assert_reset:
	reset_control_assert(backend->reset);
	return ret;
}
static void sun4i_backend_unbind(struct device *dev, struct device *master,
				 void *data)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	list_del(&backend->engine.list);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend"))
		sun4i_backend_free_sat(dev);

	clk_disable_unprepare(backend->ram_clk);
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
	clk_disable_unprepare(backend->bus_clk);
	reset_control_assert(backend->reset);
}
static const struct component_ops sun4i_backend_ops = {
	.bind	= sun4i_backend_bind,
	.unbind	= sun4i_backend_unbind,
};

static int sun4i_backend_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &sun4i_backend_ops);
}

static int sun4i_backend_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sun4i_backend_ops);

	return 0;
}
static const struct sun4i_backend_quirks sun4i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun5i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun6i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun7i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
	.supports_lowest_plane_alpha = true,
};

static const struct sun4i_backend_quirks sun9i_backend_quirks = {
};
static const struct of_device_id sun4i_backend_of_table[] = {
	{
		.compatible = "allwinner,sun4i-a10-display-backend",
		.data = &sun4i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun5i-a13-display-backend",
		.data = &sun5i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun6i-a31-display-backend",
		.data = &sun6i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun7i-a20-display-backend",
		.data = &sun7i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a23-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a33-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun9i-a80-display-backend",
		.data = &sun9i_backend_quirks,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
static struct platform_driver sun4i_backend_platform_driver = {
	.probe		= sun4i_backend_probe,
	.remove		= sun4i_backend_remove,
	.driver		= {
		.name		= "sun4i-backend",
		.of_match_table	= sun4i_backend_of_table,
	},
};
module_platform_driver(sun4i_backend_platform_driver);

MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
MODULE_LICENSE("GPL");