nouveau_display.c

/*
 * Copyright (C) 2008 Maarten Maathuis.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <acpi/video.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "nouveau_fbcon.h"
#include "nouveau_crtc.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nv50_display.h"

#include <nvif/class.h>
#include <nvif/cl0046.h>
#include <nvif/event.h>

#include <dispnv50/crc.h>

int
nouveau_display_vblank_enable(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc;

	nv_crtc = nouveau_crtc(crtc);
	nvif_notify_get(&nv_crtc->vblank);

	return 0;
}

void
nouveau_display_vblank_disable(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc;

	nv_crtc = nouveau_crtc(crtc);
	nvif_notify_put(&nv_crtc->vblank);
}

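/* Convert the raw scanline counter reported by hardware into a position
 * relative to the start of active scanout; lines inside the vertical
 * blanking interval come out negative.
 */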
static inline int
calc(int blanks, int blanke, int total, int line)
{
	if (blanke >= blanks) {
		if (line >= blanks)
			line -= total;
	} else {
		if (line >= blanks)
			line -= total;
		line -= blanke + 1;
	}
	return line;
}

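/* Query the current scanout position of a head via the NV04_DISP_SCANOUTPOS
 * method, retrying (with a one-scanline delay) up to 20 times while the
 * returned vline is still zero.
 */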
static bool
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
				ktime_t *stime, ktime_t *etime)
{
	struct {
		struct nv04_disp_mthd_v0 base;
		struct nv04_disp_scanoutpos_v0 scan;
	} args = {
		.base.method = NV04_DISP_SCANOUTPOS,
		.base.head = nouveau_crtc(crtc)->index,
	};
	struct nouveau_display *disp = nouveau_display(crtc->dev);
	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
	int retry = 20;
	bool ret = false;

	do {
		ret = nvif_mthd(&disp->disp.object, 0, &args, sizeof(args));
		if (ret != 0)
			return false;

		if (args.scan.vline) {
			ret = true;
			break;
		}

		if (retry) ndelay(vblank->linedur_ns);
	} while (retry--);

	*hpos = args.scan.hline;
	*vpos = calc(args.scan.vblanks, args.scan.vblanke,
		     args.scan.vtotal, args.scan.vline);
	if (stime) *stime = ns_to_ktime(args.scan.time[0]);
	if (etime) *etime = ns_to_ktime(args.scan.time[1]);

	return ret;
}

bool
nouveau_display_scanoutpos(struct drm_crtc *crtc,
			   bool in_vblank_irq, int *vpos, int *hpos,
			   ktime_t *stime, ktime_t *etime,
			   const struct drm_display_mode *mode)
{
	return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
					       stime, etime);
}

static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

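/* Decode a DRM format modifier into the driver's tile_mode/kind pair.
 * Modifiers with a zero 'kind' field are legacy 16Bx2 block modifiers;
 * for those, the kind from the device's first advertised modifier is
 * substituted before decoding.
 */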
static void
nouveau_decode_mod(struct nouveau_drm *drm,
		   uint64_t modifier,
		   uint32_t *tile_mode,
		   uint8_t *kind)
{
	struct nouveau_display *disp = nouveau_display(drm->dev);
	BUG_ON(!tile_mode || !kind);

	if (modifier == DRM_FORMAT_MOD_LINEAR) {
		/* tile_mode will not be used in this case */
		*tile_mode = 0;
		*kind = 0;
	} else {
		/*
		 * Extract the block height and kind from the corresponding
		 * modifier fields.  See drm_fourcc.h for details.
		 */

		if ((modifier & (0xffull << 12)) == 0ull) {
			/* Legacy modifier.  Translate to this dev's 'kind.' */
			modifier |= disp->format_modifiers[0] & (0xffull << 12);
		}

		*tile_mode = (uint32_t)(modifier & 0xF);
		*kind = (uint8_t)((modifier >> 12) & 0xFF);

		if (drm->client.device.info.chipset >= 0xc0)
			*tile_mode <<= 4;
	}
}

void
nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
			       uint32_t *tile_mode,
			       uint8_t *kind)
{
	if (fb->flags & DRM_MODE_FB_MODIFIERS) {
		struct nouveau_drm *drm = nouveau_drm(fb->dev);

		nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
	} else {
		const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);

		*tile_mode = nvbo->mode;
		*kind = nvbo->kind;
	}
}

static const u64 legacy_modifiers[] = {
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	DRM_FORMAT_MOD_INVALID
};

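/* Accept a modifier only if it appears in the device's advertised modifier
 * list or in the legacy 16Bx2 list above, then decode it.  Chipsets older
 * than Tesla reject modifiers outright.
 */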
static int
nouveau_validate_decode_mod(struct nouveau_drm *drm,
			    uint64_t modifier,
			    uint32_t *tile_mode,
			    uint8_t *kind)
{
	struct nouveau_display *disp = nouveau_display(drm->dev);
	int mod;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		return -EINVAL;
	}

	BUG_ON(!disp->format_modifiers);

	for (mod = 0;
	     (disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
	     (disp->format_modifiers[mod] != modifier);
	     mod++);

	if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
		for (mod = 0;
		     (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
		     (legacy_modifiers[mod] != modifier);
		     mod++);

		if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
			return -EINVAL;
	}

	nouveau_decode_mod(drm, modifier, tile_mode, kind);

	return 0;
}

static inline uint32_t
nouveau_get_width_in_blocks(uint32_t stride)
{
	/* GOBs per block in the x direction is always one, and GOBs are
	 * 64 bytes wide
	 */
	static const uint32_t log_block_width = 6;

	return (stride + (1 << log_block_width) - 1) >> log_block_width;
}

static inline uint32_t
nouveau_get_height_in_blocks(struct nouveau_drm *drm,
			     uint32_t height,
			     uint32_t log_block_height_in_gobs)
{
	uint32_t log_gob_height;
	uint32_t log_block_height;

	BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
		log_gob_height = 2;
	else
		log_gob_height = 3;

	log_block_height = log_block_height_in_gobs + log_gob_height;

	return (height + (1 << log_block_height) - 1) >> log_block_height;
}

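/* Verify that a block-linear plane with the given stride, height and
 * tile_mode, placed at 'offset', fits inside the backing buffer object.
 */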
static int
nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
		      uint32_t offset, uint32_t stride, uint32_t h,
		      uint32_t tile_mode)
{
	uint32_t gob_size, bw, bh;
	uint64_t bl_size;

	BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);

	if (drm->client.device.info.chipset >= 0xc0) {
		if (tile_mode & 0xF)
			return -EINVAL;

		tile_mode >>= 4;
	}

	if (tile_mode & 0xFFFFFFF0)
		return -EINVAL;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
		gob_size = 256;
	else
		gob_size = 512;

	bw = nouveau_get_width_in_blocks(stride);
	bh = nouveau_get_height_in_blocks(drm, h, tile_mode);

	bl_size = bw * bh * (1 << tile_mode) * gob_size;

	DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
		      offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
		      nvbo->bo.mem.size);

	if (bl_size + offset > nvbo->bo.mem.size)
		return -ERANGE;

	return 0;
}

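/* Create a nouveau framebuffer around an existing GEM object, validating
 * format, pitch and modifier constraints before handing the result back
 * through *pfb.
 */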
int
nouveau_framebuffer_new(struct drm_device *dev,
			const struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *gem,
			struct drm_framebuffer **pfb)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct drm_framebuffer *fb;
	const struct drm_format_info *info;
	unsigned int width, height, i;
	uint32_t tile_mode;
	uint8_t kind;
	int ret;

	/* YUV overlays have special requirements pre-NV50 */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
	    (mode_cmd->pixel_format == DRM_FORMAT_YUYV ||
	     mode_cmd->pixel_format == DRM_FORMAT_UYVY ||
	     mode_cmd->pixel_format == DRM_FORMAT_NV12 ||
	     mode_cmd->pixel_format == DRM_FORMAT_NV21) &&
	    (mode_cmd->pitches[0] & 0x3f || /* align 64 */
	     mode_cmd->pitches[0] >= 0x10000 || /* at most 64k pitch */
	     (mode_cmd->pitches[1] && /* pitches for planes must match */
	      mode_cmd->pitches[0] != mode_cmd->pitches[1]))) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_KMS("Unsuitable framebuffer: format: %s; pitches: 0x%x\n 0x%x\n",
			      drm_get_format_name(mode_cmd->pixel_format,
						  &format_name),
			      mode_cmd->pitches[0],
			      mode_cmd->pitches[1]);
		return -EINVAL;
	}

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
						&tile_mode, &kind)) {
			DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
				      mode_cmd->modifier[0]);
			return -EINVAL;
		}
	} else {
		tile_mode = nvbo->mode;
		kind = nvbo->kind;
	}

	info = drm_get_format_info(dev, mode_cmd);

	for (i = 0; i < info->num_planes; i++) {
		width = drm_format_info_plane_width(info,
						    mode_cmd->width,
						    i);
		height = drm_format_info_plane_height(info,
						      mode_cmd->height,
						      i);

		if (kind) {
			ret = nouveau_check_bl_size(drm, nvbo,
						    mode_cmd->offsets[i],
						    mode_cmd->pitches[i],
						    height, tile_mode);
			if (ret)
				return ret;
		} else {
			uint32_t size = mode_cmd->pitches[i] * height;

			if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
				return -ERANGE;
		}
	}

	if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
		return -ENOMEM;

	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	fb->obj[0] = gem;

	ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
	if (ret)
		kfree(fb);
	return ret;
}

struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
				struct drm_file *file_priv,
				const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (!gem)
		return ERR_PTR(-ENOENT);

	ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
	if (ret == 0)
		return fb;

	drm_gem_object_put(gem);
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = nouveau_fbcon_output_poll_changed,
};

struct nouveau_drm_prop_enum_list {
	u8 gen_mask;
	int type;
	char *name;
};

static struct nouveau_drm_prop_enum_list underscan[] = {
	{ 6, UNDERSCAN_AUTO, "auto" },
	{ 6, UNDERSCAN_OFF, "off" },
	{ 6, UNDERSCAN_ON, "on" },
	{}
};

static struct nouveau_drm_prop_enum_list dither_mode[] = {
	{ 7, DITHERING_MODE_AUTO, "auto" },
	{ 7, DITHERING_MODE_OFF, "off" },
	{ 1, DITHERING_MODE_ON, "on" },
	{ 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
	{ 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
	{ 4, DITHERING_MODE_TEMPORAL, "temporal" },
	{}
};

static struct nouveau_drm_prop_enum_list dither_depth[] = {
	{ 6, DITHERING_DEPTH_AUTO, "auto" },
	{ 6, DITHERING_DEPTH_6BPC, "6 bpc" },
	{ 6, DITHERING_DEPTH_8BPC, "8 bpc" },
	{}
};

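/* Create a DRM enum property from one of the tables above, including only
 * the entries whose gen_mask has the bit for the display generation 'gen'
 * set.
 */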
#define PROP_ENUM(p,gen,n,list) do { \
	struct nouveau_drm_prop_enum_list *l = (list); \
	int c = 0; \
	while (l->gen_mask) { \
		if (l->gen_mask & (1 << (gen))) \
			c++; \
		l++; \
	} \
	if (c) { \
		p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \
		l = (list); \
		while (p && l->gen_mask) { \
			if (l->gen_mask & (1 << (gen))) { \
				drm_property_add_enum(p, l->type, l->name); \
			} \
			l++; \
		} \
	} \
} while(0)

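/* Re-arm hotplug processing after resume: mark every connector as pending
 * and kick the HPD worker.
 */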
void
nouveau_display_hpd_resume(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	mutex_lock(&drm->hpd_lock);
	drm->hpd_pending = ~0;
	mutex_unlock(&drm->hpd_lock);

	schedule_work(&drm->hpd_work);
}

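/* Deferred hotplug handler: re-probe every non-MST connector whose bit is
 * set in hpd_pending, and send a hotplug event if any connector actually
 * changed state.
 */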
static void
nouveau_display_hpd_work(struct work_struct *work)
{
	struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
	struct drm_device *dev = drm->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u32 pending;
	bool changed = false;

	pm_runtime_get_sync(dev->dev);

	mutex_lock(&drm->hpd_lock);
	pending = drm->hpd_pending;
	drm->hpd_pending = 0;
	mutex_unlock(&drm->hpd_lock);

	/* Nothing to do, exit early without updating the last busy counter */
	if (!pending)
		goto noop;

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);

	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		enum drm_connector_status old_status = connector->status;
		u64 old_epoch_counter = connector->epoch_counter;

		if (!(pending & drm_connector_mask(connector)))
			continue;

		connector->status = drm_helper_probe_detect(connector, NULL,
							    false);
		if (old_epoch_counter == connector->epoch_counter)
			continue;

		changed = true;

		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.id, connector->name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->status),
			    old_epoch_counter, connector->epoch_counter);
	}

	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);

	pm_runtime_mark_last_busy(drm->dev->dev);
noop:
	pm_runtime_put_sync(drm->dev->dev);
}

#ifdef CONFIG_ACPI

static int
nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
	struct acpi_bus_event *info = data;
	int ret;

	if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
		if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
			ret = pm_runtime_get(drm->dev->dev);
			if (ret == 1 || ret == -EACCES) {
				/* If the GPU is already awake, or in a state
				 * where we can't wake it up, it can handle
				 * its own hotplug events.
				 */
				pm_runtime_put_autosuspend(drm->dev->dev);
			} else if (ret == 0) {
				/* We've started resuming the GPU already, so
				 * it will handle scheduling a full reprobe
				 * itself
				 */
				NV_DEBUG(drm, "ACPI requested connector reprobe\n");
				pm_runtime_put_noidle(drm->dev->dev);
			} else {
				NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
					ret);
			}

			/* acpi-video should not generate keypresses for this */
			return NOTIFY_BAD;
		}
	}

	return NOTIFY_DONE;
}
#endif

int
nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int ret;

	/*
	 * Enable hotplug interrupts (done as early as possible, since we need
	 * them for MST)
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		nvif_notify_get(&conn->hpd);
	}
	drm_connector_list_iter_end(&conn_iter);

	ret = disp->init(dev, resume, runtime);
	if (ret)
		return ret;

	/* enable connector detection and polling for connectors without HPD
	 * support
	 */
	drm_kms_helper_poll_enable(dev);

	return ret;
}

void
nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!suspend) {
		if (drm_drv_uses_atomic_modeset(dev))
			drm_atomic_helper_shutdown(dev);
		else
			drm_helper_force_disable_all(dev);
	}

	/* disable hotplug interrupts */
	drm_connector_list_iter_begin(dev, &conn_iter);
	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		nvif_notify_put(&conn->hpd);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!runtime)
		cancel_work_sync(&drm->hpd_work);

	drm_kms_helper_poll_disable(dev);
	disp->fini(dev, runtime, suspend);
}

static void
nouveau_display_create_properties(struct drm_device *dev)
{
	struct nouveau_display *disp = nouveau_display(dev);
	int gen;

	if (disp->disp.object.oclass < NV50_DISP)
		gen = 0;
	else
	if (disp->disp.object.oclass < GF110_DISP)
		gen = 1;
	else
		gen = 2;

	PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
	PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
	PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);

	disp->underscan_hborder_property =
		drm_property_create_range(dev, 0, "underscan hborder", 0, 128);

	disp->underscan_vborder_property =
		drm_property_create_range(dev, 0, "underscan vborder", 0, 128);

	if (gen < 1)
		return;

	/* -90..+90 */
	disp->vibrant_hue_property =
		drm_property_create_range(dev, 0, "vibrant hue", 0, 180);

	/* -100..+100 */
	disp->color_vibrance_property =
		drm_property_create_range(dev, 0, "color vibrance", 0, 200);
}

int
nouveau_display_create(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_display *disp;
	int ret;

	disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	drm_mode_config_init(dev);
	drm_mode_create_scaling_mode_property(dev);
	drm_mode_create_dvi_i_properties(dev);

	dev->mode_config.funcs = &nouveau_mode_config_funcs;
	dev->mode_config.fb_base = device->func->resource_addr(device, 1);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	} else {
		dev->mode_config.max_width = 16384;
		dev->mode_config.max_height = 16384;
	}

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	if (drm->client.device.info.chipset < 0x11)
		dev->mode_config.async_page_flip = false;
	else
		dev->mode_config.async_page_flip = true;

	drm_kms_helper_poll_init(dev);
	drm_kms_helper_poll_disable(dev);

	if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
		ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0,
				     &disp->disp);
		if (ret == 0) {
			nouveau_display_create_properties(dev);
			if (disp->disp.object.oclass < NV50_DISP)
				ret = nv04_display_create(dev);
			else
				ret = nv50_display_create(dev);
		}
	} else {
		ret = 0;
	}

	if (ret)
		goto disp_create_err;

	drm_mode_config_reset(dev);

	if (dev->mode_config.num_crtc) {
		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
		if (ret)
			goto vblank_err;

		if (disp->disp.object.oclass >= NV50_DISP)
			nv50_crc_init(dev);
	}

	INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
	mutex_init(&drm->hpd_lock);
#ifdef CONFIG_ACPI
	drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
	register_acpi_notifier(&drm->acpi_nb);
#endif

	return 0;

vblank_err:
	disp->dtor(dev);
disp_create_err:
	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	return ret;
}

void
nouveau_display_destroy(struct drm_device *dev)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);

#ifdef CONFIG_ACPI
	unregister_acpi_notifier(&drm->acpi_nb);
#endif

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);

	if (disp->dtor)
		disp->dtor(dev);

	nvif_disp_dtor(&disp->disp);

	nouveau_drm(dev)->display = NULL;
	mutex_destroy(&drm->hpd_lock);
	kfree(disp);
}

int
nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);

	if (drm_drv_uses_atomic_modeset(dev)) {
		if (!runtime) {
			disp->suspend = drm_atomic_helper_suspend(dev);
			if (IS_ERR(disp->suspend)) {
				int ret = PTR_ERR(disp->suspend);

				disp->suspend = NULL;
				return ret;
			}
		}
	}

	nouveau_display_fini(dev, true, runtime);
	return 0;
}

void
nouveau_display_resume(struct drm_device *dev, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);

	nouveau_display_init(dev, true, runtime);
	if (drm_drv_uses_atomic_modeset(dev)) {
		if (disp->suspend) {
			drm_atomic_helper_resume(dev, disp->suspend);
			disp->suspend = NULL;
		}
		return;
	}
}

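/* DUMB buffer support: allocate a buffer object with a 256-byte aligned
 * pitch, preferring VRAM and falling back to GART when the device has no
 * dedicated video memory.
 */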
int
nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *bo;
	uint32_t domain;
	int ret;

	args->pitch = roundup(args->width * (args->bpp / 8), 256);
	args->size = args->pitch * args->height;
	args->size = roundup(args->size, PAGE_SIZE);

	/* Use VRAM if there is any; otherwise fall back to system memory */
	if (nouveau_drm(dev)->client.device.info.ram_size != 0)
		domain = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		domain = NOUVEAU_GEM_DOMAIN_GART;

	ret = nouveau_gem_new(cli, args->size, 0, domain, 0, 0, &bo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &bo->bo.base, &args->handle);
	drm_gem_object_put(&bo->bo.base);
	return ret;
}

int
nouveau_display_dumb_map_offset(struct drm_file *file_priv,
				struct drm_device *dev,
				uint32_t handle, uint64_t *poffset)
{
	struct drm_gem_object *gem;

	gem = drm_gem_object_lookup(file_priv, handle);
	if (gem) {
		struct nouveau_bo *bo = nouveau_gem_object(gem);

		*poffset = drm_vma_node_offset_addr(&bo->bo.base.vma_node);
		drm_gem_object_put(gem);
		return 0;
	}

	return -ENOENT;
}