/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"
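/* One tracked userspace mapping of a legacy map, kept on drm_device::vmalist. */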
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
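/*
 * Compute the page protection for an I/O mapping (registers or frame
 * buffer), choosing write-combining or uncached access per architecture.
 */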
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

	/* We don't want graphics memory to be mapped encrypted */
	tmp = pgprot_decrypted(tmp);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
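/*
 * Compute the page protection for DMA and scatter-gather mappings; only
 * non-cache-coherent PowerPC needs anything other than the default.
 */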
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}
/*
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp)
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif
/*
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
/*
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dma_free_coherent(&dev->pdev->dev,
						  map->size,
						  map->handle,
						  map->offset);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/*
 * \c fault method for DMA virtual memory.
 *
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}
/*
 * \c fault method for scatter-gather virtual memory.
 *
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
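/*
 * Record a new userspace mapping on drm_device::vmalist. The caller must
 * hold drm_device::struct_mutex.
 */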
static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}
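/*
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Adds the \p vma to drm_device::vmalist under drm_device::struct_mutex.
 */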
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}
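/*
 * Unlink and free the drm_device::vmalist entry for \p vma. The caller must
 * hold drm_device::struct_mutex.
 */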
static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}
/*
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}
/*
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
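/*
 * Base offset added to a map's bus address before remapping registers or the
 * frame buffer; only Alpha needs a non-zero (dense memory) base.
 */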
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}
/*
 * mmap a DRM memory map.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		fallthrough;	/* to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		fallthrough;	/* to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
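/*
 * Entry point for mmap() on a legacy DRM device node: rejects unplugged
 * devices, then runs drm_mmap_locked() under drm_device::struct_mutex.
 */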
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
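/*
 * Usage sketch (illustration only, not part of this file): a legacy non-KMS
 * driver typically exposes this handler as the .mmap hook of its
 * file_operations; "foo_driver_fops" below is a hypothetical name.
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_legacy_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *	};
 */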
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif