/*
 * Copyright (C) 2021 - 2022 Alibaba Group. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
/****************************************************************************
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2014 - 2021 Vivante Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 *****************************************************************************
 *
 * The GPL License (GPL)
 *
 * Copyright (C) 2014 - 2021 Vivante Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 *****************************************************************************
 *
 * Note: This software is released under dual MIT and GPL licenses. A
 * recipient may use this file under the terms of either the MIT license or
 * GPL License. If you wish to use only one license not the other, you can
 * indicate your decision by deleting one of the above license notices in your
 * version of this file.
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/cdev.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/genalloc.h>

#include "video_memory.h"
#include "rsvmem_pool.h"

//#define VIDMEM_DMA_MAP
#define DISCRETE_PAGES 0
//#define VIDMEM_DEBUG

#define IS_ERROR(status)    (status > 0)

/*******************************************************************************
**
**  ONERROR
**
**      Jump to the error handler in case there is an error.
**
**  ASSUMPTIONS:
**
**      'status' variable of int type must be defined.
**
**  ARGUMENTS:
**
**      func    Function to evaluate.
*/
#define _ONERROR(prefix, func) \
    do \
    { \
        status = func; \
        if (IS_ERROR(status)) \
        { \
            goto OnError; \
        } \
    } \
    while (false)

#define ONERROR(func)       _ONERROR(, func)

/*******************************************************************************
**
**  ERR_BREAK
**
**      Executes a break statement on error.
**
**  ASSUMPTIONS:
**
**      'status' variable of int type must be defined.
**
**  ARGUMENTS:
**
**      func    Function to evaluate.
*/
#define _ERR_BREAK(prefix, func){ \
    status = func; \
    if (IS_ERROR(status)) \
    { \
        break; \
    } \
    do { } while (false); \
}

#define ERR_BREAK(func)     _ERR_BREAK(, func)

/*******************************************************************************
**
**  VERIFY_ARGUMENT
**
**      Check an argument against the specified expression. If the argument
**      evaluates to false, EINVAL will be returned from the current
**      function. In retail mode this macro does nothing.
**
**  ARGUMENTS:
**
**      arg     Argument to evaluate.
*/
#define _VERIFY_ARGUMENT(prefix, arg) \
    do \
    { \
        if (!(arg)) \
        { \
            return EINVAL; \
        } \
    } \
    while (false)

#define VERIFY_ARGUMENT(arg)    _VERIFY_ARGUMENT(, arg)

#define VM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
#define current_mm_mmap_sem current->mm->mmap_lock
#else
#define current_mm_mmap_sem current->mm->mmap_sem
#endif

#define GetPageCount(size, offset) ((((size) + ((offset) & ~PAGE_MASK)) + PAGE_SIZE - 1) >> PAGE_SHIFT)

#ifndef VIDMEM_DEBUG
#define DEBUG_PRINT(...) \
    do { \
    } while (0)
#else
#undef DEBUG_PRINT
#define DEBUG_PRINT(...) pr_info(__VA_ARGS__)
#endif
struct mem_block
{
    int contiguous;
    size_t size;
    size_t numPages;
    struct dma_buf *dmabuf;
    struct dma_buf_attachment *attachment;
    struct sg_table *sgt;
    unsigned long *pagearray;
    struct vm_area_struct *vma;
    bool is_cma;
    bool is_vi_mem;
    void *va;

    union
    {
        /* Pointer to an array of pages. */
        struct
        {
            struct page *contiguousPages;
            dma_addr_t dma_addr;
            int rsvmem_pool_region_id;
            int exact;
        };

        struct
        {
            /* Pointer to an array of pointers to page. */
            struct page **nonContiguousPages;
            struct page **Pages1M;
            int numPages1M;
            int *isExact;
        };
    };
};
struct mem_node
{
    struct mem_block memBlk;
    unsigned long busAddr;
    int isImported;
    struct list_head link;
};

struct file_node
{
    struct list_head memList;
    struct file *filp;
    struct list_head link;
};

static struct list_head fileList;

static int vidalloc_major = 0;
static int vidalloc_minor = 0;
static struct device *gdev = NULL;
static struct cdev vidalloc_cdev;
static dev_t vidalloc_devt;
static struct class *vidalloc_class;

static DEFINE_SPINLOCK(mem_lock);

#if 1
static int
getPhysical(
    IN struct mem_block *MemBlk,
    IN unsigned int Offset,
    OUT unsigned long *Physical
    );
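
/*
 * Per-file bookkeeping: every open of the character device gets a file_node,
 * and every allocation or import made through that file is tracked as a
 * mem_node on the file's memList, keyed by bus address. All list traversal
 * and mutation is serialized by the global mem_lock spinlock; lookups drop
 * the lock before returning, so callers rely on the node staying alive until
 * it is explicitly freed or the file is released.
 */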
static struct file_node *find_and_delete_file_node(struct file *filp)
{
    struct file_node *node;
    struct file_node *temp;

    spin_lock(&mem_lock);
    list_for_each_entry_safe(node, temp, &fileList, link)
    {
        if (node->filp == filp)
        {
            list_del(&node->link);
            spin_unlock(&mem_lock);
            return node;
        }
    }
    spin_unlock(&mem_lock);

    return NULL;
}

static struct file_node *get_file_node(struct file *filp)
{
    struct file_node *node;

    spin_lock(&mem_lock);
    list_for_each_entry(node, &fileList, link)
    {
        if (node->filp == filp)
        {
            spin_unlock(&mem_lock);
            return node;
        }
    }
    spin_unlock(&mem_lock);

    return NULL;
}

static struct mem_node *get_mem_node(struct file *filp, unsigned long bus_address, int imported)
{
    struct file_node *fnode;
    struct mem_node *node;

    fnode = get_file_node(filp);
    if (NULL == fnode)
    {
        return NULL;
    }

    spin_lock(&mem_lock);
    list_for_each_entry(node, &fnode->memList, link)
    {
        if (node->busAddr == bus_address && node->isImported == imported)
        {
            spin_unlock(&mem_lock);
            return node;
        }
    }
    spin_unlock(&mem_lock);

    return NULL;
}
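
/*
 * Small wrappers over the kernel allocators: requests larger than one page
 * go through vmalloc() so big page arrays do not depend on finding
 * physically contiguous kernel memory, while smaller requests use kmalloc().
 * FreeMemory() picks the matching release path with is_vmalloc_addr(), so
 * callers never need to remember which allocator was used.
 */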
static int
AllocateMemory(
    IN size_t Bytes,
    OUT void **Memory
    )
{
    void *memory = NULL;
    int status = 0;

    /* Verify the arguments. */
    VERIFY_ARGUMENT(Bytes > 0);
    VERIFY_ARGUMENT(Memory != NULL);

    if (Bytes > PAGE_SIZE)
    {
        memory = (void *)vmalloc(Bytes);
    }
    else
    {
        memory = (void *)kmalloc(Bytes, GFP_KERNEL | __GFP_NOWARN);
    }

    if (memory == NULL)
    {
        /* Out of memory. */
        ONERROR(ENOMEM);
    }

    /* Return pointer to the memory allocation. */
    *Memory = memory;

OnError:
    /* Return the status. */
    return status;
}
static int
FreeMemory(
    IN void *Memory
    )
{
    /* Verify the arguments. */
    VERIFY_ARGUMENT(Memory != NULL);

    /* Free the memory from the OS pool. */
    if (is_vmalloc_addr(Memory))
    {
        vfree(Memory);
    }
    else
    {
        kfree(Memory);
    }

    /* Success. */
    return 0;
}
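
/*
 * GetSGT() builds a scatter-gather table covering Bytes of the block
 * starting at Offset. For contiguous blocks a temporary page array is
 * synthesized from the first page; for non-contiguous blocks the stored
 * page-pointer array is used directly. The caller owns the returned
 * sg_table and releases it with sg_free_table() plus FreeMemory().
 */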
static int
GetSGT(
    IN struct mem_block *MemBlk,
    IN size_t Offset,
    IN size_t Bytes,
    OUT void **SGT
    )
{
    struct page **pages = NULL;
    struct page **tmpPages = NULL;
    struct sg_table *sgt = NULL;
    struct mem_block *memBlk = MemBlk;
    int status = 0;
    size_t offset = Offset & ~PAGE_MASK; /* Offset to the first page */
    size_t skipPages = Offset >> PAGE_SHIFT; /* skipped pages */
    size_t numPages = (PAGE_ALIGN(Offset + Bytes) >> PAGE_SHIFT) - skipPages;
    size_t i;

    if (memBlk->contiguous)
    {
        DEBUG_PRINT("[vidmem] Contiguous memory, %zu pages\n", numPages);
        ONERROR(AllocateMemory(sizeof(struct page *) * numPages, (void **)&tmpPages));
        pages = tmpPages;

        for (i = 0; i < numPages; ++i)
        {
            pages[i] = nth_page(memBlk->contiguousPages, i + skipPages);
        }
    }
    else
    {
        DEBUG_PRINT("[vidmem] Non-contiguous memory, %zu pages\n", numPages);
        pages = &memBlk->nonContiguousPages[skipPages];
    }

    /* A single table is enough; sg_alloc_table_from_pages() sizes the
     * scatterlist itself. */
    ONERROR(AllocateMemory(sizeof(struct sg_table), (void **)&sgt));

    if (sg_alloc_table_from_pages(sgt, pages, numPages, offset, Bytes, GFP_KERNEL) < 0)
    {
        ONERROR(EPERM);
    }

    *SGT = (void *)sgt;

OnError:
    if (tmpPages)
    {
        FreeMemory(tmpPages);
    }

    if (IS_ERROR(status) && sgt)
    {
        FreeMemory(sgt);
    }

    return status;
}
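
/*
 * Mmap() backs a user VMA with the block's pages. CMA blocks are delegated
 * to dma_mmap_coherent(); other contiguous blocks are mapped with a single
 * remap_pfn_range() call, and non-contiguous blocks are mapped page by page.
 * The mapping is write-combined and flagged so it is never copied on fork
 * or included in core dumps.
 */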
static int
Mmap(
    IN struct mem_block *MemBlk,
    IN size_t skipPages,
    IN size_t numPages,
    IN struct vm_area_struct *vma
    )
{
    struct mem_block *memBlk = MemBlk;
    int status = 0;

    vma->vm_flags |= VM_FLAGS;

    /* Make this mapping write combined. */
    vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

    /* Now map all the pages to this user address. */
    if (memBlk->contiguous)
    {
        /* Map kernel memory to user space. */
        if (memBlk->is_cma == true)
        {
            return dma_mmap_coherent(gdev, vma, memBlk->va,
                memBlk->dma_addr, vma->vm_end - vma->vm_start);
        }
        else
        {
            if (remap_pfn_range(vma,
                                vma->vm_start,
                                page_to_pfn(memBlk->contiguousPages) + skipPages,
                                numPages << PAGE_SHIFT,
                                vma->vm_page_prot) < 0)
            {
                ONERROR(ENOMEM);
            }
        }
    }
    else
    {
        size_t i;
        unsigned long start = vma->vm_start;

        for (i = 0; i < numPages; ++i)
        {
            unsigned long pfn = page_to_pfn(memBlk->nonContiguousPages[i + skipPages]);

            if (remap_pfn_range(vma,
                                start,
                                pfn,
                                PAGE_SIZE,
                                vma->vm_page_prot) < 0)
            {
                ONERROR(ENOMEM);
            }

            start += PAGE_SIZE;
        }
    }

OnError:
    return status;
}
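
/*
 * Attach() adopts an externally created dma-buf: it attaches this device,
 * maps the buffer's scatterlist, and flattens it into a per-page physical
 * address array so the rest of the driver can treat imported and locally
 * allocated blocks the same way. With VIDMEM_DMA_MAP defined the array
 * holds DMA addresses instead of raw physical page addresses.
 */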
static int
Attach(
    INOUT struct mem_block *MemBlk
    )
{
    int status;
    struct mem_block *memBlk = MemBlk;

    struct dma_buf *dmabuf = memBlk->dmabuf;
    struct sg_table *sgt = NULL;
    struct dma_buf_attachment *attachment = NULL;
    int npages = 0;
    unsigned long *pagearray = NULL;
    int i, j, k = 0;
    struct scatterlist *s;
    unsigned int size = 0;

    if (!dmabuf)
    {
        ONERROR(EFAULT);
    }

    /* dma_buf_attach() reports failure with ERR_PTR(), not NULL. */
    attachment = dma_buf_attach(dmabuf, gdev);
    if (IS_ERR_OR_NULL(attachment))
    {
        attachment = NULL;
        ONERROR(EFAULT);
    }

    /* Likewise for dma_buf_map_attachment(). */
    sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
    if (IS_ERR_OR_NULL(sgt))
    {
        sgt = NULL;
        ONERROR(EFAULT);
    }

    /* Prepare page array. */
    /* Get number of pages. */
    for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
    {
        npages += (sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    /* Allocate page array. */
    ONERROR(AllocateMemory(npages * sizeof(*pagearray), (void **)&pagearray));

    /* Fill page array. */
    for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
    {
        for (j = 0; j < (sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE; j++)
        {
#ifdef VIDMEM_DMA_MAP
            pagearray[k++] = sg_dma_address(s) + j * PAGE_SIZE;
#else
            pagearray[k++] = page_to_phys(nth_page(sg_page(s), j));
#endif
        }

        size += sg_dma_len(s);
    }

    memBlk->pagearray = pagearray;
    memBlk->attachment = attachment;
    memBlk->sgt = sgt;
    memBlk->numPages = npages;
    memBlk->size = size;
    memBlk->contiguous = (sgt->nents == 1) ? true : false;

    return 0;

OnError:
    if (pagearray)
    {
        FreeMemory(pagearray);
        pagearray = NULL;
    }

    if (sgt)
    {
        dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
    }

    if (attachment)
    {
        dma_buf_detach(dmabuf, attachment);
    }

    return status;
}
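
/*
 * Exporter side of the dma-buf interface. These callbacks let another driver
 * (or userspace via mmap) consume a block allocated here: _dmabuf_map()
 * produces and DMA-maps a fresh sg_table per attachment, _dmabuf_unmap()
 * undoes it, and _dmabuf_mmap() reuses the common Mmap() helper. kmap
 * support is intentionally a stub.
 */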
static struct sg_table *_dmabuf_map(struct dma_buf_attachment *attachment,
                                    enum dma_data_direction direction)
{
    struct sg_table *sgt = NULL;
    struct dma_buf *dmabuf = attachment->dmabuf;
    struct mem_block *memBlk = dmabuf->priv;
    int status = 0;

    DEBUG_PRINT("[vidmem] %s\n", __func__);

    do
    {
        ERR_BREAK(GetSGT(memBlk, 0, memBlk->size, (void **)&sgt));

        if (dma_map_sg(attachment->dev, sgt->sgl, sgt->nents, direction) == 0)
        {
            sg_free_table(sgt);
            FreeMemory(sgt);
            sgt = NULL;
            ERR_BREAK(EPERM);
        }
    }
    while (false);

#ifdef VIDMEM_DEBUG
    if (sgt)
    {
        int i;
        struct scatterlist *s;

        DEBUG_PRINT("[vidmem] sgt: nents = %u, sgl: page_link = %#lx, offset = %#x, length = %#x, dma_address = %llx\n",
            sgt->nents, sgt->sgl->page_link, sgt->sgl->offset, sgt->sgl->length, sgt->sgl->dma_address);

        for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
        {
            unsigned long phys = page_to_phys(nth_page(sg_page(s), 0));

            DEBUG_PRINT("[vidmem] %d: 0x%lx, %d pages\n",
                i, phys, (int)((sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE));
        }
    }
#endif

    return sgt;
}

static void _dmabuf_unmap(struct dma_buf_attachment *attachment,
                          struct sg_table *sgt,
                          enum dma_data_direction direction)
{
    DEBUG_PRINT("[vidmem] %s\n", __func__);

    dma_unmap_sg(attachment->dev, sgt->sgl, sgt->nents, direction);

    sg_free_table(sgt);
    FreeMemory(sgt);
}

static int _dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
    struct mem_block *memBlk = dmabuf->priv;
    size_t skipPages = vma->vm_pgoff;
    size_t numPages = PAGE_ALIGN(vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    unsigned long physical = 0;
    int status = 0;

    getPhysical(memBlk, 0, &physical);
    DEBUG_PRINT("[vidmem] %s, %d: Mmap 0x%lx with %zu pages\n", __func__, __LINE__, physical, numPages);

    ONERROR(Mmap(memBlk, skipPages, numPages, vma));

OnError:
    return IS_ERROR(status) ? -EINVAL : 0;
}

static void _dmabuf_release(struct dma_buf *dmabuf)
{
}

static void *_dmabuf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
    char *kvaddr = NULL;

    return (void *)kvaddr;
}

static void _dmabuf_kunmap(struct dma_buf *dmabuf, unsigned long offset, void *ptr)
{
}

static struct dma_buf_ops _dmabuf_ops =
{
    .map_dma_buf = _dmabuf_map,
    .unmap_dma_buf = _dmabuf_unmap,
    .mmap = _dmabuf_mmap,
    .release = _dmabuf_release,
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
    .map = _dmabuf_kmap,
    .unmap = _dmabuf_kunmap,
#endif
};
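
/*
 * DMABUF_Export() wraps a previously allocated block in a dma-buf and hands
 * the caller a file descriptor. The export is created lazily on first
 * request and cached in the block, so repeated exports of one bus address
 * share a single dma_buf. A minimal userspace sketch (hypothetical fd
 * plumbing; assumes the VidmemParams layout from video_memory.h):
 *
 *     VidmemParams p = { 0 };
 *     p.bus_address = previously_allocated_addr;
 *     p.flags = O_RDWR | O_CLOEXEC;
 *     if (ioctl(vidmem_fd, MEMORY_IOC_DMABUF_EXPORT, &p) == 0)
 *         use_dmabuf_fd(p.fd);
 */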
static int
DMABUF_Export(
    IN struct file *filp,
    IN unsigned long bus_address,
    IN signed int Flags,
    OUT signed int *FD
    )
{
    int status = 0;
    struct dma_buf *dmabuf = NULL;
    struct mem_block *memBlk = NULL;
    struct mem_node *mnode = NULL;

    DEBUG_PRINT("[vidmem] Export buffer 0x%lx\n", bus_address);
    mnode = get_mem_node(filp, bus_address, 0);
    if (NULL == mnode)
    {
        pr_err("[vidmem] Cannot find mem_node with bus address 0x%lx\n", bus_address);
        ONERROR(EINVAL);
    }

    memBlk = &mnode->memBlk;

    dmabuf = memBlk->dmabuf;
    if (dmabuf == NULL)
    {
        size_t bytes = memBlk->size;

        {
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

            exp_info.ops = &_dmabuf_ops;
            exp_info.size = bytes;
            exp_info.flags = Flags;
            exp_info.priv = memBlk;

            dmabuf = dma_buf_export(&exp_info);
        }

        /* dma_buf_export() returns ERR_PTR() on failure. */
        if (IS_ERR_OR_NULL(dmabuf))
        {
            ONERROR(EFAULT);
        }

        memBlk->dmabuf = dmabuf;
    }

    if (FD)
    {
        int fd = dma_buf_fd(dmabuf, Flags);

        if (fd < 0)
        {
            ONERROR(EIO);
        }

        *FD = fd;
    }

OnError:
    return status;
}
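
/*
 * DMABUF_Import() is the consumer path: it resolves an incoming fd with
 * dma_buf_get(), attaches and maps it via Attach(), then records the buffer
 * in the caller's per-file list, using the first physical page as the bus
 * address handle returned to userspace.
 */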
static int
DMABUF_Import(
    IN struct file *filp,
    IN signed int FD,
    OUT unsigned long *bus_address,
    OUT unsigned int *size
    )
{
    int status;
    struct mem_block *memBlk = NULL;
    struct file_node *fnode = NULL;
    struct mem_node *mnode = NULL;

    DEBUG_PRINT("[vidmem] enter %s: fd = 0x%x\n", __func__, FD);
    mnode = kzalloc(sizeof(struct mem_node), GFP_KERNEL | __GFP_NORETRY);
    if (!mnode)
    {
        ONERROR(ENOMEM);
    }

    fnode = get_file_node(filp);
    if (NULL == fnode)
    {
        ONERROR(EINVAL);
    }

    memBlk = &mnode->memBlk;

    /* Import dma buf handle; dma_buf_get() returns ERR_PTR() on failure. */
    memBlk->dmabuf = dma_buf_get(FD);
    if (IS_ERR_OR_NULL(memBlk->dmabuf))
    {
        memBlk->dmabuf = NULL;
        ONERROR(EFAULT);
    }

    ONERROR(Attach(memBlk));

    *bus_address = memBlk->pagearray[0];
    *size = memBlk->size;
    DEBUG_PRINT("[vidmem] Imported FD %d at 0x%lx in size of %zu\n", FD, memBlk->pagearray[0], memBlk->size);

    mnode->busAddr = memBlk->pagearray[0];
    mnode->isImported = 1;
    spin_lock(&mem_lock);
    list_add_tail(&mnode->link, &fnode->memList);
    spin_unlock(&mem_lock);

    return 0;

OnError:
    if (mnode)
    {
        /* Drop the dma-buf reference taken above if attach failed. */
        if (mnode->memBlk.dmabuf)
        {
            dma_buf_put(mnode->memBlk.dmabuf);
        }
        kfree(mnode);
    }

    return status;
}
void
DMABUF_Release(
    IN struct file *filp,
    IN unsigned long bus_address
    )
{
    struct mem_block *memBlk = NULL;
    struct mem_node *mnode = NULL;

    mnode = get_mem_node(filp, bus_address, 1);
    if (NULL == mnode)
    {
        return;
    }

    memBlk = &mnode->memBlk;

    dma_buf_unmap_attachment(memBlk->attachment, memBlk->sgt, DMA_BIDIRECTIONAL);

    dma_buf_detach(memBlk->dmabuf, memBlk->attachment);

    dma_buf_put(memBlk->dmabuf);

    FreeMemory(memBlk->pagearray);

    spin_lock(&mem_lock);
    list_del(&mnode->link);
    spin_unlock(&mem_lock);
    FreeMemory(mnode);
}

/***************************************************************************\
************************        GFP Allocator        **********************
\***************************************************************************/
static void
NonContiguousFree(
    IN struct page **Pages,
    IN size_t NumPages
    )
{
    size_t i;

    for (i = 0; i < NumPages; i++)
    {
        __free_page(Pages[i]);
    }

    FreeMemory(Pages);
}

static int
NonContiguousAlloc(
    IN struct mem_block *MemBlk,
    IN size_t NumPages,
    IN unsigned int Gfp
    )
{
    struct page **pages;
    struct page *p;
    size_t i, size;

    if (NumPages > totalram_pages())
    {
        return ENOMEM;
    }

    size = NumPages * sizeof(struct page *);
    if (AllocateMemory(size, (void **)&pages))
        return ENOMEM;

    for (i = 0; i < NumPages; i++)
    {
        p = alloc_page(Gfp);

        if (!p)
        {
            pr_err("Failed to allocate non-contiguous memory\n");
            NonContiguousFree(pages, i);
            return ENOMEM;
        }

#if DISCRETE_PAGES
        if (i != 0)
        {
            if (page_to_pfn(pages[i - 1]) == page_to_pfn(p) - 1)
            {
                /* Replaced page. */
                struct page *l = p;

                /* Allocate a page which is not contiguous to previous one. */
                p = alloc_page(Gfp);

                /* Give replaced page back. */
                __free_page(l);

                if (!p)
                {
                    NonContiguousFree(pages, i);
                    return ENOMEM;
                }
            }
        }
#endif

        pages[i] = p;
    }

    MemBlk->nonContiguousPages = pages;

    return 0;
}
static int
getPhysical(
    IN struct mem_block *MemBlk,
    IN unsigned int Offset,
    OUT unsigned long *Physical
    )
{
    struct mem_block *memBlk = MemBlk;
    unsigned int offsetInPage = Offset & ~PAGE_MASK;
    unsigned int index = Offset / PAGE_SIZE;

    if (memBlk->contiguous)
    {
        *Physical = page_to_phys(nth_page(memBlk->contiguousPages, index));
    }
    else
    {
        *Physical = page_to_phys(memBlk->nonContiguousPages[index]);
    }

    *Physical += offsetInPage;

    return 0;
}
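
/*
 * GFP_Alloc() is the main allocation entry point. Depending on the flags it
 * takes one of four paths: a reserved-memory region (ALLOC_FLAG_VI, via
 * rsvmem_pool_alloc), the CMA/coherent allocator (ALLOC_FLAG_CMA), plain
 * contiguous pages (alloc_pages_exact with an alloc_pages fallback), or
 * non-contiguous single pages backed by an sg_table. A minimal userspace
 * sketch (hypothetical fd plumbing; assumes the VidmemParams layout from
 * video_memory.h):
 *
 *     VidmemParams p = { 0 };
 *     p.size = 1 << 20;
 *     p.flags = ALLOC_FLAG_CONTIGUOUS;
 *     if (ioctl(vidmem_fd, MEMORY_IOC_ALLOCATE, &p) == 0)
 *         printf("bus address 0x%lx\n", p.bus_address);
 */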
int
GFP_Alloc(
    IN struct file *filp,
    IN unsigned int size,
    IN unsigned int Flags,
    OUT unsigned long *bus_address
    )
{
    int status;
    size_t i = 0;
    unsigned int gfp = GFP_KERNEL | GFP_DMA | __GFP_NOWARN;
    int contiguous = Flags & (ALLOC_FLAG_CONTIGUOUS | ALLOC_FLAG_CMA | ALLOC_FLAG_VI);
    size_t numPages = GetPageCount(size, 0);
    unsigned long physical = 0;

    struct mem_block *memBlk = NULL;
    struct file_node *fnode = NULL;
    struct mem_node *mnode = NULL;

    if ((Flags & ALLOC_FLAG_CMA) && (Flags & ALLOC_FLAG_VI))
    {
        ONERROR(EINVAL);
    }

    mnode = kzalloc(sizeof(struct mem_node), GFP_KERNEL | __GFP_NORETRY);
    if (!mnode)
    {
        ONERROR(ENOMEM);
    }

    fnode = get_file_node(filp);
    if (NULL == fnode)
    {
        ONERROR(EINVAL);
    }

    memBlk = &mnode->memBlk;

    if (Flags & ALLOC_FLAG_4GB_ADDR)
    {
        /* remove __GFP_HIGHMEM bit, add __GFP_DMA32 bit */
        gfp &= ~__GFP_HIGHMEM;
        gfp |= __GFP_DMA32;
    }

    memBlk->contiguous = contiguous;
    memBlk->numPages = numPages;
    memBlk->size = size;
    memBlk->is_cma = false;
    memBlk->is_vi_mem = false;

    if (contiguous)
    {
        size_t bytes = numPages << PAGE_SHIFT;
        void *addr = NULL;

        if (Flags & ALLOC_FLAG_VI)
        {
            int region_id = GET_ALLOC_FLAG_REGION(Flags);

            memBlk->dma_addr = rsvmem_pool_alloc(region_id, bytes);
            if (!memBlk->dma_addr)
            {
                ONERROR(ENOMEM);
            }

            memBlk->rsvmem_pool_region_id = region_id;
            memBlk->contiguousPages = phys_to_page((phys_addr_t)memBlk->dma_addr);
            memBlk->is_vi_mem = true;
            physical = memBlk->dma_addr;
            goto OnDone;
        }
        else if (Flags & ALLOC_FLAG_CMA)
        {
            memBlk->va = dma_alloc_coherent(gdev,
                bytes, &memBlk->dma_addr,
                GFP_KERNEL | __GFP_NOWARN);
            if (!memBlk->va)
            {
                ONERROR(ENOMEM);
            }

            memBlk->contiguousPages = phys_to_page((phys_addr_t)memBlk->dma_addr);
            memBlk->is_cma = true;
            //pr_debug("got cma vir %p phy 0x%x contiguousPages %p\n", memBlk->va, memBlk->dma_addr, memBlk->contiguousPages);
            physical = memBlk->dma_addr;
            goto OnDone;
        }

        addr = alloc_pages_exact(bytes, (gfp & ~__GFP_HIGHMEM) | __GFP_NORETRY);

        memBlk->contiguousPages = addr ? virt_to_page(addr) : NULL;
        if (memBlk->contiguousPages)
        {
            memBlk->exact = true;
        }

        if (memBlk->contiguousPages == NULL)
        {
            int order = get_order(bytes);

            if (order >= MAX_ORDER)
            {
                pr_err("Too big buffer size requested. (order %d >= max %d)\n",
                    order, MAX_ORDER);
                status = ENOMEM;
                goto OnError;
            }

            memBlk->contiguousPages = alloc_pages(gfp, order);
        }

        if (memBlk->contiguousPages == NULL)
        {
            pr_debug("Failed to allocate contiguous memory\n");
            ONERROR(ENOMEM);
        }

#ifdef VIDMEM_DMA_MAP
        memBlk->dma_addr = dma_map_page(gdev,
            memBlk->contiguousPages, 0, numPages * PAGE_SIZE,
            DMA_BIDIRECTIONAL);

        if (dma_mapping_error(gdev, memBlk->dma_addr))
        {
            if (memBlk->exact)
            {
                free_pages_exact(page_address(memBlk->contiguousPages), bytes);
            }
            else
            {
                __free_pages(memBlk->contiguousPages, get_order(bytes));
            }

            ONERROR(ENOMEM);
        }
#endif
    }
    else /* non-contiguous pages */
    {
        ONERROR(NonContiguousAlloc(memBlk, numPages, gfp));

        memBlk->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL | __GFP_NORETRY);
        if (!memBlk->sgt)
        {
            NonContiguousFree(memBlk->nonContiguousPages, numPages);
            ONERROR(ENOMEM);
        }

        status = sg_alloc_table_from_pages(memBlk->sgt,
            memBlk->nonContiguousPages, numPages, 0,
            numPages << PAGE_SHIFT, GFP_KERNEL);
        if (status < 0)
        {
            NonContiguousFree(memBlk->nonContiguousPages, numPages);
            ONERROR(ENOMEM);
        }

        memBlk->sgt->orig_nents = memBlk->sgt->nents;

#ifdef VIDMEM_DMA_MAP
        status = dma_map_sg(gdev, memBlk->sgt->sgl, memBlk->sgt->nents, DMA_BIDIRECTIONAL);
        if (status != memBlk->sgt->nents)
        {
            NonContiguousFree(memBlk->nonContiguousPages, numPages);
            sg_free_table(memBlk->sgt);
            ONERROR(ENOMEM);
        }
#endif
    }

    for (i = 0; i < numPages; i++)
    {
        struct page *page;

        if (contiguous)
        {
            page = nth_page(memBlk->contiguousPages, i);
        }
        else
        {
            page = memBlk->nonContiguousPages[i];
        }

        SetPageReserved(page);
    }

    getPhysical(memBlk, 0, &physical);

OnDone:
    *bus_address = physical;
    mnode->busAddr = physical;
    mnode->isImported = 0;
    spin_lock(&mem_lock);
    list_add_tail(&mnode->link, &fnode->memList);
    spin_unlock(&mem_lock);

    DEBUG_PRINT("[vidmem] Allocated %u bytes (%zu pages) at physical address 0x%lx with %d sg table entries\n",
        size, numPages, physical, contiguous ? 1 : memBlk->sgt->nents);

    return 0;

OnError:
    if (memBlk && memBlk->sgt)
    {
        kfree(memBlk->sgt);
    }

    if (mnode)
    {
        kfree(mnode);
    }

    return status;
}
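
/*
 * GFP_Free() is the inverse of GFP_Alloc(): it looks the block up by bus
 * address, clears the reserved bit on every page, and releases the memory
 * through whichever allocator produced it (reserved pool, coherent CMA,
 * alloc_pages_exact, alloc_pages, or per-page frees).
 */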
void
GFP_Free(
    IN struct file *filp,
    IN unsigned long bus_address
    )
{
    size_t i;
    struct page *page;
    struct mem_block *memBlk = NULL;
    struct mem_node *mnode = NULL;

    mnode = get_mem_node(filp, bus_address, 0);
    if (NULL == mnode)
    {
        return;
    }

    memBlk = &mnode->memBlk;
    DEBUG_PRINT("[vidmem] Free %zu pages from physical address 0x%lx\n", memBlk->numPages, mnode->busAddr);

    if (memBlk->contiguous)
    {
#ifdef VIDMEM_DMA_MAP
        /* Direction must match the DMA_BIDIRECTIONAL mapping made at
         * allocation time. */
        dma_unmap_page(gdev, memBlk->dma_addr,
            memBlk->numPages << PAGE_SHIFT, DMA_BIDIRECTIONAL);
#endif
    }
    else
    {
#ifdef VIDMEM_DMA_MAP
        dma_unmap_sg(gdev, memBlk->sgt->sgl, memBlk->sgt->nents,
            DMA_BIDIRECTIONAL);
#endif
        if (memBlk->sgt)
        {
            sg_free_table(memBlk->sgt);
            kfree(memBlk->sgt);
        }
    }

    if (memBlk->is_cma == false && memBlk->is_vi_mem == false)
    {
        for (i = 0; i < memBlk->numPages; i++)
        {
            if (memBlk->contiguous)
            {
                page = nth_page(memBlk->contiguousPages, i);
            }
            else
            {
                page = memBlk->nonContiguousPages[i];
            }

            ClearPageReserved(page);
        }
    }

    if (memBlk->contiguous)
    {
        size_t bytes = memBlk->numPages << PAGE_SHIFT;

        if (memBlk->is_vi_mem == true)
        {
            rsvmem_pool_free(memBlk->rsvmem_pool_region_id, bytes, memBlk->dma_addr);
        }
        else if (memBlk->is_cma == true)
        {
            dma_free_coherent(gdev, bytes,
                memBlk->va, memBlk->dma_addr);
        }
        else if (memBlk->exact == true)
        {
            free_pages_exact(page_address(memBlk->contiguousPages), memBlk->numPages * PAGE_SIZE);
        }
        else
        {
            __free_pages(memBlk->contiguousPages, get_order(memBlk->numPages * PAGE_SIZE));
        }
    }
    else
    {
        NonContiguousFree(memBlk->nonContiguousPages, memBlk->numPages);
    }

    spin_lock(&mem_lock);
    list_del(&mnode->link);
    spin_unlock(&mem_lock);
    FreeMemory(mnode);
}
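
/*
 * GFP_MapUser() implements mmap() on the character device. The bus address
 * of the block to map travels in the mmap offset (vm_pgoff holds it in page
 * units), which is how one device node can expose many allocations. A
 * minimal userspace sketch (hypothetical fd plumbing; assumes bus_address
 * came back from MEMORY_IOC_ALLOCATE):
 *
 *     void *va = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, vidmem_fd, (off_t)bus_address);
 */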
static int
GFP_MapUser(
    IN struct file *filp,
    IN struct vm_area_struct *vma
    )
{
    struct mem_block *memBlk = NULL;
    struct mem_node *mnode = NULL;
    unsigned long bus_address = vma->vm_pgoff * PAGE_SIZE;
    int status = 0;

    mnode = get_mem_node(filp, bus_address, 0);
    if (NULL == mnode)
    {
        return EINVAL;
    }

    memBlk = &mnode->memBlk;

    if (Mmap(memBlk, 0, memBlk->numPages, vma))
    {
        return ENOMEM;
    }

    memBlk->vma = vma;
    DEBUG_PRINT("[vidmem] Map %zu pages from physical address 0x%lx\n", memBlk->numPages, mnode->busAddr);

    return status;
}

int vidalloc_mmap(struct file *filp, struct vm_area_struct *vma)
{
    return GFP_MapUser(filp, vma);
}
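
/*
 * ioctl dispatcher. Every command copies a VidmemParams block in from user
 * space, performs the operation, and copies the (possibly updated) block
 * back. Note that this driver reports failures as positive errno values
 * through the ioctl return, matching the IS_ERROR() convention used
 * throughout the file.
 */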
static long vidalloc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    VidmemParams params;
    int ret = 0;

    if (_IOC_TYPE(cmd) != MEMORY_IOC_MAGIC)
        return EINVAL;
    if (_IOC_NR(cmd) > MEMORY_IOC_MAXNR)
        return EINVAL;

    if (_IOC_DIR(cmd) & (_IOC_READ | _IOC_WRITE))
        ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
    if (ret)
        return EINVAL;

    switch (cmd)
    {
    case MEMORY_IOC_ALLOCATE:
    {
        ret = copy_from_user(&params, (void __user *)arg, sizeof(VidmemParams));
        if (!ret)
        {
            ret = GFP_Alloc(filp, params.size, params.flags, &params.bus_address);
            params.translation_offset = 0;
            if (!ret)
                ret = copy_to_user((VidmemParams __user *)arg, &params, sizeof(VidmemParams));
        }
        break;
    }
    case MEMORY_IOC_FREE:
    {
        ret = copy_from_user(&params, (void __user *)arg, sizeof(VidmemParams));
        if (!ret)
        {
            GFP_Free(filp, params.bus_address);
            ret = copy_to_user((VidmemParams __user *)arg, &params, sizeof(VidmemParams));
        }
        break;
    }
    case MEMORY_IOC_DMABUF_EXPORT:
    {
        ret = copy_from_user(&params, (void __user *)arg, sizeof(VidmemParams));
        if (!ret)
        {
            ret = DMABUF_Export(filp, params.bus_address, params.flags, &params.fd);
            if (!ret)
                ret = copy_to_user((VidmemParams __user *)arg, &params, sizeof(VidmemParams));
        }
        break;
    }
    case MEMORY_IOC_DMABUF_IMPORT:
    {
        ret = copy_from_user(&params, (void __user *)arg, sizeof(VidmemParams));
        if (!ret)
        {
            ret = DMABUF_Import(filp, params.fd, &params.bus_address, &params.size);
            params.translation_offset = 0;
            if (!ret)
                ret = copy_to_user((VidmemParams __user *)arg, &params, sizeof(VidmemParams));
        }
        break;
    }
    case MEMORY_IOC_DMABUF_RELEASE:
    {
        ret = copy_from_user(&params, (void __user *)arg, sizeof(VidmemParams));
        if (!ret)
        {
            DMABUF_Release(filp, params.bus_address);
            ret = copy_to_user((VidmemParams __user *)arg, &params, sizeof(VidmemParams));
        }
        break;
    }
    default:
        ret = EINVAL;
    }

    return ret;
}
static int vidalloc_open(struct inode *inode, struct file *filp)
{
    int ret = 0;
    struct file_node *fnode = NULL;

    if (AllocateMemory(sizeof(struct file_node), (void **)&fnode))
        return ENOMEM;

    fnode->filp = filp;
    INIT_LIST_HEAD(&fnode->memList);
    spin_lock(&mem_lock);
    list_add_tail(&fnode->link, &fileList);
    spin_unlock(&mem_lock);

    return ret;
}

static int vidalloc_release(struct inode *inode, struct file *filp)
{
    struct file_node *fnode = get_file_node(filp);
    struct mem_node *node;
    struct mem_node *temp;

    if (NULL == fnode)
        return EINVAL;

    list_for_each_entry_safe(node, temp, &fnode->memList, link)
    {
        /* This is not expected: a memory leak was detected. */
        pr_debug("vidmem: Found unfreed memory at 0x%lx, isImported = %d\n", node->busAddr, node->isImported);
        if (node->isImported)
            DMABUF_Release(filp, node->busAddr);
        else
            GFP_Free(filp, node->busAddr);
    }

    spin_lock(&mem_lock);
    list_del(&fnode->link);
    spin_unlock(&mem_lock);
    FreeMemory(fnode);

    return 0;
}

static struct file_operations vidalloc_fops = {
    .owner = THIS_MODULE,
    .open = vidalloc_open,
    .release = vidalloc_release,
    .unlocked_ioctl = vidalloc_ioctl,
    .mmap = vidalloc_mmap,
    .fasync = NULL,
};
#endif
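
/*
 * Platform driver glue. probe() creates the reserved-memory pools described
 * in the device tree, registers a character device region (dynamic major by
 * default), and publishes /dev/vidmem through a device class so userspace
 * can reach the allocator.
 */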
int vidalloc_probe(struct platform_device *pdev)
{
    int result = 0;

    DEBUG_PRINT("enter %s\n", __func__);
#if 1
    gdev = &pdev->dev;
    INIT_LIST_HEAD(&fileList);

    result = rsvmem_pool_create(&pdev->dev);
    if (result && result != -ENODEV)
    {
        pr_err("%s: Failed to create reserved memory pool\n", __func__);
        goto err1;
    }

    if (vidalloc_major == 0)
    {
        result = alloc_chrdev_region(&vidalloc_devt, 0, 1, "vidmem");
        if (result != 0)
        {
            pr_err("%s: alloc_chrdev_region error\n", __func__);
            goto err1;
        }
        vidalloc_major = MAJOR(vidalloc_devt);
        vidalloc_minor = MINOR(vidalloc_devt);
    }
    else
    {
        vidalloc_devt = MKDEV(vidalloc_major, vidalloc_minor);
        result = register_chrdev_region(vidalloc_devt, 1, "vidmem");
        if (result)
        {
            pr_err("%s: register_chrdev_region error\n", __func__);
            goto err1;
        }
    }

    vidalloc_class = class_create(THIS_MODULE, "vidmem");
    if (IS_ERR(vidalloc_class))
    {
        pr_err("%s, %d: class_create error!\n", __func__, __LINE__);
        result = PTR_ERR(vidalloc_class);
        goto err;
    }

    vidalloc_devt = MKDEV(vidalloc_major, vidalloc_minor);

    cdev_init(&vidalloc_cdev, &vidalloc_fops);
    result = cdev_add(&vidalloc_cdev, vidalloc_devt, 1);
    if (result)
    {
        pr_err("%s, %d: cdev_add error!\n", __func__, __LINE__);
        goto err;
    }

    device_create(vidalloc_class, NULL, vidalloc_devt,
        NULL, "vidmem");

    return 0;

err:
    unregister_chrdev_region(vidalloc_devt, 1);
err1:
    pr_err("vidmem: module not inserted\n");
#endif
    return result;
}
static int vidalloc_remove(struct platform_device *pdev)
{
    DEBUG_PRINT("enter %s\n", __func__);
    rsvmem_pool_destroy();
    cdev_del(&vidalloc_cdev);
    device_destroy(vidalloc_class, vidalloc_devt);
    unregister_chrdev_region(vidalloc_devt, 1);
    class_destroy(vidalloc_class);

    return 0;
}

static const struct of_device_id thead_of_match[] = {
    { .compatible = "thead,light-vidmem", },
    { /* sentinel */ },
};

static struct platform_driver vidalloc_driver = {
    .probe = vidalloc_probe,
    .remove = vidalloc_remove,
    .driver = {
        .name = "vidmem",
        .owner = THIS_MODULE,
        .of_match_table = of_match_ptr(thead_of_match),
    }
};

int __init vidalloc_init(void)
{
    int ret = 0;

    DEBUG_PRINT("enter %s\n", __func__);
    ret = platform_driver_register(&vidalloc_driver);
    if (ret)
    {
        pr_err("register platform driver failed!\n");
    }
    else
    {
        pr_info("vidmem: module inserted. Major <%d>\n", vidalloc_major);
    }

    return ret;
}

void __exit vidalloc_cleanup(void)
{
    DEBUG_PRINT("enter %s\n", __func__);
    platform_driver_unregister(&vidalloc_driver);
    pr_info("vidmem: module removed.\n");
}

module_init(vidalloc_init);
module_exit(vidalloc_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("T-HEAD");
MODULE_DESCRIPTION("Video Memory Allocator");