/****************************************************************************
*
*    The MIT License (MIT)
*
*    Copyright (c) 2014 - 2021 Vivante Corporation
*
*    Permission is hereby granted, free of charge, to any person obtaining a
*    copy of this software and associated documentation files (the "Software"),
*    to deal in the Software without restriction, including without limitation
*    the rights to use, copy, modify, merge, publish, distribute, sublicense,
*    and/or sell copies of the Software, and to permit persons to whom the
*    Software is furnished to do so, subject to the following conditions:
*
*    The above copyright notice and this permission notice shall be included in
*    all copies or substantial portions of the Software.
*
*    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
*    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
*    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
*    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
*    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
*    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
*    DEALINGS IN THE SOFTWARE.
*
*****************************************************************************
*
*    The GPL License (GPL)
*
*    Copyright (C) 2014 - 2021 Vivante Corporation
*
*    This program is free software; you can redistribute it and/or
*    modify it under the terms of the GNU General Public License
*    as published by the Free Software Foundation; either version 2
*    of the License, or (at your option) any later version.
*
*    This program is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License
*    along with this program; if not, write to the Free Software Foundation,
*    Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*****************************************************************************
*
*    Note: This software is released under dual MIT and GPL licenses. A
*    recipient may use this file under the terms of either the MIT license or
*    GPL License. If you wish to use only one license not the other, you can
*    indicate your decision by deleting one of the above license notices in your
*    version of this file.
*
*****************************************************************************/
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <asm/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/mm_types.h>
#include <linux/version.h>

#include "kernel_allocator.h"

#define DISCRETE_PAGES 0
//#define ALLOCATOR_DEBUG

/* This allocator reports errors as positive errno values. */
#define IS_ERROR(status)        ((status) > 0)
/*******************************************************************************
**
**  ONERROR
**
**      Jump to the error handler in case there is an error.
**
**  ASSUMPTIONS:
**
**      'status' variable of int type must be defined.
**
**  ARGUMENTS:
**
**      func    Function to evaluate.
*/
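/*
** Example (a minimal sketch; the enclosing function must declare
** 'int status' and provide an 'OnError:' label, as AllocateMemory()
** below does):
**
**     status = 0;
**     ONERROR(AllocateMemory(bytes, &memory));
**     ...
** OnError:
**     return status;
*/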
#define _ONERROR(prefix, func) \
    do \
    { \
        status = func; \
        if (IS_ERROR(status)) \
        { \
            goto OnError; \
        } \
    } \
    while (false)

#define ONERROR(func)           _ONERROR(, func)
/*******************************************************************************
**
**  ERR_BREAK
**
**      Executes a break statement on error.
**
**  ASSUMPTIONS:
**
**      'status' variable of int type must be defined.
**
**  ARGUMENTS:
**
**      func    Function to evaluate.
*/
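/*
** Example (a sketch: ERR_BREAK is only meaningful inside a loop, as in
** _dmabuf_map() below):
**
**     do
**     {
**         ERR_BREAK(GetSGT(memBlk, 0, memBlk->size, (void **)&sgt));
**         ...
**     }
**     while (false);
*/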
#define _ERR_BREAK(prefix, func) \
{ \
    status = func; \
    if (IS_ERROR(status)) \
    { \
        break; \
    } \
}

#define ERR_BREAK(func)         _ERR_BREAK(, func)
/*******************************************************************************
**
**  VERIFY_ARGUMENT
**
**      Verify that an argument satisfies the specified expression. If
**      the argument evaluates to false, EINVAL will be returned from the
**      current function.
**
**  ARGUMENTS:
**
**      arg     Argument to evaluate.
*/
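/*
** Example (sketch): return EINVAL to the caller when Bytes is 0:
**
**     VERIFY_ARGUMENT(Bytes > 0);
*/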
#define _VERIFY_ARGUMENT(prefix, arg) \
    do \
    { \
        if (!(arg)) \
        { \
            return EINVAL; \
        } \
    } \
    while (false)

#define VERIFY_ARGUMENT(arg)    _VERIFY_ARGUMENT(, arg)

#define VM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
#define current_mm_mmap_sem current->mm->mmap_lock
#else
#define current_mm_mmap_sem current->mm->mmap_sem
#endif
#define GetPageCount(size, offset) ((((size) + ((offset) & ~PAGE_MASK)) + PAGE_SIZE - 1) >> PAGE_SHIFT)
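/*
** Worked example (assuming PAGE_SIZE == 4096): size = 6000 bytes at
** offset = 4000 touches bytes 4000..9999, i.e. pages 0, 1 and 2, and
** GetPageCount(6000, 4000) == (6000 + 4000 + 4095) >> 12 == 3.
*/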
#ifndef ALLOCATOR_DEBUG
#define DEBUG_PRINT(...) \
    do { \
    } while (0)
#else
#undef DEBUG_PRINT
#define DEBUG_PRINT(...) printk(__VA_ARGS__)
#endif
struct mem_block
{
    int contiguous;
    size_t size;
    size_t numPages;
    struct dma_buf *dmabuf;
    struct dma_buf_attachment *attachment;
    struct sg_table *sgt;
    unsigned long *pagearray;
    struct vm_area_struct *vma;

    union
    {
        /* Contiguous case: pointer to the first page of the run. */
        struct
        {
            struct page *contiguousPages;
            dma_addr_t dma_addr;
            int exact;
        };

        struct
        {
            /* Non-contiguous case: pointer to an array of pointers to page. */
            struct page **nonContiguousPages;

            struct page **Pages1M;
            int numPages1M;
            int *isExact;
        };
    };
};

struct mem_node
{
    struct mem_block memBlk;
    unsigned long busAddr;
    int isImported;
    struct list_head link;
};

struct file_node
{
    struct list_head memList;
    struct file *filp;
    struct list_head link;
};
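/*
** Bookkeeping: one file_node per open file, each holding a list of
** mem_node entries (one per allocation or dma-buf import). Both the
** global fileList and the per-file memList are guarded by mem_lock.
*/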
static struct list_head fileList;
static struct device *gdev = NULL;
static DEFINE_SPINLOCK(mem_lock);
static struct file_node *find_and_delete_file_node(struct file *filp)
{
    struct file_node *node;
    struct file_node *temp;

    spin_lock(&mem_lock);
    list_for_each_entry_safe(node, temp, &fileList, link)
    {
        if (node->filp == filp)
        {
            list_del(&node->link);
            spin_unlock(&mem_lock);
            return node;
        }
    }
    spin_unlock(&mem_lock);

    return NULL;
}
static struct file_node *get_file_node(struct file *filp)
{
    struct file_node *node;

    spin_lock(&mem_lock);
    list_for_each_entry(node, &fileList, link)
    {
        if (node->filp == filp)
        {
            spin_unlock(&mem_lock);
            return node;
        }
    }
    spin_unlock(&mem_lock);

    return NULL;
}
static struct mem_node *get_mem_node(struct file *filp, unsigned long bus_address, int imported)
{
    struct file_node *fnode;
    struct mem_node *node;

    fnode = get_file_node(filp);
    if (NULL == fnode)
    {
        return NULL;
    }

    spin_lock(&mem_lock);
    list_for_each_entry(node, &fnode->memList, link)
    {
        if (node->busAddr == bus_address && node->isImported == imported)
        {
            spin_unlock(&mem_lock);
            return node;
        }
    }
    spin_unlock(&mem_lock);

    return NULL;
}
static int
AllocateMemory(
    IN size_t Bytes,
    OUT void **Memory
    )
{
    void *memory = NULL;
    int status = 0;

    /* Verify the arguments. */
    VERIFY_ARGUMENT(Bytes > 0);
    VERIFY_ARGUMENT(Memory != NULL);

    if (Bytes > PAGE_SIZE)
    {
        memory = (void *)vmalloc(Bytes);
    }
    else
    {
        memory = (void *)kmalloc(Bytes, GFP_KERNEL | __GFP_NOWARN);
    }

    if (memory == NULL)
    {
        /* Out of memory. */
        ONERROR(ENOMEM);
    }

    /* Return pointer to the memory allocation. */
    *Memory = memory;

OnError:
    /* Return the status. */
    return status;
}
static int
FreeMemory(
    IN void *Memory
    )
{
    /* Verify the arguments. */
    VERIFY_ARGUMENT(Memory != NULL);

    /* Free the memory from the OS pool. */
    if (is_vmalloc_addr(Memory))
    {
        vfree(Memory);
    }
    else
    {
        kfree(Memory);
    }

    /* Success. */
    return 0;
}
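/*
** Build a scatter-gather table covering Bytes bytes of MemBlk starting at
** Offset. For contiguous blocks a temporary page array is materialized
** first; for non-contiguous blocks the existing page array is reused.
*/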
static int
GetSGT(
    IN struct mem_block *MemBlk,
    IN size_t Offset,
    IN size_t Bytes,
    OUT void **SGT
    )
{
    struct page **pages = NULL;
    struct page **tmpPages = NULL;
    struct sg_table *sgt = NULL;
    struct mem_block *memBlk = MemBlk;
    int status = 0;

    size_t offset = Offset & ~PAGE_MASK;     /* Offset to the first page */
    size_t skipPages = Offset >> PAGE_SHIFT; /* skipped pages */
    size_t numPages = (PAGE_ALIGN(Offset + Bytes) >> PAGE_SHIFT) - skipPages;
    size_t i;

    if (memBlk->contiguous)
    {
        ONERROR(AllocateMemory(sizeof(struct page *) * numPages, (void **)&tmpPages));
        pages = tmpPages;

        for (i = 0; i < numPages; ++i)
        {
            pages[i] = nth_page(memBlk->contiguousPages, i + skipPages);
        }
    }
    else
    {
        pages = &memBlk->nonContiguousPages[skipPages];
    }

    /* Only a single sg_table is needed here, not one per page. */
    ONERROR(AllocateMemory(sizeof(struct sg_table), (void **)&sgt));

    if (sg_alloc_table_from_pages(sgt, pages, numPages, offset, Bytes, GFP_KERNEL) < 0)
    {
        ONERROR(EPERM);
    }

    *SGT = (void *)sgt;

OnError:
    if (tmpPages)
    {
        FreeMemory(tmpPages);
    }

    if (IS_ERROR(status) && sgt)
    {
        FreeMemory(sgt);
    }

    return status;
}
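/*
** Map numPages pages of MemBlk, starting skipPages pages in, into the
** user address range described by vma. The mapping is write-combined
** (non-cached) and pinned with VM_FLAGS.
*/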
static int
Mmap(
    IN struct mem_block *MemBlk,
    IN size_t skipPages,
    IN size_t numPages,
    IN struct vm_area_struct *vma
    )
{
    struct mem_block *memBlk = MemBlk;
    int status = 0;

    vma->vm_flags |= VM_FLAGS;

    /* Make this mapping non-cached. */
    vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

    /* Now map all the pages to this user address. */
    if (memBlk->contiguous)
    {
        /* Map kernel memory to user space. */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            page_to_pfn(memBlk->contiguousPages) + skipPages,
                            numPages << PAGE_SHIFT,
                            vma->vm_page_prot) < 0)
        {
            ONERROR(ENOMEM);
        }
    }
    else
    {
        size_t i;
        unsigned long start = vma->vm_start;

        for (i = 0; i < numPages; ++i)
        {
            unsigned long pfn = page_to_pfn(memBlk->nonContiguousPages[i + skipPages]);

            if (remap_pfn_range(vma,
                                start,
                                pfn,
                                PAGE_SIZE,
                                vma->vm_page_prot) < 0)
            {
                ONERROR(ENOMEM);
            }

            start += PAGE_SIZE;
        }
    }

OnError:
    return status;
}
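/*
** Attach to an imported dma-buf: take a reference, attach the device,
** map the attachment, and flatten the resulting sg_table into an array
** of DMA addresses so the rest of the allocator can treat the import
** like a local allocation.
*/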
static int
Attach(
    INOUT struct mem_block *MemBlk
    )
{
    int status;
    struct mem_block *memBlk = MemBlk;

    struct dma_buf *dmabuf = memBlk->dmabuf;
    struct sg_table *sgt = NULL;
    struct dma_buf_attachment *attachment = NULL;
    int npages = 0;
    unsigned long *pagearray = NULL;
    int i, j, k = 0;
    struct scatterlist *s;
    unsigned int size = 0;

    if (!dmabuf)
    {
        ONERROR(EFAULT);
    }

    get_dma_buf(dmabuf);

    /* dma_buf_attach() reports failure through ERR_PTR(), not NULL. */
    attachment = dma_buf_attach(dmabuf, gdev);
    if (IS_ERR_OR_NULL(attachment))
    {
        attachment = NULL;
        ONERROR(EFAULT);
    }

    /* dma_buf_map_attachment() likewise returns ERR_PTR() on failure. */
    sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
    if (IS_ERR_OR_NULL(sgt))
    {
        sgt = NULL;
        ONERROR(EFAULT);
    }

    /* Prepare page array. */
    /* Get number of pages. */
    for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
    {
        npages += (sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    /* Allocate page array. */
    ONERROR(AllocateMemory(npages * sizeof(*pagearray), (void **)&pagearray));

    /* Fill page array. */
    for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
    {
        for (j = 0; j < (sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE; j++)
        {
            pagearray[k++] = sg_dma_address(s) + j * PAGE_SIZE;
        }

        /* Accumulate the total size across all segments. */
        size += sg_dma_len(s);
    }

    memBlk->pagearray = pagearray;
    memBlk->attachment = attachment;
    memBlk->sgt = sgt;
    memBlk->numPages = npages;
    memBlk->size = size;
    memBlk->contiguous = (sgt->nents == 1) ? true : false;

    return 0;

OnError:
    if (pagearray)
    {
        FreeMemory(pagearray);
        pagearray = NULL;
    }

    if (sgt)
    {
        dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
    }

    if (attachment)
    {
        dma_buf_detach(dmabuf, attachment);
    }

    if (dmabuf)
    {
        /* Drop the reference taken by get_dma_buf() above. */
        dma_buf_put(dmabuf);
    }

    return status;
}
static struct sg_table *_dmabuf_map(struct dma_buf_attachment *attachment,
                                    enum dma_data_direction direction)
{
    struct sg_table *sgt = NULL;
    struct dma_buf *dmabuf = attachment->dmabuf;
    struct mem_block *memBlk = dmabuf->priv;
    int status = 0;

    DEBUG_PRINT("%s\n", __func__);

    do
    {
        ERR_BREAK(GetSGT(memBlk, 0, memBlk->size, (void **)&sgt));

        if (dma_map_sg(attachment->dev, sgt->sgl, sgt->nents, direction) == 0)
        {
            sg_free_table(sgt);
            kfree(sgt);
            sgt = NULL;
            ERR_BREAK(EPERM);
        }
    }
    while (false);

    return sgt;
}
static void _dmabuf_unmap(struct dma_buf_attachment *attachment,
                          struct sg_table *sgt,
                          enum dma_data_direction direction)
{
    dma_unmap_sg(attachment->dev, sgt->sgl, sgt->nents, direction);

    sg_free_table(sgt);
    kfree(sgt);
}
static int _dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
    struct mem_block *memBlk = dmabuf->priv;
    size_t skipPages = vma->vm_pgoff;
    size_t numPages = PAGE_ALIGN(vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    int status = 0;

    /* contiguousPages is only valid for contiguous blocks (union member). */
    if (memBlk->contiguous)
    {
        DEBUG_PRINT("%s, %d: 0x%llx\n", __func__, __LINE__,
                    (unsigned long long)page_to_phys(nth_page(memBlk->contiguousPages, 0)));
    }

    ONERROR(Mmap(memBlk, skipPages, numPages, vma));

OnError:
    return IS_ERROR(status) ? -EINVAL : 0;
}
static void _dmabuf_release(struct dma_buf *dmabuf)
{
}

static void *_dmabuf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
    char *kvaddr = NULL;

    return (void *)kvaddr;
}

static void _dmabuf_kunmap(struct dma_buf *dmabuf, unsigned long offset, void *ptr)
{
}
static struct dma_buf_ops _dmabuf_ops =
{
    .map_dma_buf = _dmabuf_map,
    .unmap_dma_buf = _dmabuf_unmap,
    .mmap = _dmabuf_mmap,
    .release = _dmabuf_release,
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
    .map = _dmabuf_kmap,
    .unmap = _dmabuf_kunmap,
#endif
};
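/*
** Export a previously GFP_Alloc'ed block (looked up by bus address) as a
** dma-buf and, if FD is non-NULL, install a file descriptor for it. The
** dma_buf is created lazily on first export and cached in the block.
*/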
static int
DMABUF_Export(
    IN struct file *filp,
    IN unsigned long bus_address,
    IN signed int Flags,
    OUT signed int *FD
    )
{
    int status = 0;
    struct dma_buf *dmabuf = NULL;
    struct mem_block *memBlk = NULL;
    struct mem_node *mnode = NULL;

    mnode = get_mem_node(filp, bus_address, 0);
    if (NULL == mnode)
    {
        printk("Allocator: Cannot find mem_node with bus address 0x%lx\n", bus_address);
        ONERROR(EINVAL);
    }

    memBlk = &mnode->memBlk;

    dmabuf = memBlk->dmabuf;
    if (dmabuf == NULL)
    {
        size_t bytes = memBlk->size;

        {
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

            exp_info.ops = &_dmabuf_ops;
            exp_info.size = bytes;
            exp_info.flags = Flags;
            exp_info.priv = memBlk;

            dmabuf = dma_buf_export(&exp_info);
        }

        /* dma_buf_export() reports failure through ERR_PTR(). */
        if (IS_ERR_OR_NULL(dmabuf))
        {
            ONERROR(EFAULT);
        }

        memBlk->dmabuf = dmabuf;
    }

    if (FD)
    {
        int fd = dma_buf_fd(dmabuf, Flags);

        if (fd < 0)
        {
            ONERROR(EIO);
        }

        *FD = fd;
    }

OnError:
    return status;
}
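/*
** Import an external dma-buf by file descriptor: attach it (see Attach()
** above), record it in the caller's memory list, and report the first
** DMA address and total size back to the caller.
*/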
static int
DMABUF_Import(
    IN struct file *filp,
    IN signed int FD,
    OUT unsigned long *bus_address,
    OUT unsigned int *size
    )
{
    int status;
    struct mem_block *memBlk = NULL;
    struct file_node *fnode = NULL;
    struct mem_node *mnode = NULL;

    mnode = kzalloc(sizeof(struct mem_node), GFP_KERNEL | __GFP_NORETRY);
    if (!mnode)
    {
        ONERROR(ENOMEM);
    }

    fnode = get_file_node(filp);
    if (NULL == fnode)
    {
        ONERROR(EINVAL);
    }

    memBlk = &mnode->memBlk;

    /* Import dma buf handle. dma_buf_get() reports failure through ERR_PTR(). */
    memBlk->dmabuf = dma_buf_get(FD);
    if (IS_ERR_OR_NULL(memBlk->dmabuf))
    {
        memBlk->dmabuf = NULL;
        ONERROR(EFAULT);
    }

    ONERROR(Attach(memBlk));

    *bus_address = memBlk->pagearray[0];
    *size = memBlk->size;

    mnode->busAddr = memBlk->pagearray[0];
    mnode->isImported = 1;

    spin_lock(&mem_lock);
    list_add_tail(&mnode->link, &fnode->memList);
    spin_unlock(&mem_lock);

    return 0;

OnError:
    if (memBlk && memBlk->dmabuf)
    {
        /* Drop the reference taken by dma_buf_get() above. */
        dma_buf_put(memBlk->dmabuf);
    }

    if (mnode)
    {
        kfree(mnode);
    }

    return status;
}
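/*
** Undo DMABUF_Import for the block at bus_address: unmap and detach the
** attachment, drop the dma-buf reference and discard the bookkeeping node.
*/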
void
DMABUF_Release(
    IN struct file *filp,
    IN unsigned long bus_address
    )
{
    struct mem_block *memBlk = NULL;
    struct mem_node *mnode = NULL;

    mnode = get_mem_node(filp, bus_address, 1);
    if (NULL == mnode)
    {
        return;
    }

    memBlk = &mnode->memBlk;

    dma_buf_unmap_attachment(memBlk->attachment, memBlk->sgt, DMA_BIDIRECTIONAL);

    dma_buf_detach(memBlk->dmabuf, memBlk->attachment);

    dma_buf_put(memBlk->dmabuf);

    FreeMemory(memBlk->pagearray);

    spin_lock(&mem_lock);
    list_del(&mnode->link);
    spin_unlock(&mem_lock);

    kfree(mnode);
}
/***************************************************************************\
************************ GFP Allocator **********************************
\***************************************************************************/
static void
NonContiguousFree(
    IN struct page **Pages,
    IN size_t NumPages
    )
{
    size_t i;

    for (i = 0; i < NumPages; i++)
    {
        __free_page(Pages[i]);
    }

    FreeMemory(Pages);
}
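/*
** Allocate NumPages discrete pages and collect them in a page-pointer
** array. With DISCRETE_PAGES enabled, a page that happens to follow its
** predecessor physically is swapped for a non-adjacent one, which is
** useful for exercising the non-contiguous code paths.
*/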
static int
NonContiguousAlloc(
    IN struct mem_block *MemBlk,
    IN size_t NumPages,
    IN unsigned int Gfp
    )
{
    struct page **pages;
    struct page *p;
    size_t i, size;

    if (NumPages > totalram_pages())
    {
        return ENOMEM;
    }

    size = NumPages * sizeof(struct page *);
    if (AllocateMemory(size, (void **)&pages))
        return ENOMEM;

    for (i = 0; i < NumPages; i++)
    {
        p = alloc_page(Gfp);

        if (!p)
        {
            NonContiguousFree(pages, i);
            return ENOMEM;
        }

#if DISCRETE_PAGES
        if (i != 0)
        {
            if (page_to_pfn(pages[i - 1]) == page_to_pfn(p) - 1)
            {
                /* Replaced page. */
                struct page *l = p;

                /* Allocate a page which is not contiguous to previous one. */
                p = alloc_page(Gfp);

                /* Give replaced page back. */
                __free_page(l);

                if (!p)
                {
                    NonContiguousFree(pages, i);
                    return ENOMEM;
                }
            }
        }
#endif

        pages[i] = p;
    }

    MemBlk->nonContiguousPages = pages;

    return 0;
}
static int
getPhysical(
    IN struct mem_block *MemBlk,
    IN unsigned int Offset,
    OUT unsigned long *Physical
    )
{
    struct mem_block *memBlk = MemBlk;
    unsigned int offsetInPage = Offset & ~PAGE_MASK;
    unsigned int index = Offset / PAGE_SIZE;

    if (memBlk->contiguous)
    {
        *Physical = page_to_phys(nth_page(memBlk->contiguousPages, index));
    }
    else
    {
        *Physical = page_to_phys(memBlk->nonContiguousPages[index]);
    }

    *Physical += offsetInPage;

    return 0;
}
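/*
** Allocate 'size' bytes for the caller. Contiguous requests try
** alloc_pages_exact() first and fall back to alloc_pages(); the pages
** are then DMA-mapped as one block. Non-contiguous requests allocate
** page by page and are DMA-mapped through a scatter-gather table. All
** pages are marked reserved so they survive user-space mapping.
*/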
int
GFP_Alloc(
    IN struct file *filp,
    IN unsigned int size,
    IN unsigned int Flags,
    OUT unsigned long *bus_address
    )
{
    int status;
    size_t i = 0;
    unsigned int gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN;
    int contiguous = Flags & ALLOC_FLAG_CONTIGUOUS;
    size_t numPages = GetPageCount(size, 0);
    unsigned long physical = 0;

    struct mem_block *memBlk = NULL;
    struct file_node *fnode = NULL;
    struct mem_node *mnode = NULL;

    mnode = kzalloc(sizeof(struct mem_node), GFP_KERNEL | __GFP_NORETRY);
    if (!mnode)
    {
        ONERROR(ENOMEM);
    }

    fnode = get_file_node(filp);
    if (NULL == fnode)
    {
        ONERROR(EINVAL);
    }

    memBlk = &mnode->memBlk;

    if (Flags & ALLOC_FLAG_4GB_ADDR)
    {
        /* remove __GFP_HIGHMEM bit, add __GFP_DMA32 bit */
        gfp &= ~__GFP_HIGHMEM;
        gfp |= __GFP_DMA32;
    }

    if (contiguous)
    {
        size_t bytes = numPages << PAGE_SHIFT;

        void *addr = NULL;

        addr = alloc_pages_exact(bytes, (gfp & ~__GFP_HIGHMEM) | __GFP_NORETRY);

        memBlk->contiguousPages = addr ? virt_to_page(addr) : NULL;

        if (memBlk->contiguousPages)
        {
            memBlk->exact = true;
        }

        if (memBlk->contiguousPages == NULL)
        {
            int order = get_order(bytes);

            if (order >= MAX_ORDER)
            {
                status = ENOMEM;
                goto OnError;
            }

            memBlk->contiguousPages = alloc_pages(gfp, order);
        }

        if (memBlk->contiguousPages == NULL)
        {
            ONERROR(ENOMEM);
        }

        memBlk->dma_addr = dma_map_page(gdev,
                memBlk->contiguousPages, 0, numPages * PAGE_SIZE,
                DMA_BIDIRECTIONAL);

        if (dma_mapping_error(gdev, memBlk->dma_addr))
        {
            if (memBlk->exact)
            {
                free_pages_exact(page_address(memBlk->contiguousPages), bytes);
            }
            else
            {
                __free_pages(memBlk->contiguousPages, get_order(bytes));
            }

            ONERROR(ENOMEM);
        }
    }
    else // non-contiguous pages
    {
        ONERROR(NonContiguousAlloc(memBlk, numPages, gfp));

        /* Build a scatter-gather table over the pages and map it for DMA. */
        memBlk->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL | __GFP_NORETRY);
        if (!memBlk->sgt)
        {
            ONERROR(ENOMEM);
        }

        status = sg_alloc_table_from_pages(memBlk->sgt,
                memBlk->nonContiguousPages, numPages, 0,
                numPages << PAGE_SHIFT, GFP_KERNEL);
        if (status < 0)
        {
            NonContiguousFree(memBlk->nonContiguousPages, numPages);
            ONERROR(ENOMEM);
        }

        memBlk->sgt->orig_nents = memBlk->sgt->nents;

        status = dma_map_sg(gdev, memBlk->sgt->sgl, memBlk->sgt->nents, DMA_BIDIRECTIONAL);
        if (status != memBlk->sgt->nents)
        {
            NonContiguousFree(memBlk->nonContiguousPages, numPages);
            sg_free_table(memBlk->sgt);
            ONERROR(ENOMEM);
        }
    }

    for (i = 0; i < numPages; i++)
    {
        struct page *page;

        if (contiguous)
        {
            page = nth_page(memBlk->contiguousPages, i);
        }
        else
        {
            page = memBlk->nonContiguousPages[i];
        }

        SetPageReserved(page);
    }

    memBlk->contiguous = contiguous;
    memBlk->numPages = numPages;
    memBlk->size = size;

    getPhysical(memBlk, 0, &physical);
    *bus_address = physical;
    mnode->busAddr = physical;
    mnode->isImported = 0;

    spin_lock(&mem_lock);
    list_add_tail(&mnode->link, &fnode->memList);
    spin_unlock(&mem_lock);

    DEBUG_PRINT("Allocated %u bytes (%zu pages) at physical address 0x%lx with %d sg table entries\n",
           size, numPages, physical, contiguous ? 1 : memBlk->sgt->nents);

    return 0;

OnError:
    /* Free the sg_table (if any) before the node that owns it. */
    if (memBlk && memBlk->sgt)
    {
        kfree(memBlk->sgt);
    }

    if (mnode)
    {
        kfree(mnode);
    }

    return status;
}
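/*
** Undo GFP_Alloc for the block at bus_address: DMA-unmap it (with the
** same direction it was mapped with), clear the reserved bits and return
** the pages to the kernel.
*/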
void
GFP_Free(
    IN struct file *filp,
    IN unsigned long bus_address
    )
{
    size_t i;
    struct page *page;
    struct mem_block *memBlk = NULL;
    struct mem_node *mnode = NULL;

    mnode = get_mem_node(filp, bus_address, 0);
    if (NULL == mnode)
    {
        return;
    }

    memBlk = &mnode->memBlk;

    DEBUG_PRINT("Free %zu pages from physical address 0x%lx\n", memBlk->numPages, mnode->busAddr);

    if (memBlk->contiguous)
    {
        /* Unmap with the same direction used in dma_map_page(). */
        dma_unmap_page(gdev, memBlk->dma_addr,
                memBlk->numPages << PAGE_SHIFT, DMA_BIDIRECTIONAL);
    }
    else if (memBlk->sgt)
    {
        dma_unmap_sg(gdev, memBlk->sgt->sgl, memBlk->sgt->nents,
                DMA_BIDIRECTIONAL);

        sg_free_table(memBlk->sgt);
        kfree(memBlk->sgt);
    }

    /* Both kinds of pages were marked reserved in GFP_Alloc(). */
    for (i = 0; i < memBlk->numPages; i++)
    {
        if (memBlk->contiguous)
        {
            page = nth_page(memBlk->contiguousPages, i);
        }
        else
        {
            page = memBlk->nonContiguousPages[i];
        }

        ClearPageReserved(page);
    }

    if (memBlk->contiguous)
    {
        if (memBlk->exact)
        {
            free_pages_exact(page_address(memBlk->contiguousPages), memBlk->numPages * PAGE_SIZE);
        }
        else
        {
            __free_pages(memBlk->contiguousPages, get_order(memBlk->numPages * PAGE_SIZE));
        }
    }
    else
    {
        NonContiguousFree(memBlk->nonContiguousPages, memBlk->numPages);
    }

    spin_lock(&mem_lock);
    list_del(&mnode->link);
    spin_unlock(&mem_lock);

    kfree(mnode);
}
static int
GFP_MapUser(
    IN struct file *filp,
    IN struct vm_area_struct *vma
    )
{
    struct mem_block *memBlk = NULL;
    struct mem_node *mnode = NULL;
    unsigned long bus_address = vma->vm_pgoff * PAGE_SIZE;
    int status = 0;

    mnode = get_mem_node(filp, bus_address, 0);
    if (NULL == mnode)
    {
        return EINVAL;
    }

    memBlk = &mnode->memBlk;

    if (Mmap(memBlk, 0, memBlk->numPages, vma))
    {
        return ENOMEM;
    }

    memBlk->vma = vma;

    DEBUG_PRINT("Map %zu pages from physical address 0x%lx\n", memBlk->numPages, mnode->busAddr);

    return status;
}
int allocator_mmap(struct file *filp, struct vm_area_struct *vma)
{
    return GFP_MapUser(filp, vma);
}
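/*
** ioctl entry point: validates the command and the user pointer, then
** dispatches to the allocate/free/export/import/release helpers. The
** MemallocParams structure is copied in from and back out to user space
** around each operation.
*/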
int allocator_ioctl(void *filp, unsigned int cmd, unsigned long arg)
{
    MemallocParams params;
    int ret = 0;

    if (_IOC_TYPE(cmd) != MEMORY_IOC_MAGIC)
        return EINVAL;
    if (_IOC_NR(cmd) > MEMORY_IOC_MAXNR)
        return EINVAL;

    /* Both transfer directions need the same user-pointer check. */
    if (_IOC_DIR(cmd) & (_IOC_READ | _IOC_WRITE))
        ret = !access_ok(arg, _IOC_SIZE(cmd));
    if (ret)
        return EINVAL;

    switch (cmd)
    {
    case MEMORY_IOC_ALLOCATE:
        {
            ret = copy_from_user(&params, (void *)arg, sizeof(MemallocParams));
            if (!ret)
            {
                ret = GFP_Alloc(filp, params.size, params.flags, &params.bus_address);
                params.translation_offset = 0;
                if (!ret)
                    ret = copy_to_user((MemallocParams *)arg, &params, sizeof(MemallocParams));
            }
            break;
        }
    case MEMORY_IOC_FREE:
        {
            ret = copy_from_user(&params, (void *)arg, sizeof(MemallocParams));
            if (!ret)
            {
                GFP_Free(filp, params.bus_address);
                ret = copy_to_user((MemallocParams *)arg, &params, sizeof(MemallocParams));
            }
            break;
        }
    case MEMORY_IOC_DMABUF_EXPORT:
        {
            ret = copy_from_user(&params, (void *)arg, sizeof(MemallocParams));
            if (!ret)
            {
                ret = DMABUF_Export(filp, params.bus_address, params.flags, &params.fd);
                if (!ret)
                    ret = copy_to_user((MemallocParams *)arg, &params, sizeof(MemallocParams));
            }
            break;
        }
    case MEMORY_IOC_DMABUF_IMPORT:
        {
            ret = copy_from_user(&params, (void *)arg, sizeof(MemallocParams));
            if (!ret)
            {
                ret = DMABUF_Import(filp, params.fd, &params.bus_address, &params.size);
                params.translation_offset = 0;
                if (!ret)
                    ret = copy_to_user((MemallocParams *)arg, &params, sizeof(MemallocParams));
            }
            break;
        }
    case MEMORY_IOC_DMABUF_RELEASE:
        {
            ret = copy_from_user(&params, (void *)arg, sizeof(MemallocParams));
            if (!ret)
            {
                DMABUF_Release(filp, params.bus_address);
                ret = copy_to_user((MemallocParams *)arg, &params, sizeof(MemallocParams));
            }
            break;
        }
    default:
        ret = EINVAL;
    }

    return ret;
}
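/*
** A minimal user-space sketch of the ioctl flow. Assumptions: the device
** node path "/dev/allocator" is hypothetical, and the exact MemallocParams
** field set and MEMORY_IOC_* values come from kernel_allocator.h and the
** platform integration, so they may differ:
**
**     MemallocParams params = { 0 };
**     int fd = open("/dev/allocator", O_RDWR);    // hypothetical node
**
**     params.size = 1 << 20;
**     params.flags = ALLOC_FLAG_CONTIGUOUS;
**     ioctl(fd, MEMORY_IOC_ALLOCATE, &params);    // fills params.bus_address
**
**     // GFP_MapUser() recovers the block from the page offset, so the
**     // mmap offset must equal the (page-aligned) bus address.
**     void *va = mmap(NULL, params.size, PROT_READ | PROT_WRITE,
**                     MAP_SHARED, fd, params.bus_address);
**     ...
**     ioctl(fd, MEMORY_IOC_FREE, &params);
*/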
int allocator_init(struct device *dev)
{
    int ret = 0;

    gdev = dev;

    INIT_LIST_HEAD(&fileList);

    return ret;
}

void allocator_remove(void)
{
}
int allocator_open(struct inode *inode, struct file *filp)
{
    int ret = 0;
    struct file_node *fnode = NULL;

    if (AllocateMemory(sizeof(struct file_node), (void **)&fnode))
        return ENOMEM;

    fnode->filp = filp;
    INIT_LIST_HEAD(&fnode->memList);

    spin_lock(&mem_lock);
    list_add_tail(&fnode->link, &fileList);
    spin_unlock(&mem_lock);

    return ret;
}
void allocator_release(struct inode *inode, struct file *filp)
{
#if 0
    struct file_node *fnode = find_and_delete_file_node(filp);
    struct mem_node *node;
    struct mem_node *temp;

    if (NULL == fnode)
        return;

    list_for_each_entry_safe(node, temp, &fnode->memList, link)
    {
        /* This is not expected: a memory leak has been detected. */
        printk("Allocator: Found unfreed memory at 0x%lx\n", node->busAddr);

        /* Both helpers unlink and free the node themselves. */
        if (node->isImported)
            DMABUF_Release(filp, node->busAddr);
        else
            GFP_Free(filp, node->busAddr);
    }

    list_del(&fnode->link);
    FreeMemory(fnode);
#endif
}