- /*!
- *****************************************************************************
- * Copyright (c) Imagination Technologies Ltd.
- *
- * The contents of this file are subject to the MIT license as set out below.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- * Alternatively, the contents of this file may be used under the terms of the
- * GNU General Public License Version 2 ("GPL") in which case the provisions of
- * GPL are applicable instead of those above.
- *
- * If you wish to allow use of your version of this file only under the terms
- * of GPL, and not to allow others to use your version of this file under the
- * terms of the MIT license, indicate your decision by deleting the provisions
- * above and replace them with the notice and other provisions required by GPL
- * as set out in the file called "GPLHEADER" included in this distribution. If
- * you do not delete the provisions above, a recipient may use your version of
- * this file under the terms of either the MIT license or GPL.
- *
- * This License is also included in this distribution in the file called
- * "MIT_COPYING".
- *
- *****************************************************************************/
- #include <linux/module.h>
- #include <linux/init.h>
- #include <linux/mm.h>
- #include <linux/idr.h>
- #include <linux/mutex.h>
- #include <linux/list.h>
- #include <linux/slab.h>
- #include <linux/device.h>
- #include <linux/dma-mapping.h>
- #include <img_mem_man.h>
- #include <vha_drv_common.h>
- #include <mmu.h>
- #include <heap.h>
- #include "img_mem_man_priv.h"
- /* Maximum number of processes */
- #define MAX_PROC_CTX 1000
- /* Minimum page size (4KB) bits. */
- #define MIN_PAGE_SIZE_BITS 12
- struct mem_man {
- struct idr heaps;
- struct idr mem_ctxs;
- struct mutex mutex;
- unsigned cache_usage;
- };
- /* define like this, so it is easier to convert to a function argument later */
- static struct mem_man mem_man_data;
- /* wrapper struct for imgmmu_page */
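- /* type selects the MMU level the page backs (IMGMMU_PTYPE_PC/PD/PT);
- * bypass_addr_trans skips the heap's device-address translation when an
- * entry is written; use_parity enables the PTE parity bit for PT pages. */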
- struct mmu_page {
- struct buffer *buffer;
- struct imgmmu_page page;
- unsigned char type;
- bool bypass_addr_trans;
- bool use_parity;
- };
- static bool trace_physical_pages;
- module_param(trace_physical_pages, bool, 0444);
- MODULE_PARM_DESC(trace_physical_pages,
- "Enables tracing of physical pages being mapped into MMU");
- static bool cache_sync = true;
- module_param(cache_sync, bool, 0444);
- MODULE_PARM_DESC(cache_sync,
- "cache sync mode: 0-no sync; 1-force sync (even if hw provides coherency);");
- /*
- * memory heaps
- */
- static char *get_heap_name(enum img_mem_heap_type type)
- {
- switch (type) {
- case IMG_MEM_HEAP_TYPE_UNIFIED:
- return "unified";
- case IMG_MEM_HEAP_TYPE_CARVEOUT:
- return "carveout";
- case IMG_MEM_HEAP_TYPE_ION:
- return "ion";
- case IMG_MEM_HEAP_TYPE_DMABUF:
- return "dmabuf";
- case IMG_MEM_HEAP_TYPE_COHERENT:
- return "coherent";
- case IMG_MEM_HEAP_TYPE_ANONYMOUS:
- return "anonymous";
- case IMG_MEM_HEAP_TYPE_OCM:
- return "ocm";
- default:
- WARN_ON(type);
- return "unknown";
- }
- }
- int img_mem_add_heap(const struct heap_config *heap_cfg, int *heap_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap;
- int (*init_fn)(const struct heap_config *heap_cfg, struct heap *heap);
- int ret;
- pr_debug("%s:%d\n", __func__, __LINE__);
- switch (heap_cfg->type) {
- case IMG_MEM_HEAP_TYPE_UNIFIED:
- init_fn = img_mem_unified_init;
- break;
- case IMG_MEM_HEAP_TYPE_COHERENT:
- init_fn = img_mem_coherent_init;
- break;
- #ifdef CONFIG_DMA_SHARED_BUFFER
- case IMG_MEM_HEAP_TYPE_DMABUF:
- init_fn = img_mem_dmabuf_init;
- break;
- #endif
- #ifdef ION_SUPPORTED
- #ifdef CONFIG_ION
- case IMG_MEM_HEAP_TYPE_ION:
- init_fn = img_mem_ion_init;
- break;
- #endif
- #endif
- #ifdef CONFIG_GENERIC_ALLOCATOR
- case IMG_MEM_HEAP_TYPE_CARVEOUT:
- init_fn = img_mem_carveout_init;
- break;
- #endif
- case IMG_MEM_HEAP_TYPE_ANONYMOUS:
- init_fn = img_mem_anonymous_init;
- break;
- case IMG_MEM_HEAP_TYPE_OCM:
- init_fn = img_mem_ocm_init;
- break;
- default:
- pr_err("%s: heap type %d unknown\n", __func__, heap_cfg->type);
- return -EINVAL;
- }
- heap = kmalloc(sizeof(struct heap), GFP_KERNEL);
- if (!heap)
- return -ENOMEM;
- ret = mutex_lock_interruptible(&mem_man->mutex);
- if (ret)
- goto lock_failed;
- ret = idr_alloc(&mem_man->heaps, heap, IMG_MEM_MAN_MIN_HEAP,
- IMG_MEM_MAN_MAX_HEAP, GFP_KERNEL);
- if (ret < 0) {
- pr_err("%s: idr_alloc failed\n", __func__);
- goto alloc_id_failed;
- }
- heap->id = ret;
- heap->type = heap_cfg->type;
- heap->options = heap_cfg->options;
- heap->to_dev_addr = heap_cfg->to_dev_addr;
- heap->to_host_addr = heap_cfg->to_host_addr;
- heap->priv = NULL;
- heap->cache_sync = true;
- heap->alt_cache_attr = heap_cfg->cache_attr;
- ret = init_fn(heap_cfg, heap);
- if (ret) {
- pr_err("%s: heap init failed\n", __func__);
- goto heap_init_failed;
- }
- *heap_id = heap->id;
- mutex_unlock(&mem_man->mutex);
- pr_debug("%s created heap %d type %d (%s)\n",
- __func__, *heap_id, heap_cfg->type, get_heap_name(heap->type));
- return 0;
- heap_init_failed:
- idr_remove(&mem_man->heaps, heap->id);
- alloc_id_failed:
- mutex_unlock(&mem_man->mutex);
- lock_failed:
- kfree(heap);
- return ret;
- }
- EXPORT_SYMBOL(img_mem_add_heap);
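- /* Typical registration flow (a sketch only; the heap_config fields shown
- * are assumptions and depend on the platform setup):
- *
- *   struct heap_config cfg = { .type = IMG_MEM_HEAP_TYPE_UNIFIED };
- *   int heap_id;
- *   int ret = img_mem_add_heap(&cfg, &heap_id);
- *   ...
- *   img_mem_del_heap(heap_id);
- */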
- static void _img_mem_del_heap(struct heap *heap)
- {
- struct mem_man *mem_man = &mem_man_data;
- pr_debug("%s heap %d 0x%p\n", __func__, heap->id, heap);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- if (heap->ops->destroy)
- heap->ops->destroy(heap);
- idr_remove(&mem_man->heaps, heap->id);
- }
- void img_mem_del_heap(int heap_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap;
- pr_debug("%s:%d heap %d\n", __func__, __LINE__, heap_id);
- mutex_lock(&mem_man->mutex);
- heap = idr_find(&mem_man->heaps, heap_id);
- if (!heap) {
- pr_warn("%s heap %d not found!\n", __func__, heap_id);
- mutex_unlock(&mem_man->mutex);
- return;
- }
- _img_mem_del_heap(heap);
- mutex_unlock(&mem_man->mutex);
- kfree(heap);
- }
- EXPORT_SYMBOL(img_mem_del_heap);
- int img_mem_get_heap_info(int heap_id, uint8_t *type, uint32_t *attrs)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap;
- pr_debug("%s:%d heap %d\n", __func__, __LINE__, heap_id);
- if (heap_id < IMG_MEM_MAN_MIN_HEAP || heap_id > IMG_MEM_MAN_MAX_HEAP) {
- pr_err("%s heap %d does not match internal constraints <%u - %u>!\n",
- __func__, heap_id, IMG_MEM_MAN_MIN_HEAP, IMG_MEM_MAN_MAX_HEAP);
- return -EINVAL;
- }
- mutex_lock(&mem_man->mutex);
- heap = idr_find(&mem_man->heaps, heap_id);
- if (!heap) {
- pr_debug("%s heap %d not found!\n", __func__, heap_id);
- mutex_unlock(&mem_man->mutex);
- return -ENOENT;
- }
- *type = heap->type;
- *attrs = 0;
- if (heap->ops->import)
- *attrs |= IMG_MEM_HEAP_ATTR_IMPORT;
- if (heap->ops->export)
- *attrs |= IMG_MEM_HEAP_ATTR_EXPORT;
- if (heap->ops->alloc && !heap->ops->import)
- *attrs |= IMG_MEM_HEAP_ATTR_INTERNAL;
- if (heap->type == IMG_MEM_HEAP_TYPE_OCM)
- *attrs = IMG_MEM_HEAP_ATTR_SEALED;
- /* User attributes */
- *attrs |= heap->options.ocm.hattr;
- mutex_unlock(&mem_man->mutex);
- return 0;
- }
- EXPORT_SYMBOL(img_mem_get_heap_info);
- /*
- * related to process context (contains SYSMEM heap's functionality in general)
- */
- int img_mem_create_proc_ctx(struct mem_ctx **new_ctx)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct mem_ctx *ctx;
- int ret = 0;
- pr_debug("%s:%d\n", __func__, __LINE__);
- ctx = kzalloc(sizeof(struct mem_ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- idr_init(&ctx->buffers);
- INIT_LIST_HEAD(&ctx->mmu_ctxs);
- mutex_lock(&mem_man->mutex);
- ret = idr_alloc(&mem_man->mem_ctxs, ctx, 0, MAX_PROC_CTX,
- GFP_KERNEL);
- if (ret < 0) {
- mutex_unlock(&mem_man->mutex);
- pr_err("%s: idr_alloc failed\n", __func__);
- goto idr_alloc_failed;
- }
- /* Assign id to the newly created context. */
- ctx->id = ret;
- mutex_unlock(&mem_man->mutex);
- pr_debug("%s id:%d\n", __func__, ctx->id);
- *new_ctx = ctx;
- return 0;
- idr_alloc_failed:
- kfree(ctx);
- return ret;
- }
- EXPORT_SYMBOL(img_mem_create_proc_ctx);
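- /* Typical per-process lifecycle (a sketch; dev, heap_id, size and attr
- * stand for caller-provided values, and a heap is assumed to have been
- * registered with img_mem_add_heap() already):
- *
- *   struct mem_ctx *ctx;
- *   int buf_id;
- *   img_mem_create_proc_ctx(&ctx);
- *   img_mem_alloc(dev, ctx, heap_id, size, attr, &buf_id);
- *   ...
- *   img_mem_free(ctx, buf_id);
- *   img_mem_destroy_proc_ctx(ctx);
- */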
- static void _img_mem_free(struct buffer *buffer);
- static void _img_mmu_unmap(struct mmu_ctx_mapping *mapping);
- static void _img_mmu_ctx_destroy(struct mmu_ctx *ctx);
- static void _img_mem_destroy_proc_ctx(struct mem_ctx *ctx)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- int buf_id;
- pr_debug("%s:%d id:%d\n", __func__, __LINE__, ctx->id);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- /* free derelict mmu contexts */
- while (!list_empty(&ctx->mmu_ctxs)) {
- struct mmu_ctx *mc;
- mc = list_first_entry(&ctx->mmu_ctxs,
- struct mmu_ctx, mem_ctx_entry);
- pr_warn("%s: found derelict mmu context %p\n", __func__, mc);
- _img_mmu_ctx_destroy(mc);
- kfree(mc);
- }
- /* free derelict buffers */
- buf_id = IMG_MEM_MAN_MIN_BUFFER;
- buffer = idr_get_next(&ctx->buffers, &buf_id);
- while (buffer) {
- pr_warn("%s: found derelict buffer %d\n", __func__, buf_id);
- _img_mem_free(buffer);
- buf_id = IMG_MEM_MAN_MIN_BUFFER;
- buffer = idr_get_next(&ctx->buffers, &buf_id);
- }
- idr_destroy(&ctx->buffers);
- idr_remove(&mem_man->mem_ctxs, ctx->id);
- }
- void img_mem_destroy_proc_ctx(struct mem_ctx *ctx)
- {
- struct mem_man *mem_man = &mem_man_data;
- pr_debug("%s:%d\n", __func__, __LINE__);
- mutex_lock(&mem_man->mutex);
- _img_mem_destroy_proc_ctx(ctx);
- mutex_unlock(&mem_man->mutex);
- kfree(ctx);
- }
- EXPORT_SYMBOL(img_mem_destroy_proc_ctx);
- static int _img_mem_alloc(struct device *device, struct mem_ctx *ctx,
- struct heap *heap, size_t size,
- enum img_mem_attr attr, struct buffer **buffer_new)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- int ret;
- /* Allocations for MMU pages are still 4k so CPU page size is enough */
- size_t align = attr & IMG_MEM_ATTR_MMU ?
- imgmmu_get_cpu_page_size() : IMGMMU_GET_MAX_PAGE_SIZE();
- pr_debug("%s heap %p '%s' ctx %p size %zu\n", __func__,
- heap, get_heap_name(heap->type), ctx, size);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- if (size == 0) {
- pr_err("%s: buffer size is zero\n", __func__);
- return -EINVAL;
- }
- if (heap->ops == NULL || heap->ops->alloc == NULL) {
- pr_err("%s: no alloc function in heap %d!\n",
- __func__, heap->id);
- return -EINVAL;
- }
- buffer = kzalloc(sizeof(struct buffer), GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
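- /* Buffer ids are carved out per memory context: context N gets ids in
- * (IMG_MEM_MAN_MAX_BUFFER * N + IMG_MEM_MAN_MIN_BUFFER) ..
- * (IMG_MEM_MAN_MAX_BUFFER * N + IMG_MEM_MAN_MAX_BUFFER), so an id maps
- * back to a single owning context. */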
- ret = idr_alloc(&ctx->buffers, buffer,
- (IMG_MEM_MAN_MAX_BUFFER * ctx->id) +
- IMG_MEM_MAN_MIN_BUFFER,
- (IMG_MEM_MAN_MAX_BUFFER * ctx->id) +
- IMG_MEM_MAN_MAX_BUFFER,
- GFP_KERNEL);
- if (ret < 0) {
- pr_err("%s: idr_alloc failed\n", __func__);
- goto idr_alloc_failed;
- }
- buffer->id = ret;
- buffer->request_size = size;
- buffer->actual_size = ((size + align - 1) /
- align) * align;
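- /* actual_size is the request rounded up to the allocation granularity,
- * e.g. a 5000 byte request with a 4096 byte page rounds up to 8192. */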
- buffer->device = device;
- buffer->mem_ctx = ctx;
- buffer->heap = heap;
- INIT_LIST_HEAD(&buffer->mappings);
- buffer->kptr = NULL;
- buffer->priv = NULL;
- /* Check if the heap has been registered with alternative cache attributes */
- if (heap->alt_cache_attr &&
- (heap->alt_cache_attr != (attr & IMG_MEM_ATTR_CACHE_MASK))) {
- pr_debug("%s heap %d changing cache attributes from %x to %x\n",
- __func__, heap->id, attr & IMG_MEM_ATTR_CACHE_MASK,
- heap->alt_cache_attr);
- attr &= ~IMG_MEM_ATTR_CACHE_MASK;
- attr |= heap->alt_cache_attr;
- }
- ret = heap->ops->alloc(device, heap, buffer->actual_size, attr, buffer);
- if (ret) {
- pr_err("%s: heap %d alloc failed\n", __func__, heap->id);
- goto heap_alloc_failed;
- }
- if (heap->type != IMG_MEM_HEAP_TYPE_OCM) {
- __img_pdump_printf(device, "-- Allocating zeroed buffer id:%d size:%zu\n",
- buffer->id, buffer->actual_size);
- __img_pdump_printf(device, "CALLOC "_PMEM_":BLOCK_%d %#zx %#zx 0x0\n",
- buffer->id, buffer->actual_size, align);
- }
- ctx->mem_usage_curr += buffer->actual_size;
- if (ctx->mem_usage_curr > ctx->mem_usage_max)
- ctx->mem_usage_max = ctx->mem_usage_curr;
- *buffer_new = buffer;
- pr_debug("%s heap %p ctx %p created buffer %d (%p) actual_size %zu\n",
- __func__, heap, ctx, buffer->id, buffer, buffer->actual_size);
- return 0;
- heap_alloc_failed:
- idr_remove(&ctx->buffers, buffer->id);
- idr_alloc_failed:
- kfree(buffer);
- return ret;
- }
- int img_mem_alloc(struct device *device, struct mem_ctx *ctx, int heap_id,
- size_t size, enum img_mem_attr attr, int *buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap;
- struct buffer *buffer;
- int ret;
- pr_debug("%s heap %d ctx %p size %zu\n", __func__, heap_id, ctx, size);
- ret = mutex_lock_interruptible(&mem_man->mutex);
- if (ret)
- return ret;
- heap = idr_find(&mem_man->heaps, heap_id);
- if (!heap) {
- pr_err("%s: heap id %d not found\n", __func__, heap_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- ret = _img_mem_alloc(device, ctx, heap, size, attr, &buffer);
- if (ret) {
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- *buf_id = buffer->id;
- mutex_unlock(&mem_man->mutex);
- pr_debug("%s heap %d ctx %p created buffer %d (%p) size %zu\n",
- __func__, heap_id, ctx, *buf_id, buffer, size);
- return ret;
- }
- EXPORT_SYMBOL(img_mem_alloc);
- static int _img_mem_import(struct device *device,
- struct mem_ctx *ctx, struct heap *heap,
- size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
- struct buffer **buffer_new)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- int ret;
- size_t align = IMGMMU_GET_MAX_PAGE_SIZE();
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- if (size == 0) {
- pr_err("%s: buffer size is zero\n", __func__);
- return -EINVAL;
- }
- if (heap->ops == NULL || heap->ops->import == NULL) {
- pr_err("%s: no import function in heap %d!\n",
- __func__, heap->id);
- return -EINVAL;
- }
- buffer = kzalloc(sizeof(struct buffer), GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
- ret = idr_alloc(&ctx->buffers, buffer,
- (IMG_MEM_MAN_MAX_BUFFER * ctx->id) +
- IMG_MEM_MAN_MIN_BUFFER,
- (IMG_MEM_MAN_MAX_BUFFER * ctx->id) +
- IMG_MEM_MAN_MAX_BUFFER,
- GFP_KERNEL);
- if (ret < 0) {
- pr_err("%s: idr_alloc failed\n", __func__);
- goto idr_alloc_failed;
- }
- buffer->id = ret;
- buffer->request_size = size;
- buffer->actual_size = ((size + align - 1) /
- align) * align;
- buffer->device = device;
- buffer->mem_ctx = ctx;
- buffer->heap = heap;
- INIT_LIST_HEAD(&buffer->mappings);
- buffer->kptr = NULL;
- buffer->priv = NULL;
- /* If the MMU page size is bigger than the CPU page size
- * an extra check against the requested size is needed:
- * the aligned size must not exceed the requested size
- * by more than one CPU page, otherwise the HW could
- * end up accessing pages that do not exist */
- if (buffer->actual_size - buffer->request_size >
- imgmmu_get_cpu_page_size()) {
- pr_err("%s: original buffer size is not MMU page size aligned!\n",
- __func__);
- ret = -EINVAL;
- goto heap_import_failed;
- }
- /* Check if the heap has been registered with alternative cache attributes */
- if (heap->alt_cache_attr &&
- (heap->alt_cache_attr != (attr & IMG_MEM_ATTR_CACHE_MASK))) {
- pr_debug("%s heap %d changing cache attributes from %x to %x\n",
- __func__, heap->id, attr & IMG_MEM_ATTR_CACHE_MASK,
- heap->alt_cache_attr);
- attr &= ~IMG_MEM_ATTR_CACHE_MASK;
- attr |= heap->alt_cache_attr;
- }
- ret = heap->ops->import(device, heap, buffer->actual_size, attr,
- buf_hnd, buffer);
- if (ret) {
- pr_err("%s: heap %d import failed\n", __func__, heap->id);
- goto heap_import_failed;
- }
- __img_pdump_printf(device, "-- Allocating zeroed buffer id:%d size:%zu for imported data\n",
- buffer->id, buffer->actual_size);
- __img_pdump_printf(device, "CALLOC "_PMEM_":BLOCK_%d %#zx %#zx 0x0\n",
- buffer->id, buffer->actual_size, align);
- ctx->mem_usage_curr += buffer->actual_size;
- if (ctx->mem_usage_curr > ctx->mem_usage_max)
- ctx->mem_usage_max = ctx->mem_usage_curr;
- *buffer_new = buffer;
- return 0;
- heap_import_failed:
- idr_remove(&ctx->buffers, buffer->id);
- idr_alloc_failed:
- kfree(buffer);
- return ret;
- }
- int img_mem_import(struct device *device, struct mem_ctx *ctx, int heap_id,
- size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
- int *buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap;
- struct buffer *buffer;
- int ret;
- pr_debug("%s heap %d ctx %p hnd %#llx\n", __func__, heap_id, ctx, buf_hnd);
- ret = mutex_lock_interruptible(&mem_man->mutex);
- if (ret)
- return ret;
- heap = idr_find(&mem_man->heaps, heap_id);
- if (!heap) {
- pr_err("%s: heap id %d not found\n", __func__, heap_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- ret = _img_mem_import(device, ctx, heap, size, attr, buf_hnd, &buffer);
- if (ret) {
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- *buf_id = buffer->id;
- mutex_unlock(&mem_man->mutex);
- pr_debug("%s buf_hnd %#llx heap %d (%s) buffer %d size %zu\n", __func__,
- buf_hnd, heap_id, get_heap_name(heap->type), *buf_id, size);
- pr_debug("%s heap %d ctx %p created buffer %d (%p) size %zu\n",
- __func__, heap_id, ctx, *buf_id, buffer, size);
- return ret;
- }
- EXPORT_SYMBOL(img_mem_import);
- static int _img_mem_export(struct device *device,
- struct mem_ctx *ctx, struct heap *heap,
- size_t size, enum img_mem_attr attr,
- struct buffer *buffer, uint64_t *buf_hnd)
- {
- struct mem_man *mem_man = &mem_man_data;
- int ret;
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- if (size > buffer->actual_size) {
- pr_err("%s: buffer size (%zu) bigger than actual size (%zu)\n",
- __func__, size, buffer->actual_size);
- return -EINVAL;
- }
- if (heap->ops == NULL || heap->ops->export == NULL) {
- pr_err("%s: no export function in heap %d!\n",
- __func__, heap->id);
- return -EINVAL;
- }
- ret = heap->ops->export(device, heap, buffer->actual_size, attr,
- buffer, buf_hnd);
- if (ret) {
- pr_err("%s: heap %d export failed\n", __func__, heap->id);
- return -EFAULT;
- }
- return ret;
- }
- int img_mem_export(struct device *device, struct mem_ctx *ctx, int buf_id,
- size_t size, enum img_mem_attr attr, uint64_t *buf_hnd)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap;
- struct buffer *buffer;
- int ret;
- pr_debug("%s ctx %p buffer id %d\n", __func__, ctx, buf_id);
- ret = mutex_lock_interruptible(&mem_man->mutex);
- if (ret)
- return ret;
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- heap = buffer->heap;
- ret = _img_mem_export(device, ctx, heap, size, attr, buffer, buf_hnd);
- if (ret) {
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- mutex_unlock(&mem_man->mutex);
- pr_debug("%s buf_hnd %#llx heap %d (%s) buffer %d size %zu\n", __func__,
- *buf_hnd, heap->id, get_heap_name(heap->type), buf_id, size);
- pr_debug("%s heap %d ctx %p exported buffer %d (%p) size %zu\n",
- __func__, heap->id, ctx, buf_id, buffer, size);
- return ret;
- }
- EXPORT_SYMBOL(img_mem_export);
- static void _img_mem_free(struct buffer *buffer)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap = buffer->heap;
- struct mem_ctx *ctx = buffer->mem_ctx;
- pr_debug("%s buffer 0x%p\n", __func__, buffer);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- if (heap->ops == NULL || heap->ops->free == NULL) {
- pr_err("%s: no free function in heap %d!\n",
- __func__, heap->id);
- return;
- }
- while (!list_empty(&buffer->mappings)) {
- struct mmu_ctx_mapping *map;
- map = list_first_entry(&buffer->mappings,
- struct mmu_ctx_mapping, buffer_entry);
- pr_debug("%s: found mapping for buffer %d (size %zu)\n",
- __func__, map->buffer->id, map->buffer->actual_size);
- _img_mmu_unmap(map);
- kfree(map);
- }
- heap->ops->free(heap, buffer);
- if (ctx->mem_usage_curr >= buffer->actual_size)
- ctx->mem_usage_curr -= buffer->actual_size;
- else
- WARN_ON(1);
- idr_remove(&ctx->buffers, buffer->id);
- if (heap->type != IMG_MEM_HEAP_TYPE_OCM) {
- __img_pdump_printf(buffer->device, "-- Freeing buffer id:%d size:%zu\n",
- buffer->id, buffer->actual_size);
- __img_pdump_printf(buffer->device, "FREE "_PMEM_":BLOCK_%d\n", buffer->id);
- }
- kfree(buffer);
- }
- void img_mem_free(struct mem_ctx *ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return;
- }
- _img_mem_free(buffer);
- mutex_unlock(&mem_man->mutex);
- }
- EXPORT_SYMBOL(img_mem_free);
- #ifdef KERNEL_DMA_FENCE_SUPPORT
- /*
- * dma_fence ops
- */
- static const char *_img_mem_sync_get_driver_name(struct dma_fence *f)
- {
- return "buf_sync";
- }
- static const char *_img_mem_sync_get_timeline_name(struct dma_fence *f)
- {
- return "buf_timeline";
- }
- static bool _img_mem_sync_enable_signaling(struct dma_fence *f)
- {
- return true;
- }
- static void _img_mem_sync_release(struct dma_fence *fence)
- {
- dma_fence_free(fence);
- }
- static struct dma_fence_ops dma_fence_ops = {
- .get_driver_name = _img_mem_sync_get_driver_name,
- .get_timeline_name = _img_mem_sync_get_timeline_name,
- .enable_signaling = _img_mem_sync_enable_signaling,
- .release = _img_mem_sync_release,
- .wait = dma_fence_default_wait
- };
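- /* Buffer fence lifecycle: img_mem_add_fence() attaches a dma_fence to a
- * buffer and returns it to the caller to wait on; img_mem_signal_fence()
- * or img_mem_remove_fence() detaches the fence and signals it. */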
- struct dma_fence * img_mem_add_fence(struct mem_ctx *ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return NULL;
- }
- if (buffer->fence) {
- pr_err("%s: fence for buffer id %d already allocated and not freed \n",
- __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return NULL;
- }
- buffer->fence = kmalloc(sizeof(struct buffer_fence), GFP_KERNEL);
- if (!buffer->fence) {
- pr_err("%s: cannot allocate fence for buffer id %d\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return NULL;
- }
- spin_lock_init(&buffer->fence->lock);
- dma_fence_init(&buffer->fence->fence,
- &dma_fence_ops,
- &buffer->fence->lock,
- dma_fence_context_alloc(1),
- 1);
- mutex_unlock(&mem_man->mutex);
- return &buffer->fence->fence;
- }
- EXPORT_SYMBOL(img_mem_add_fence);
- void img_mem_remove_fence(struct mem_ctx *ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- struct dma_fence *fence = NULL;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return;
- }
- if (buffer->fence) {
- fence = &buffer->fence->fence;
- buffer->fence = NULL;
- }
- mutex_unlock(&mem_man->mutex);
- if (fence)
- dma_fence_signal(fence);
- }
- EXPORT_SYMBOL(img_mem_remove_fence);
- int img_mem_signal_fence(struct mem_ctx *ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- struct dma_fence *fence = NULL;
- int ret = -1;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -1;
- }
- if (buffer->fence) {
- fence = &buffer->fence->fence;
- buffer->fence = NULL;
- }
- mutex_unlock(&mem_man->mutex);
- if (fence)
- ret = dma_fence_signal(fence);
- return ret;
- }
- EXPORT_SYMBOL(img_mem_signal_fence);
- #endif
- static void _img_mem_sync_device_to_cpu(struct buffer *buffer, bool force);
- int img_mem_map_um(struct mem_ctx *ctx, int buf_id, struct vm_area_struct *vma)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- struct heap *heap;
- int ret;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- pr_debug("%s:%d buffer 0x%p\n", __func__, __LINE__, buffer);
- heap = buffer->heap;
- if (heap->ops == NULL || heap->ops->map_um == NULL) {
- pr_err("%s: no map_um in heap %d!\n", __func__, heap->id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- ret = heap->ops->map_um(heap, buffer, vma);
- /* Always invalidate the buffer when it is mapped into UM for reading */
- if (!ret && (vma->vm_flags & VM_READ) && !(vma->vm_flags & VM_WRITE))
- _img_mem_sync_device_to_cpu(buffer, false);
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- EXPORT_SYMBOL(img_mem_map_um);
- int img_mem_unmap_um(struct mem_ctx *ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- struct heap *heap;
- int ret;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- pr_debug("%s:%d buffer 0x%p\n", __func__, __LINE__, buffer);
- heap = buffer->heap;
- if (heap->ops == NULL || heap->ops->unmap_um == NULL) {
- pr_err("%s: no map_um in heap %d!\n", __func__, heap->id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- ret = heap->ops->unmap_um(heap, buffer);
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- EXPORT_SYMBOL(img_mem_unmap_um);
- static int _img_mem_map_km(struct buffer *buffer)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap = buffer->heap;
- pr_debug("%s:%d buffer 0x%p\n", __func__, __LINE__, buffer);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- if (heap->ops == NULL || heap->ops->map_km == NULL) {
- pr_err("%s: no map_km in heap %d!\n", __func__, heap->id);
- return -EINVAL;
- }
- return heap->ops->map_km(heap, buffer);
- }
- int img_mem_map_km(struct mem_ctx *ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- int ret;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- ret = _img_mem_map_km(buffer);
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- EXPORT_SYMBOL(img_mem_map_km);
- static int _img_mem_unmap_km(struct buffer *buffer)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap = buffer->heap;
- pr_debug("%s:%d buffer 0x%p\n", __func__, __LINE__, buffer);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- if (heap->ops == NULL || heap->ops->unmap_km == NULL) {
- pr_err("%s: no unmap_km in heap %d!\n", __func__, heap->id);
- return -EINVAL;
- }
- return heap->ops->unmap_km(heap, buffer);
- }
- int img_mem_unmap_km(struct mem_ctx *ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- int ret;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- ret = _img_mem_unmap_km(buffer);
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- EXPORT_SYMBOL(img_mem_unmap_km);
- uint64_t *img_mem_get_page_array(struct mem_ctx *mem_ctx, int buf_id)
- {
- struct buffer *buffer;
- struct heap *heap;
- struct mem_man *mem_man = &mem_man_data;
- uint64_t *addrs = NULL;
- int ret;
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&mem_ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n",
- __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return NULL;
- }
- heap = buffer->heap;
- if (heap && heap->ops && heap->ops->get_page_array) {
- ret = heap->ops->get_page_array(heap, buffer, &addrs);
- if (ret || addrs == NULL) {
- pr_err("%s: no page array for heap %d buffer %d\n",
- __func__, heap->id, buffer->id);
- }
- } else
- pr_err("%s: heap %d does not support page arrays\n",
- __func__, heap->id);
- mutex_unlock(&mem_man->mutex);
- return addrs;
- }
- EXPORT_SYMBOL(img_mem_get_page_array);
- /* gets physical address of a single page at given offset */
- uint64_t img_mem_get_single_page(struct mem_ctx *mem_ctx, int buf_id,
- unsigned int offset)
- {
- struct buffer *buffer;
- struct heap *heap;
- struct mem_man *mem_man = &mem_man_data;
- int ret;
- uint64_t addr = 0;
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&mem_ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -1;
- }
- heap = buffer->heap;
- if (!heap) {
- pr_err("%s: buffer %d does not point any heap it belongs to!\n",
- __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -1;
- }
- if (heap->ops && heap->ops->get_sg_table) {
- struct sg_table *sgt;
- struct scatterlist *sgl;
- int offs = offset;
- bool use_sg_dma = false;
- ret = heap->ops->get_sg_table(heap, buffer, &sgt, &use_sg_dma);
- if (ret) {
- pr_err("%s: heap %d buffer %d no sg_table!\n",
- __func__, heap->id, buffer->id);
- mutex_unlock(&mem_man->mutex);
- return -1;
- }
- sgl = sgt->sgl;
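- /* Walk the scatterlist until the accumulated segment length covers
- * the requested offset; the matching segment holds the page. */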
- while (sgl) {
- if (use_sg_dma)
- offs -= sg_dma_len(sgl);
- else
- offs -= sgl->length;
- if (offs <= 0)
- break;
- sgl = sg_next(sgl);
- }
- if (!sgl) {
- pr_err("%s: heap %d buffer %d wrong offset %d!\n",
- __func__, heap->id, buffer->id, offset);
- mutex_unlock(&mem_man->mutex);
- return -1;
- }
- if (use_sg_dma)
- addr = sg_dma_address(sgl);
- else
- addr = sg_phys(sgl);
- } else if (heap->ops && heap->ops->get_page_array) {
- uint64_t *addrs;
- int page_idx = offset / PAGE_SIZE;
- ret = heap->ops->get_page_array(heap, buffer, &addrs);
- if (ret) {
- pr_err("%s: heap %d buffer %d no page array!\n",
- __func__, heap->id, buffer->id);
- mutex_unlock(&mem_man->mutex);
- return -1;
- }
- if (offset > buffer->actual_size) {
- pr_err("%s: heap %d buffer %d wrong offset %d!\n",
- __func__, heap->id, buffer->id, offset);
- mutex_unlock(&mem_man->mutex);
- return -1;
- }
- addr = addrs[page_idx];
- }
- mutex_unlock(&mem_man->mutex);
- return addr;
- }
- EXPORT_SYMBOL(img_mem_get_single_page);
- void *img_mem_get_kptr(struct mem_ctx *ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- void *kptr;
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return NULL;
- }
- kptr = buffer->kptr;
- mutex_unlock(&mem_man->mutex);
- return kptr;
- }
- EXPORT_SYMBOL(img_mem_get_kptr);
- phys_addr_t img_mem_get_dev_addr(struct mem_ctx *mem_ctx, int buf_id,
- phys_addr_t addr)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- struct heap *heap;
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&mem_ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return addr;
- }
- heap = buffer->heap;
- if (heap->to_dev_addr)
- addr = heap->to_dev_addr(&heap->options, addr);
- mutex_unlock(&mem_man->mutex);
- return addr;
- }
- EXPORT_SYMBOL(img_mem_get_dev_addr);
- int img_mmu_init_cache(struct mmu_ctx *mmu_ctx, unsigned long cache_phys_start,
- uint32_t cache_size)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct pdump_descr* pdump = vha_pdump_dev_get_drvdata(mmu_ctx->device);
- mutex_lock(&mem_man->mutex);
- mmu_ctx->cache_phys_start = cache_phys_start;
- mmu_ctx->cache_size = cache_size;
- if (img_pdump_enabled(pdump) && cache_size && !mem_man->cache_usage) {
- __img_pdump_printf(mmu_ctx->device, "-- Allocating img mem cache buffer size:%u\n", cache_size);
- __img_pdump_printf(mmu_ctx->device, "CALLOC :OCM:BLOCK_CACHE %#x %#zx 0x0\n",
- cache_size, IMGMMU_GET_MAX_PAGE_SIZE());
- }
- mem_man->cache_usage++;
- mutex_unlock(&mem_man->mutex);
- return 0;
- }
- EXPORT_SYMBOL(img_mmu_init_cache);
- int img_mmu_clear_cache(struct mmu_ctx *mmu_ctx)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct pdump_descr* pdump = vha_pdump_dev_get_drvdata(mmu_ctx->device);
- mutex_lock(&mem_man->mutex);
- if (mem_man->cache_usage)
- mem_man->cache_usage--;
- if (img_pdump_enabled(pdump) && mmu_ctx->cache_size && !mem_man->cache_usage) {
- __img_pdump_printf(mmu_ctx->device, "-- Freeing img mem cache buffer size:%u\n",
- mmu_ctx->cache_size);
- __img_pdump_printf(mmu_ctx->device, "FREE :OCM:BLOCK_CACHE\n");
- }
- mutex_unlock(&mem_man->mutex);
- return 0;
- }
- EXPORT_SYMBOL(img_mmu_clear_cache);
- int img_mmu_move_pg_to_cache(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
- int buf_id, uint64_t virt_addr, uint32_t page_size, uint32_t page_idx)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- struct mmu_ctx_mapping *mapping;
- int ret = -EINVAL;
- if (page_size != imgmmu_get_page_size()) {
- pr_err("%s: page sizes does not match!\n", __func__);
- return -EINVAL;
- }
- if (!mmu_ctx->mmu_cat) {
- pr_err("%s: trying to move pages with mmu disabled!\n", __func__);
- return -EINVAL;
- }
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&mem_ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- if (buffer->actual_size <= page_idx * imgmmu_get_page_size()) {
- pr_err("%s: trying to remap out of the buffer boundaries!\n", __func__);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- list_for_each_entry(mapping, &buffer->mappings, buffer_entry) {
- if (mapping->virt_addr == virt_addr) {
- if (mapping->cache_offset + imgmmu_get_page_size() <= mmu_ctx->cache_size) {
- __img_pdump_printf(buffer->device, "-- Move page to CACHE\n");
- ret = imgmmu_cat_override_phys_addr(mmu_ctx->mmu_cat,
- mapping->virt_addr + page_idx * imgmmu_get_page_size(),
- mmu_ctx->cache_phys_start + mapping->cache_offset);
- mapping->cache_offset += imgmmu_get_page_size();
- }
- break;
- }
- }
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- EXPORT_SYMBOL(img_mmu_move_pg_to_cache);
- static void _img_mem_sync_cpu_to_device(struct buffer *buffer, bool force)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap = buffer->heap;
- if (!cache_sync) {
- pr_debug("%s:%d buffer %d size %zu cache synchronization disabled!\n",
- __func__, __LINE__, buffer->id, buffer->actual_size);
- return;
- }
- pr_debug("%s:%d buffer %d size %zu kptr %p cache(%d:%d)\n",
- __func__, __LINE__, buffer->id, buffer->actual_size,
- buffer->kptr, force, heap->cache_sync);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- if (heap->ops && heap->ops->sync_cpu_to_dev &&
- (force || heap->cache_sync))
- heap->ops->sync_cpu_to_dev(heap, buffer);
- #ifdef CONFIG_ARM
- dmb();
- #else
- /* Put memory barrier */
- mb();
- #endif
- }
- int img_mem_sync_cpu_to_device(struct mem_ctx *ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- _img_mem_sync_cpu_to_device(buffer, false);
- mutex_unlock(&mem_man->mutex);
- return 0;
- }
- EXPORT_SYMBOL(img_mem_sync_cpu_to_device);
- static void _img_mem_sync_device_to_cpu(struct buffer *buffer, bool force)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap = buffer->heap;
- if (!cache_sync) {
- pr_debug("%s:%d buffer %d size %zu cache synchronization disabled!\n",
- __func__, __LINE__, buffer->id, buffer->actual_size);
- return;
- }
- pr_debug("%s:%d buffer %d size %zu kptr %p cache(%d:%d)\n",
- __func__, __LINE__, buffer->id, buffer->actual_size,
- buffer->kptr, force, heap->cache_sync);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- if (heap->ops && heap->ops->sync_dev_to_cpu &&
- (force || heap->cache_sync))
- heap->ops->sync_dev_to_cpu(heap, buffer);
- }
- int img_mem_sync_device_to_cpu(struct mem_ctx *ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct buffer *buffer;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- _img_mem_sync_device_to_cpu(buffer, false);
- mutex_unlock(&mem_man->mutex);
- return 0;
- }
- EXPORT_SYMBOL(img_mem_sync_device_to_cpu);
- int img_mem_get_usage(const struct mem_ctx *ctx, size_t *max, size_t *curr)
- {
- struct mem_man *mem_man = &mem_man_data;
- mutex_lock(&mem_man->mutex);
- if (max)
- *max = ctx->mem_usage_max;
- if (curr)
- *curr = ctx->mem_usage_curr;
- mutex_unlock(&mem_man->mutex);
- return 0;
- }
- EXPORT_SYMBOL(img_mem_get_usage);
- int img_mmu_get_usage(const struct mem_ctx *ctx, size_t *max, size_t *curr)
- {
- struct mem_man *mem_man = &mem_man_data;
- mutex_lock(&mem_man->mutex);
- if (max)
- *max = ctx->mmu_usage_max;
- if (curr)
- *curr = ctx->mmu_usage_curr;
- mutex_unlock(&mem_man->mutex);
- return 0;
- }
- EXPORT_SYMBOL(img_mmu_get_usage);
- static int img_mmu_cache_get_offset(struct mem_ctx *mem_ctx,
- unsigned long addr, unsigned int *offset)
- {
- struct mmu_ctx *mmu_ctx;
- list_for_each_entry(mmu_ctx, &mem_ctx->mmu_ctxs, mem_ctx_entry) {
- if (addr >= mmu_ctx->cache_phys_start &&
- addr < mmu_ctx->cache_phys_start + mmu_ctx->cache_size) {
- *offset = addr - mmu_ctx->cache_phys_start;
- return 0;
- }
- }
- return -EINVAL;
- }
- /*
- * related to stream MMU context (contains IMGMMU functionality in general)
- */
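- /* Look up which buffer of the memory context contains the given physical
- * address, returning its id and byte offset (used to emit PDUMP symbols).
- * The per-buffer pcache cursor resumes the scan where the last lookup
- * stopped to avoid rescanning large scatterlists from the start. */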
- static int imgmmu_find_buffer(struct mem_ctx *ctx, uint64_t addr,
- int *buffer_id, unsigned int *buffer_offset)
- {
- struct heap *heap;
- struct buffer *buffer;
- int buf_id;
- unsigned int buf_offset;
- int ret;
- for (buf_id = *buffer_id;
- ((buffer) = idr_get_next(&ctx->buffers, &buf_id)) != NULL; ++buf_id) {
- heap = buffer->heap;
- if (heap->ops && heap->ops->get_sg_table) {
- struct sg_table *sgt;
- struct scatterlist *sgl;
- bool use_sg_dma = false;
- ret = heap->ops->get_sg_table(heap, buffer, &sgt, &use_sg_dma);
- if (ret) {
- pr_err("%s: heap %d buffer %d no sg_table!\n",
- __func__, heap->id, buffer->id);
- return -EINVAL;
- }
- if (buffer->pcache.last_sgl) {
- sgl = buffer->pcache.last_sgl;
- buf_offset = buffer->pcache.last_offset;
- } else {
- sgl = sgt->sgl;
- buf_offset = 0;
- }
- while (sgl) {
- phys_addr_t phys = use_sg_dma ?
- sg_dma_address(sgl) : sg_phys(sgl);
- unsigned int len = use_sg_dma ?
- sg_dma_len(sgl) : sgl->length;
- #if 0
- pr_err("%s: phys %llx len:%d addr:%llx\n",
- __func__, phys, len, addr);
- #endif
- if (phys == addr) {
- #if 0
- pr_err("%s: match @addr:%llx buf:%d offs:%d len:%d\n",
- __func__, addr, buffer->id, buf_offset, len);
- #endif
- *buffer_id = buffer->id;
- *buffer_offset = buf_offset;
- return 0;
- }
- buffer->pcache.last_offset = buf_offset += len;
- buffer->pcache.last_sgl = sgl = sg_next(sgl);
- }
- } else if (heap->ops && heap->ops->get_page_array) {
- uint64_t *addrs;
- int page_idx;
- ret = heap->ops->get_page_array(heap, buffer, &addrs);
- if (ret) {
- pr_err("%s: heap %d buffer %d no page_array!\n",
- __func__, heap->id, buffer->id);
- return -EINVAL;
- }
- if (buffer->pcache.last_sgl) {
- page_idx = buffer->pcache.last_idx;
- buf_offset = buffer->pcache.last_offset;
- } else {
- page_idx = 0;
- buf_offset = 0;
- }
- while (buf_offset < buffer->actual_size) {
- if (addrs[page_idx] == addr) {
- *buffer_id = buffer->id;
- *buffer_offset = buf_offset;
- return 0;
- }
- buffer->pcache.last_idx = page_idx++;
- buffer->pcache.last_offset = buf_offset += PAGE_SIZE;
- }
- } else {
- pr_err("%s: heap %d buffer %d no phys addrs found!\n",
- __func__, heap->id, buffer->id);
- return -EINVAL;
- }
- }
- return -EINVAL;
- }
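- /* imgmmu page allocator callback: allocates one CPU page from the MMU heap,
- * maps it into the kernel, zeroes it and reports its cpu/physical addresses
- * back to the MMU library for use as a PC, PD or PT table. */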
- static struct imgmmu_page *_page_alloc(void *arg, unsigned char type)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct mmu_ctx *mmu_ctx = arg;
- struct mmu_page *page;
- struct buffer *buffer;
- struct heap *heap;
- int ret;
- pr_debug("%s:%d arg %p\n", __func__, __LINE__, arg);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- page = kzalloc(sizeof(struct mmu_page), GFP_KERNEL);
- if (!page)
- return NULL;
- __img_pdump_printf(mmu_ctx->device, "-- Allocating MMU page for %s\n",
- type == IMGMMU_PTYPE_PC ? "PC" :
- type == IMGMMU_PTYPE_PD ? "PD" :
- type == IMGMMU_PTYPE_PT ? "PT" :
- "???");
- ret = _img_mem_alloc(mmu_ctx->device, mmu_ctx->mem_ctx, mmu_ctx->heap,
- imgmmu_get_cpu_page_size(), mmu_ctx->config.alloc_attr, &buffer);
- if (ret) {
- pr_err("%s: img_mem_alloc failed (%d)\n", __func__, ret);
- goto free_page;
- }
- ret = _img_mem_map_km(buffer);
- if (ret) {
- pr_err("%s: img_mem_map_km failed (%d)\n", __func__, ret);
- goto free_buffer;
- }
- WARN_ON(!type);
- page->type = type;
- page->buffer = buffer;
- page->page.cpu_addr = (uintptr_t)buffer->kptr;
- memset((void *)page->page.cpu_addr, 0, imgmmu_get_cpu_page_size());
- if (type == IMGMMU_PTYPE_PT)
- page->use_parity = mmu_ctx->config.use_pte_parity;
- heap = buffer->heap;
- if (heap->ops && heap->ops->get_sg_table) {
- struct sg_table *sgt;
- bool use_sg_dma = false;
- ret = heap->ops->get_sg_table(heap, buffer, &sgt, &use_sg_dma);
- if (ret) {
- pr_err("%s: heap %d buffer %d no sg_table!\n",
- __func__, heap->id, buffer->id);
- ret = -EINVAL;
- goto free_buffer;
- }
- if (use_sg_dma)
- page->page.phys_addr = sg_dma_address(sgt->sgl);
- else
- page->page.phys_addr = sg_phys(sgt->sgl);
- } else if (heap->ops && heap->ops->get_page_array) {
- uint64_t *addrs;
- ret = heap->ops->get_page_array(heap, buffer, &addrs);
- if (ret) {
- pr_err("%s: heap %d buffer %d no page array!\n",
- __func__, heap->id, buffer->id);
- ret = -EINVAL;
- goto free_buffer;
- }
- page->page.phys_addr = *addrs; /* we allocated a single page */
- } else {
- pr_err("%s: heap %d buffer %d no get_sg or get_page_array!\n",
- __func__, heap->id, buffer->id);
- ret = -EINVAL;
- goto free_buffer;
- }
- mmu_ctx->mem_ctx->mmu_usage_curr += buffer->actual_size;
- if (mmu_ctx->mem_ctx->mmu_usage_curr > mmu_ctx->mem_ctx->mmu_usage_max)
- mmu_ctx->mem_ctx->mmu_usage_max = mmu_ctx->mem_ctx->mmu_usage_curr;
- pr_debug("%s:%d virt addr %#lx type:%d\n", __func__, __LINE__,
- page->page.cpu_addr, type);
- pr_debug("%s:%d phys addr %#llx\n", __func__, __LINE__,
- page->page.phys_addr);
- return &page->page;
- free_buffer:
- _img_mem_free(buffer);
- free_page:
- kfree(page);
- return NULL;
- }
- static void _page_free(struct imgmmu_page *arg)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct mmu_page *page;
- page = container_of(arg, struct mmu_page, page);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- pr_debug("%s:%d buffer %u\n", __func__, __LINE__, page->buffer->id);
- if (page->buffer->mem_ctx->mmu_usage_curr >= page->buffer->actual_size)
- page->buffer->mem_ctx->mmu_usage_curr -= page->buffer->actual_size;
- else
- WARN_ON(1);
- _img_mem_free(page->buffer);
- kfree(page);
- }
- static inline void __pdump_apply_parity(struct device* dev, uint64_t virt,
- const char *block, unsigned int offset)
- {
- uint8_t bits;
- /* XOR the 32 bit pair <paddr & vaddr> */
- __img_pdump_printf(dev,
- "WRW "_PMEM_":$1 %#llx -- Calculate parity bit\n"
- "WRW "_PMEM_":$2 %s:%#x\n"
- "SHR "_PMEM_":$2 "_PMEM_":$2 %d\n"
- "XOR "_PMEM_":$1 "_PMEM_":$1 "_PMEM_":$2\n",
- virt >> MIN_PAGE_SIZE_BITS, block, offset, MIN_PAGE_SIZE_BITS);
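- /* Reduce the XOR result to a single parity bit: each pass XORs the
- * lower half into the shifted-down upper half (widths 16, 8, 4, 2, 1)
- * until bit 0 holds the parity of the whole value. */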
- for (bits = 16; bits >= 1; bits>>=1)
- /* XOR 'bits' pair of previous result */
- __img_pdump_printf(dev,
- "AND "_PMEM_":$2 "_PMEM_":$1 %#x\n"
- "SHR "_PMEM_":$1 "_PMEM_":$1 %d\n"
- "XOR "_PMEM_":$1 "_PMEM_":$1 "_PMEM_":$2\n",
- (1<<bits)-1, bits);
- /* Apply parity bit */
- __img_pdump_printf(dev,
- "SHL "_PMEM_":$1 "_PMEM_":$1 %d\n"
- "OR "_PMEM_":$0 "_PMEM_":$0 "_PMEM_":$1 -- Apply parity\n",
- imgmmu_get_pte_parity_shift());
- }
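- /* imgmmu entry write callback: formats and stores a PC, PD or PT entry in
- * the directory/table page and, when PDUMP capture is enabled, mirrors the
- * write into the PDUMP script using the buffer block symbols. */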
- static void _page_write(struct imgmmu_page *page,
- unsigned int offset, uint64_t entry,
- unsigned int flags, void *priv)
- {
- uint64_t *mem64 = (uint64_t *)page->cpu_addr;
- uint32_t *mem32 = (uint32_t *)mem64;
- struct mmu_page *mmu_page;
- struct heap *heap = NULL;
- struct buffer *buf = (struct buffer*)priv;
- struct pdump_descr* pdump;
- uint32_t entry_shift = 0;
- uint64_t cache_bits = 0;
- uint64_t address = entry & IMG_MMU_PHY_ADDR_MASK;
- uint64_t virt = page->virt_base;
- uint64_t paddr = entry;
- mmu_page = container_of(page, struct mmu_page, page);
- virt += ((1<<imgmmu_get_entry_shift(mmu_page->type))) * offset;
- if (mmu_page->type == IMGMMU_PTYPE_PC ||
- mmu_page->type == IMGMMU_PTYPE_PD)
- heap = mmu_page->buffer->heap;
- else {
- /* PT entries point to a buffer which may have been allocated
- * from a different heap than the one used for MMU allocations */
- if (buf)
- heap = buf->heap;
- }
- mmu_page->bypass_addr_trans = (flags & IMGMMU_BYPASS_ADDR_TRANS ? true : false);
- /* Mask MMU flags */
- flags &= IMG_MMU_ENTRY_FLAGS_MASK;
- /* Skip translation when flags are zero (the address is assumed invalid)
- * or when the page is being remapped to on-chip RAM */
- if (flags && heap && heap->to_dev_addr &&
- !mmu_page->bypass_addr_trans)
- paddr = heap->to_dev_addr(&heap->options, paddr);
- WARN(paddr & ~dma_get_mask(mmu_page->buffer->device),
- "%s: Physical address is out of the DMA mask, "\
- "set a proper DMA mask to avoid cache problems", __func__);
- WARN(paddr & ~IMG_MMU_PHY_ADDR_MASK,
- "%s: Physical address exceeds hardware capabilities, "\
- "the MMU mapping will likely be invalid", __func__);
- paddr &= IMG_MMU_PHY_ADDR_MASK;
- if (trace_physical_pages && flags) {
- if (mmu_page->type == IMGMMU_PTYPE_PC)
- pr_info("%s: sid:%d off %#03x paddr %#016llx flags %#x type:PC\n",
- __func__, buf ? buf->mem_ctx->id : -1, offset, paddr, flags);
- else
- pr_info("%s: sid:%d off %#03x paddr %#016llx flags %#x type:%s virt:%llx\n",
- __func__, buf ? buf->mem_ctx->id : -1, offset, paddr, flags,
- mmu_page->type == IMGMMU_PTYPE_PD ? "PD" :
- mmu_page->type == IMGMMU_PTYPE_PT ? "PT" :
- "???", virt);
- }
- if (mmu_page->type == IMGMMU_PTYPE_PC) {
- /* Offset of PD physical base address(12)-4(PCE flags) */
- entry_shift = 8;
- /* This is 32 bit entry */
- mem32[offset] = (paddr >> entry_shift) | flags;
- } else if (mmu_page->type == IMGMMU_PTYPE_PD) {
- /* Offset of PT physical base address(12)-12(PDE flags) */
- entry_shift = 0;
- /* This is 64 bit entry */
- mem64[offset] = paddr | flags;
- } else if (mmu_page->type == IMGMMU_PTYPE_PT) {
- /* Offset of page physical base address(12)-12(PTE flags) */
- entry_shift = 0;
- cache_bits = imgmmu_get_pte_cache_bits(entry);
- /* This is 64 bit entry */
- mem64[offset] = cache_bits | paddr | flags;
- if (flags && mmu_page->use_parity) {
- uint64_t par_pair = (virt >> MIN_PAGE_SIZE_BITS) |
- ((paddr >> MIN_PAGE_SIZE_BITS) << (sizeof(uint32_t)*8));
- bool par_bit = img_mem_calc_parity(par_pair);
- if (par_bit)
- imgmmu_set_pte_parity(&mem64[offset]);
- if (trace_physical_pages)
- pr_info("%s: [%llx]: %s\n", __func__, mem64[offset],
- par_bit ? "odd parity" : "even parity");
- }
- }
- pdump = vha_pdump_dev_get_drvdata(mmu_page->buffer->device);
- if (img_pdump_enabled(pdump) && flags) {
- /* skip when flags are zero, assuming address is invalid */
- int buffer_id = 0;
- unsigned int buffer_offset = 0;
- int ret;
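- /*
-  * The PDUMP stream cannot carry raw physical addresses, so the entry is
-  * rebuilt at replay time from a named block (OCM cache or BLOCK_<buffer id>)
-  * plus an offset, with flags, parity and cache bits OR-ed in via register $0.
-  */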
- if (mmu_page->bypass_addr_trans) {
- ret = img_mmu_cache_get_offset(mmu_page->buffer->mem_ctx, address,
- &buffer_offset);
- if (ret) {
- pr_info("PDUMP: Can't find in cache %#llx\n", address);
- } else {
- /* Cache addresses are only applicable for PT entries */
- WARN_ON(mmu_page->type != IMGMMU_PTYPE_PT);
- __img_pdump_printf(mmu_page->buffer->device,
- "WRW "_PMEM_":$0 :OCM:BLOCK_CACHE:%#x\n"
- "OR "_PMEM_":$0 "_PMEM_":$0 %d\n",
- buffer_offset, flags);
- if (mmu_page->use_parity) {
- const char block[] = ":OCM:BLOCK_CACHE";
- __pdump_apply_parity(mmu_page->buffer->device, virt, block, buffer_offset);
- }
- if (cache_bits)
- __img_pdump_printf(mmu_page->buffer->device,
- "OR "_PMEM_":$0 "_PMEM_":$0 %#llx\n",
- cache_bits);
- __img_pdump_printf(mmu_page->buffer->device,
- "WRW64 "_PMEM_":BLOCK_%d:%#zx "_PMEM_":$0 -- PTE\n",
- mmu_page->buffer->id, offset * sizeof(*mem64));
- }
- } else {
- if (mmu_page->type == IMGMMU_PTYPE_PT && buf)
- buffer_id = buf->id;
- ret = imgmmu_find_buffer(mmu_page->buffer->mem_ctx, address,
- &buffer_id, &buffer_offset);
- if (ret) {
- pr_info("PDUMP: Can't find %#llx\n", address);
- } else if (mmu_page->type == IMGMMU_PTYPE_PC) {
- __img_pdump_printf(mmu_page->buffer->device,
- "WRW "_PMEM_":$0 "_PMEM_":BLOCK_%d:%#x\n"
- "SHR "_PMEM_":$0 "_PMEM_":$0 %d\n"
- "OR "_PMEM_":$0 "_PMEM_":$0 %d\n"
- "WRW "_PMEM_":BLOCK_%d:%#zx "_PMEM_":$0 -- PCE\n",
- buffer_id, buffer_offset,
- entry_shift, flags,
- mmu_page->buffer->id, offset * sizeof(*mem32));
- } else {
- if (mmu_page->type == IMGMMU_PTYPE_PD) {
- __img_pdump_printf(mmu_page->buffer->device,
- "WRW "_PMEM_":$0 "_PMEM_":BLOCK_%d:%#x\n"
- "OR "_PMEM_":$0 "_PMEM_":$0 %d\n"
- "WRW64 "_PMEM_":BLOCK_%d:%#zx "_PMEM_":$0 -- PDE\n",
- buffer_id, buffer_offset, flags,
- mmu_page->buffer->id, offset * sizeof(*mem64));
- } else if (mmu_page->type == IMGMMU_PTYPE_PT) {
- char block[25];
- if (heap->type == IMG_MEM_HEAP_TYPE_OCM)
- snprintf(block, sizeof(block), ":OCM:BLOCK_CACHE");
- else
- snprintf(block, sizeof(block), ""_PMEM_":BLOCK_%d",
- buffer_id);
- __img_pdump_printf(mmu_page->buffer->device,
- "WRW "_PMEM_":$0 %s:%#x\n"
- "OR "_PMEM_":$0 "_PMEM_":$0 %d\n",
- block, buffer_offset, flags);
- if (mmu_page->use_parity)
- __pdump_apply_parity(mmu_page->buffer->device, virt, block, buffer_offset);
- if (cache_bits)
- __img_pdump_printf(mmu_page->buffer->device,
- "OR "_PMEM_":$0 "_PMEM_":$0 %#llx\n",
- cache_bits);
- __img_pdump_printf(mmu_page->buffer->device,
- "WRW64 "_PMEM_":BLOCK_%d:%#zx "_PMEM_":$0 -- PTE\n",
- mmu_page->buffer->id, offset * sizeof(*mem64));
- }
- }
- }
- }
- }
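- /*
-  * Read back a single MMU entry and sanity check it: the physical address
-  * must fit the hardware and DMA masks, PT entries must carry a matching
-  * parity bit, and only the flag bits valid for the given level may be set.
-  * On any detected corruption the flags are forced to IMG_MMU_ENTRY_FLAGS_MASK
-  * so the caller can recognise the error.
-  */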
- static uint64_t _page_read(struct imgmmu_page *page,
- unsigned int offset, void *priv,
- unsigned int *flags)
- {
- uint64_t *mem64 = (uint64_t *)page->cpu_addr;
- uint32_t *mem32 = (uint32_t *)mem64;
- struct mmu_page *mmu_page;
- struct heap *heap = NULL;
- uint32_t entry_shift = 0;
- uint64_t entry = 0;
- uint64_t paddr;
- uint64_t virt = page->virt_base;
- struct buffer *buf = (struct buffer*)priv;
- mmu_page = container_of(page, struct mmu_page, page);
- virt += ((1<<imgmmu_get_entry_shift(mmu_page->type))) * offset;
- if (mmu_page->type == IMGMMU_PTYPE_PC ||
- mmu_page->type == IMGMMU_PTYPE_PD)
- heap = mmu_page->buffer->heap;
- else {
- /* PT entries point to a buffer which may have been allocated
- * from a different heap than the one used for MMU allocations */
- if (buf)
- heap = buf->heap;
- }
- if (mmu_page->type == IMGMMU_PTYPE_PC) {
- /* Offset of PD physical base address(12)-4(PCE flags) */
- entry_shift = 8;
- /* This is 32 bit entry */
- entry = mem32[offset];
- } else if (mmu_page->type == IMGMMU_PTYPE_PD) {
- /* Offset of PT physical base address(12)-12(PDE flags) */
- entry_shift = 0;
- /* This is 64 bit entry */
- entry = mem64[offset];
- } else if (mmu_page->type == IMGMMU_PTYPE_PT) {
- /* Offset of page physical base address(12)-12(PTE flags) */
- entry_shift = 0;
- /* This is 64 bit entry */
- entry = mem64[offset];
- }
- *flags = entry & IMG_MMU_ENTRY_FLAGS_MASK;
- paddr = entry & ~IMG_MMU_ENTRY_FLAGS_MASK;
- if (mmu_page->type == IMGMMU_PTYPE_PT) {
- /* Mask parity and special cache bits */
- paddr &= ~(1ULL<<imgmmu_get_pte_parity_shift());
- paddr &= ~imgmmu_get_pte_cache_bits(entry);
- }
- paddr <<= entry_shift;
- /* Check if physical address set in PTE is within correct range */
- if (paddr & ~IMG_MMU_PHY_ADDR_MASK) {
- pr_err("%s: mmu page entry (%llx) corruption detected (phys)!\n",
- __func__, paddr);
- *flags = IMG_MMU_ENTRY_FLAGS_MASK;
- goto exit;
- }
- /* Check parity */
- if (*flags && mmu_page->type == IMGMMU_PTYPE_PT &&
- mmu_page->use_parity) {
- uint64_t par_pair = (virt >> MIN_PAGE_SIZE_BITS) |
- ((paddr >> MIN_PAGE_SIZE_BITS) << (sizeof(uint32_t)*8));
- bool par_bit = img_mem_calc_parity(par_pair);
- if (trace_physical_pages)
- pr_info("%s: [%llx]: %s\n", __func__, entry,
- par_bit ? "odd parity" : "even parity");
- if ((entry >> imgmmu_get_pte_parity_shift()) != par_bit) {
- pr_err("%s: mmu page entry (%llx) corruption detected (parity)!\n",
- __func__, entry);
- *flags = IMG_MMU_ENTRY_FLAGS_MASK;
- goto exit;
- }
- }
- /* skip translation when flags are zero, assuming address is invalid */
- if (*flags && heap && heap->to_host_addr &&
- !mmu_page->bypass_addr_trans)
- paddr = heap->to_host_addr(&heap->options, paddr);
- /* Check if physical address matches dma mask */
- if (paddr & ~dma_get_mask(mmu_page->buffer->device)) {
- pr_err("%s: mmu page entry (%llx) physical address is out of dma mask!\n"
- "Set proper dma mask to avoid cache problems\n",
- __func__, paddr);
- *flags = IMG_MMU_ENTRY_FLAGS_MASK;
- goto exit;
- }
- /* Sanity check for MMU flags - different on each level */
- if (((mmu_page->type == IMGMMU_PTYPE_PC ||
- mmu_page->type == IMGMMU_PTYPE_PD) &&
- (*flags & ~IMG_MMU_PTE_FLAG_VALID)) ||
- (mmu_page->type == IMGMMU_PTYPE_PT &&
- (*flags & ~(IMG_MMU_PTE_FLAG_VALID|IMG_MMU_PTE_FLAG_READ_ONLY)))) {
- pr_err("%s: mmu page entry corruption detected (flags)!\n",
- __func__);
- *flags = IMG_MMU_ENTRY_FLAGS_MASK;
- }
- exit:
- if (trace_physical_pages && *flags) {
- if (mmu_page->type == IMGMMU_PTYPE_PC)
- pr_info("%s: sid:%d off %#03x paddr %#016llx flags %#x type:PC\n",
- __func__, buf ? buf->mem_ctx->id : -1, offset, paddr, *flags);
- else
- pr_info("%s: sid:%d off %#03x paddr %#016llx flags %#x type:%s virt:%llx\n",
- __func__, buf ? buf->mem_ctx->id : -1, offset, paddr, *flags,
- mmu_page->type == IMGMMU_PTYPE_PD ? "PD" :
- mmu_page->type == IMGMMU_PTYPE_PT ? "PT" :
- "???", virt);
- }
- return paddr;
- }
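- /* Flush CPU writes to a page table page so the device sees the update */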
- static void _update_page(struct imgmmu_page *arg)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct mmu_page *page;
- if (trace_physical_pages)
- pr_debug("%s\n", __func__);
- page = container_of(arg, struct mmu_page, page);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- _img_mem_sync_cpu_to_device(page->buffer, true);
- }
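- /*
-  * Create an MMU context on top of an existing memory context. Page table
-  * pages are allocated from the given heap; in bypass_hw mode no hardware
-  * catalogue is created. Returns the memory context id on success or a
-  * negative errno on failure.
-  */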
- int img_mmu_ctx_create(struct device *device, const struct mmu_config *config,
- struct mem_ctx *mem_ctx, int heap_id,
- int (*callback_fn)(enum img_mmu_callback_type type,
- int buf_id, void *data),
- void *callback_data, struct mmu_ctx **mmu_ctx)
- {
- struct mem_man *mem_man = &mem_man_data;
- static struct imgmmu_info info = {
- .page_alloc = _page_alloc,
- .page_free = _page_free,
- .page_write = _page_write,
- .page_read = _page_read,
- .page_update = _update_page,
- };
- struct mmu_ctx *ctx;
- int res;
- if (((config->addr_width < imgmmu_get_virt_size()) ||
- (config->addr_width < imgmmu_get_phys_size())) &&
- !config->bypass_hw) {
- pr_err("%s: invalid addr_width (%d)!\n",
- __func__, config->addr_width);
- return -EINVAL;
- }
- ctx = kzalloc(sizeof(struct mmu_ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- ctx->device = device;
- ctx->mem_ctx = mem_ctx;
- memcpy(&ctx->config, config, sizeof(struct mmu_config));
- imgmmu_set_page_size(config->page_size);
- mutex_lock(&mem_man->mutex);
- ctx->heap = idr_find(&mem_man->heaps, heap_id);
- if (!ctx->heap) {
- pr_err("%s: invalid heap_id (%d)!\n", __func__, heap_id);
- mutex_unlock(&mem_man->mutex);
- kfree(ctx);
- return -EINVAL;
- }
- /* Apply offset when needed */
- if (ctx->heap->ops->set_offset) {
- if (ctx->heap->ops->set_offset(ctx->heap, config->bypass_offset)) {
- pr_err("%s: failed to set offset %zu heap_id (%d)!\n",
- __func__, config->bypass_offset, heap_id);
- mutex_unlock(&mem_man->mutex);
- kfree(ctx);
- return -EINVAL;
- }
- pr_debug("%s adding %lx offset bytes to heap %d type %d (%s)\n",
- __func__, config->bypass_offset, ctx->heap->id,
- ctx->heap->type, get_heap_name(ctx->heap->type));
- }
- info.ctx = ctx;
- /* If we are in bypass mode, do not populate hw structures */
- if (!config->bypass_hw) {
- ctx->mmu_cat = imgmmu_cat_create(&info, &res);
- if (res) {
- pr_err("%s: catalogue create failed (%d)!\n",
- __func__, res);
- mutex_unlock(&mem_man->mutex);
- kfree(ctx);
- return -EFAULT;
- }
- } else
- pr_debug("%s imgmmu_cat_create bypass!\n", __func__);
- list_add(&ctx->mem_ctx_entry, &mem_ctx->mmu_ctxs);
- INIT_LIST_HEAD(&ctx->mappings);
- ctx->callback_fn = callback_fn;
- ctx->callback_data = callback_data;
- ctx->id = mem_ctx->id;
- *mmu_ctx = ctx;
- mutex_unlock(&mem_man->mutex);
- return ctx->id;
- }
- EXPORT_SYMBOL(img_mmu_ctx_create);
- static void _img_mmu_ctx_destroy(struct mmu_ctx *ctx)
- {
- struct mem_man *mem_man = &mem_man_data;
- int res;
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- while (!list_empty(&ctx->mappings)) {
- struct mmu_ctx_mapping *map;
- map = list_first_entry(&ctx->mappings,
- struct mmu_ctx_mapping, mmu_ctx_entry);
- pr_debug("%s: found mapped buffer %d (size %zu)\n",
- __func__, map->buffer->id, map->buffer->request_size);
- _img_mmu_unmap(map);
- kfree(map);
- }
- if (!ctx->config.bypass_hw) {
- res = imgmmu_cat_destroy(ctx->mmu_cat);
- if (res)
- pr_err("imgmmu_cat_destroy failed (%d)!\n", res);
- } else
- pr_debug("%s imgmmu_cat_destroy bypass!\n", __func__);
- list_del(&ctx->mem_ctx_entry);
- }
- void img_mmu_ctx_destroy(struct mmu_ctx *ctx)
- {
- struct mem_man *mem_man = &mem_man_data;
- mutex_lock(&mem_man->mutex);
- _img_mmu_ctx_destroy(ctx);
- mutex_unlock(&mem_man->mutex);
- kfree(ctx);
- }
- EXPORT_SYMBOL(img_mmu_ctx_destroy);
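- /*
-  * Map a previously allocated buffer at the requested device virtual address.
-  * The buffer's pages are obtained either as an sg_table or as a page array,
-  * depending on what the owning heap provides, and the MMU map callback is
-  * invoked once the mapping has been installed.
-  */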
- int img_mmu_map(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx, int buf_id,
- uint64_t virt_addr, unsigned int map_flags)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct mmu_ctx_mapping *mapping;
- struct imgmmu_halloc heap_alloc;
- struct buffer *buffer;
- struct heap *heap;
- int res = 0;
- int ret;
- pr_debug("%s buffer %d virt_addr %#llx\n",
- __func__, buf_id, virt_addr);
- mapping = kzalloc(sizeof(struct mmu_ctx_mapping), GFP_KERNEL);
- if (!mapping)
- return -ENOMEM;
- mutex_lock(&mem_man->mutex);
- buffer = idr_find(&mem_ctx->buffers, buf_id);
- if (!buffer) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- ret = -EINVAL;
- goto error;
- }
- pr_debug("%s buffer %d 0x%p size %zu virt_addr %#llx\n", __func__,
- buf_id, buffer, buffer->request_size, virt_addr);
- /* Store MMU mapping flags */
- buffer->map_flags = map_flags;
- heap_alloc.vaddr = virt_addr;
- heap_alloc.size = buffer->actual_size;
- mapping->mmu_ctx = mmu_ctx;
- mapping->buffer = buffer;
- mapping->virt_addr = virt_addr;
- if (!mmu_ctx->config.bypass_hw)
- __img_pdump_printf(buffer->device, "-- Mapping "_PMEM_":BLOCK_%d @ 0x%llx\n",
- buf_id, virt_addr);
- heap = buffer->heap;
- if (heap->ops && heap->ops->get_sg_table) {
- struct sg_table *sgt;
- bool use_sg_dma = false;
- ret = heap->ops->get_sg_table(heap, buffer, &sgt, &use_sg_dma);
- if (ret) {
- pr_err("%s: heap %d buffer %d no sg_table!\n",
- __func__, heap->id, buffer->id);
- goto error;
- }
- if (!mmu_ctx->config.bypass_hw)
- mapping->map = imgmmu_cat_map_sg(
- mmu_ctx->mmu_cat,
- sgt->sgl,
- use_sg_dma,
- &heap_alloc,
- map_flags, buffer,
- &res);
- else
- pr_debug("%s imgmmu_cat_map_sg bypass!\n", __func__);
- } else if (heap->ops && heap->ops->get_page_array) {
- uint64_t *addrs;
- ret = heap->ops->get_page_array(heap, buffer, &addrs);
- if (ret) {
- pr_err("%s: heap %d buffer %d no page array!\n",
- __func__, heap->id, buffer->id);
- goto error;
- }
- if (!mmu_ctx->config.bypass_hw)
- mapping->map = imgmmu_cat_map_arr(
- mmu_ctx->mmu_cat,
- addrs,
- &heap_alloc,
- map_flags, buffer,
- &res);
- else
- pr_debug("%s imgmmu_cat_map_arr bypass!\n", __func__);
- } else {
- pr_err("%s: heap %d buffer %d no get_sg or get_page_array!\n",
- __func__, heap->id, buffer->id);
- ret = -EINVAL;
- goto error;
- }
- if (res) {
- pr_err("imgmmu_cat_map failed (%d)!\n", res);
- ret = -EFAULT;
- goto error;
- }
- list_add(&mapping->mmu_ctx_entry, &mmu_ctx->mappings);
- list_add(&mapping->buffer_entry, &mapping->buffer->mappings);
- if (mmu_ctx->callback_fn && !mmu_ctx->config.bypass_hw) {
- ret = mmu_ctx->callback_fn(IMG_MMU_CALLBACK_MAP, buffer->id,
- mmu_ctx->callback_data);
- if (ret) {
- pr_err("%s: imgmmu map callback failed!\n", __func__);
- }
- }
- mutex_unlock(&mem_man->mutex);
- return ret;
- error:
- mutex_unlock(&mem_man->mutex);
- kfree(mapping);
- return ret;
- }
- EXPORT_SYMBOL(img_mmu_map);
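- /* Tear down a single mapping; the caller must hold mem_man->mutex */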
- static void _img_mmu_unmap(struct mmu_ctx_mapping *mapping)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct mmu_ctx *ctx = mapping->mmu_ctx;
- int res;
- pr_debug("%s:%d unmapping %p buffer %d\n",
- __func__, __LINE__, mapping, mapping->buffer->id);
- WARN_ON(!mutex_is_locked(&mem_man->mutex));
- if (!ctx->config.bypass_hw) {
- res = imgmmu_cat_unmap(mapping->map);
- if (res)
- pr_warn("imgmmu_cat_unmap failed (%d)!\n", res);
- } else
- pr_debug("%s imgmmu_cat_unmap bypass!\n", __func__);
- list_del(&mapping->mmu_ctx_entry);
- list_del(&mapping->buffer_entry);
- if (ctx->callback_fn && !ctx->config.bypass_hw)
- ctx->callback_fn(IMG_MMU_CALLBACK_UNMAP, mapping->buffer->id,
- ctx->callback_data);
- }
- int img_mmu_unmap(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx, int buf_id)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct mmu_ctx_mapping *mapping;
- struct list_head *lst;
- pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
- mutex_lock(&mem_man->mutex);
- mapping = NULL;
- list_for_each(lst, &mmu_ctx->mappings) {
- struct mmu_ctx_mapping *m;
- m = list_entry(lst, struct mmu_ctx_mapping, mmu_ctx_entry);
- if (m->buffer->id == buf_id) {
- mapping = m;
- break;
- }
- }
- if (!mapping) {
- pr_err("%s: buffer id %d not found\n", __func__, buf_id);
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- _img_mmu_unmap(mapping);
- mutex_unlock(&mem_man->mutex);
- kfree(mapping);
- return 0;
- }
- EXPORT_SYMBOL(img_mmu_unmap);
- int img_mmu_get_conf(size_t *page_size, size_t *virt_size)
- {
- if (page_size)
- *page_size = imgmmu_get_page_size();
- if (virt_size)
- *virt_size = imgmmu_get_virt_size();
- return 0;
- }
- EXPORT_SYMBOL(img_mmu_get_conf);
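- /*
-  * Return the Page Catalogue base for programming the MMU: pc_reg receives
-  * the PC physical address as a PFN (shifted by IMG_MMU_PC_ADDR_SHIFT) and
-  * bufid the id of the buffer backing the catalogue page.
-  */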
- int img_mmu_get_pc(const struct mmu_ctx *ctx, unsigned int *pc_reg, int *bufid)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct imgmmu_page *page = NULL;
- phys_addr_t addr = 0ULL;
- mutex_lock(&mem_man->mutex);
- *pc_reg = 0;
- if (!ctx->config.bypass_hw) {
- struct mmu_page *mmu_page;
- page = imgmmu_cat_get_page(ctx->mmu_cat);
- if (!page) {
- mutex_unlock(&mem_man->mutex);
- return -EINVAL;
- }
- mmu_page = container_of(page, struct mmu_page, page);
- *bufid = mmu_page->buffer->id;
- addr = page->phys_addr;
- if (ctx->heap->to_dev_addr)
- addr = ctx->heap->to_dev_addr(
- &ctx->heap->options,
- addr);
- /* This is the PFN of the Page Catalogue physical address */
- *pc_reg = (unsigned int)(addr >>= IMG_MMU_PC_ADDR_SHIFT);
- pr_debug("%s: addr %#llx pc %#llx bufid %d\n", __func__,
- page->phys_addr, addr, *bufid);
- } else
- pr_debug("%s imgmmu_cat_get_page bypass!\n", __func__);
- mutex_unlock(&mem_man->mutex);
- return 0;
- }
- EXPORT_SYMBOL(img_mmu_get_pc);
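- /* Look up the physical address and entry flags for a device virtual address */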
- phys_addr_t img_mmu_get_paddr(const struct mmu_ctx *ctx,
- uint64_t vaddr, uint8_t *flags)
- {
- struct mem_man *mem_man = &mem_man_data;
- uint64_t entry = 0;
- phys_addr_t paddr = 0;
- *flags = 0;
- mutex_lock(&mem_man->mutex);
- entry = imgmmu_cat_get_pte(ctx->mmu_cat, vaddr);
- if (entry != ~0ULL) {
- *flags = entry & IMG_MMU_ENTRY_FLAGS_MASK;
- paddr = entry & ~IMG_MMU_ENTRY_FLAGS_MASK;
- }
- mutex_unlock(&mem_man->mutex);
- return paddr;
- }
- EXPORT_SYMBOL(img_mmu_get_paddr);
- /*
- * Wrapper functions for virtual address allocator
- */
- int img_mmu_vaa_create(struct device *device,
- uint32_t base, size_t size, struct mmu_vaa **vaa)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct mmu_vaa *ctx;
- int ret = 0;
- if (!size)
- return -EINVAL;
- ctx = kzalloc(sizeof(struct mmu_vaa), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- mutex_lock(&mem_man->mutex);
- ctx->heap = imgmmu_hcreate(base, imgmmu_get_page_size(),
- size, true, &ret);
- if (ret) {
- pr_err("%s: imgmmu_hcreate failed (%d)!\n", __func__, ret);
- kfree(ctx);
- ret = -EFAULT;
- goto exit;
- }
- INIT_LIST_HEAD(&ctx->entries);
- ctx->device = device;
- *vaa = ctx;
- exit:
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- EXPORT_SYMBOL(img_mmu_vaa_create);
- int img_mmu_vaa_destroy(struct mmu_vaa *vaa)
- {
- struct mem_man *mem_man = &mem_man_data;
- if (!vaa)
- return -EINVAL;
- mutex_lock(&mem_man->mutex);
- while (!list_empty(&vaa->entries)) {
- struct vaa_entry *entry;
- entry = list_first_entry(&vaa->entries,
- struct vaa_entry, mmu_vaa_entry);
- if (imgmmu_hfree(entry->alloc)) {
- pr_err("%s: imgmmu_hfree failed!\n",
- __func__);
- WARN_ON(1);
- }
- list_del(&entry->mmu_vaa_entry);
- kfree(entry);
- }
- if (imgmmu_hdestroy(vaa->heap)) {
- pr_err("%s: imgmmu_hdestroy failed!\n", __func__);
- /* If some attachments are still active */
- WARN_ON(1);
- }
- kfree(vaa);
- mutex_unlock(&mem_man->mutex);
- return 0;
- }
- EXPORT_SYMBOL(img_mmu_vaa_destroy);
- int img_mmu_vaa_alloc(struct mmu_vaa *vaa, size_t size, uint32_t *addr)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct imgmmu_halloc *alloc;
- struct vaa_entry *entry;
- int ret = 0;
- if (!vaa || !addr || !size)
- return -EINVAL;
- entry = kzalloc(sizeof(struct vaa_entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- mutex_lock(&mem_man->mutex);
- alloc = imgmmu_hallocate(vaa->heap, size, &ret);
- if (!alloc || ret) {
- pr_err("%s: imgmmu_hallocate failed (%zu)!\n",
- __func__, size);
- kfree(entry);
- ret = -EFAULT;
- goto exit;
- }
- entry->alloc = alloc;
- list_add(&entry->mmu_vaa_entry, &vaa->entries);
- *addr = alloc->vaddr;
- exit:
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- EXPORT_SYMBOL(img_mmu_vaa_alloc);
- int img_mmu_vaa_free(struct mmu_vaa *vaa, uint32_t addr, size_t size)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct vaa_entry *entry;
- int ret = 0;
- if (!vaa || !size)
- return -EINVAL;
- mutex_lock(&mem_man->mutex);
- /* Find the allocation matching the requested address and size */
- list_for_each_entry(entry, &vaa->entries, mmu_vaa_entry) {
- if (entry->alloc->vaddr == addr &&
- entry->alloc->size == size)
- break;
- }
- /* Walked the whole list without a match */
- if (&entry->mmu_vaa_entry == &vaa->entries)
- entry = NULL;
- if (!entry) {
- pr_err("%s: allocation not found (0x%x:%zu)!\n",
- __func__, addr, size);
- ret = -EINVAL;
- goto exit;
- }
- if (imgmmu_hfree(entry->alloc)) {
- pr_err("%s: imgmmu_hfree failed (0x%x:%zu)!\n",
- __func__, addr, size);
- ret = -EFAULT;
- goto exit;
- }
- list_del(&entry->mmu_vaa_entry);
- kfree(entry);
- exit:
- mutex_unlock(&mem_man->mutex);
- return ret;
- }
- EXPORT_SYMBOL(img_mmu_vaa_free);
- // Parity look-up table for 8-bit values
- static unsigned int _parity_lut_[256] =
- {0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0};
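- /*
-  * Compute the parity of a 64-bit value: XOR-fold the value down to a single
-  * byte and look it up in the 8-bit parity table above. Returns true when the
-  * input has an odd number of set bits.
-  */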
- bool img_mem_calc_parity(unsigned long long input)
- {
- // The input is treated as a 64-bit value
- int bits;
- // XOR-fold the value in half (32, 16, 8 bits) down to a single byte
- for (bits = 32; bits >= 8; bits>>=1)
- input = input ^ (input >> bits);
- return _parity_lut_[input & 0xff] ? true : false;
- }
- EXPORT_SYMBOL(img_mem_calc_parity);
- /*
- * Initialisation
- */
- static int __init img_mem_init(void)
- {
- struct mem_man *mem_man = &mem_man_data;
- pr_debug("%s:%d\n", __func__, __LINE__);
- idr_init(&mem_man->heaps);
- idr_init(&mem_man->mem_ctxs);
- mutex_init(&mem_man->mutex);
- mem_man->cache_usage = 0;
- return 0;
- }
- static void __exit img_mem_exit(void)
- {
- struct mem_man *mem_man = &mem_man_data;
- struct heap *heap;
- struct mem_ctx *ctx;
- int heap_id;
- int ctx_id;
- pr_debug("%s:%d\n", __func__, __LINE__);
- /* keeps mutex checks (WARN_ON) happy, this will never actually wait */
- mutex_lock(&mem_man->mutex);
- ctx_id = 0;
- ctx = idr_get_next(&mem_man->mem_ctxs, &ctx_id);
- while (ctx) {
- pr_warn("%s derelict memory context %p!\n", __func__, ctx);
- _img_mem_destroy_proc_ctx(ctx);
- kfree(ctx);
- ctx_id = 0;
- ctx = idr_get_next(&mem_man->mem_ctxs, &ctx_id);
- }
- heap_id = IMG_MEM_MAN_MIN_HEAP;
- heap = idr_get_next(&mem_man->heaps, &heap_id);
- while (heap) {
- pr_warn("%s derelict heap %d!\n", __func__, heap_id);
- _img_mem_del_heap(heap);
- kfree(heap);
- heap_id = IMG_MEM_MAN_MIN_HEAP;
- heap = idr_get_next(&mem_man->heaps, &heap_id);
- }
- idr_destroy(&mem_man->heaps);
- idr_destroy(&mem_man->mem_ctxs);
- mutex_unlock(&mem_man->mutex);
- mutex_destroy(&mem_man->mutex);
- }
- module_init(img_mem_init);
- module_exit(img_mem_exit);
- MODULE_LICENSE("GPL");
- /*
- * coding style for emacs
- *
- * Local variables:
- * indent-tabs-mode: t
- * tab-width: 8
- * c-basic-offset: 8
- * End:
- */