1 /*************************************************************************/ /*!
3 @Title Local card memory allocator
4 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @Description Part of the memory management. This module is responsible for
6 implementing the function callbacks for local card memory.
7 @License Dual MIT/GPLv2
9 The contents of this file are subject to the MIT license as set out below.
11 Permission is hereby granted, free of charge, to any person obtaining a copy
12 of this software and associated documentation files (the "Software"), to deal
13 in the Software without restriction, including without limitation the rights
14 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 copies of the Software, and to permit persons to whom the Software is
16 furnished to do so, subject to the following conditions:
18 The above copyright notice and this permission notice shall be included in
19 all copies or substantial portions of the Software.
21 Alternatively, the contents of this file may be used under the terms of
22 the GNU General Public License Version 2 ("GPL") in which case the provisions
23 of GPL are applicable instead of those above.
25 If you wish to allow use of your version of this file only under the terms of
26 GPL, and not to allow others to use your version of this file under the terms
27 of the MIT license, indicate your decision by deleting the provisions above
28 and replace them with the notice and other provisions required by GPL as set
29 out in the file called "GPL-COPYING" included in this distribution. If you do
30 not delete the provisions above, a recipient may use your version of this file
31 under the terms of either the MIT license or GPL.
33 This License is also included in this distribution in the file called
36 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
37 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
38 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
39 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
40 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
41 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
42 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 */ /**************************************************************************/
45 #include "img_types.h"
46 #include "pvr_debug.h"
47 #include "pvrsrv_error.h"
48 #include "pvrsrv_memallocflags.h"
49 #include "rgx_pdump_panics.h"
53 #include "devicemem_server_utils.h"
54 #include "physmem_lma.h"
58 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
59 #include "process_stats.h"
62 #if defined(SUPPORT_GPUVIRT_VALIDATION)
/* Per-PMR bookkeeping for an array of local-card-memory (LMA) pages.
 * One instance describes every device-physical page backing a single PMR.
 * NOTE(review): this extract is missing lines; fields referenced elsewhere
 * in this file (bHasLMPages, bOnDemand, psArena) are not visible here. */
66 typedef struct _PMR_LMALLOCARRAY_DATA_ {
67 PVRSRV_DEVICE_NODE *psDevNode;
/* Signed so that underflow of the allocated-page count can be caught by
 * the "0 <= iNumPagesAllocated" asserts used elsewhere in this file. */
68 IMG_INT32 iNumPagesAllocated;
71 * Total number of pages supported by this PMR. (Fixed as of now due the fixed Page table array size)
73 IMG_UINT32 uiTotalNumPages;
/* Number of pages the next _AllocLMPages() call should obtain. */
74 IMG_UINT32 uiPagesToAlloc;
/* uiAllocSize is the per-allocation byte size; for the multi-page case it
 * is set to 1 << uiLog2AllocSize (see _AllocLMPageArray). */
76 IMG_UINT32 uiLog2AllocSize;
77 IMG_UINT32 uiAllocSize;
/* Device-physical address per page; entries hold INVALID_PAGE when the
 * slot is not currently backed. */
78 IMG_DEV_PHYADDR *pasDevPAddr;
80 IMG_BOOL bZeroOnAlloc;
81 IMG_BOOL bPoisonOnAlloc;
/* IMG_TRUE => pages come from the firmware-local arena, not GPU-local. */
82 IMG_BOOL bFwLocalAlloc;
84 /* Tells if allocation is physically backed */
89 record at alloc time whether poisoning will be required when the
92 IMG_BOOL bPoisonOnFree;
94 /* Physical heap and arena pointers for this allocation */
95 PHYS_HEAP* psPhysHeap;
97 PVRSRV_MEMALLOCFLAGS_T uiAllocFlags;
99 } PMR_LMALLOCARRAY_DATA;
/* Map one LMA allocation into the kernel's virtual address space.
 * Picks the FW-local or GPU-local physical heap (based on bFwLocalAlloc),
 * converts the device physical address to a CPU physical address, then maps
 * it with the CPU cache mode derived from ulFlags.
 * Returns PVRSRV_OK and writes the linear address through pvPtr, or
 * PVRSRV_ERROR_OUT_OF_MEMORY when OSMapPhysToLin fails.
 * NOTE(review): the extract is missing lines here — uiSize/ulFlags/pvPtr
 * parameters and the if/else around the heap selection are implied. */
101 static PVRSRV_ERROR _MapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
102 IMG_DEV_PHYADDR *psDevPAddr,
104 IMG_BOOL bFwLocalAlloc,
108 IMG_UINT32 ui32CPUCacheFlags = DevmemCPUCacheMode(psDevNode, ulFlags);
109 IMG_CPU_PHYADDR sCpuPAddr;
110 PHYS_HEAP *psPhysHeap;
/* Firmware-local allocations translate through the FW heap... */
114 psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
/* ...everything else through the GPU-local heap. */
118 psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
121 PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr);
123 *pvPtr = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags);
126 return PVRSRV_ERROR_OUT_OF_MEMORY;
/* Undo _MapAlloc: remove the kernel linear mapping created for one
 * allocation. The psDevNode/bFwLocalAlloc parameters are accepted for
 * symmetry with _MapAlloc but the visible body only uses size/flags/ptr. */
134 static void _UnMapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
136 IMG_BOOL bFwLocalAlloc,
140 OSUnMapPhysToLin(pvPtr, uiSize, PVRSRV_CPU_CACHE_MODE(ulFlags));
/* Fill one allocation with a repeating poison byte pattern.
 * Maps the page uncached, copies pacPoisonData cyclically over the whole
 * uiAllocSize region, then unmaps. Used for debug poisoning on alloc/free.
 * NOTE(review): lines are missing from this extract — the uiPoisonSize
 * parameter, the uiSrcByteIndex increment/wrap and the error label are
 * implied by the visible code. */
144 _PoisonAlloc(PVRSRV_DEVICE_NODE *psDevNode,
145 IMG_DEV_PHYADDR *psDevPAddr,
146 IMG_BOOL bFwLocalAlloc,
147 IMG_UINT32 uiAllocSize,
148 const IMG_CHAR *pacPoisonData,
151 IMG_UINT32 uiSrcByteIndex;
152 IMG_UINT32 uiDestByteIndex;
153 void *pvKernLin = NULL;
154 IMG_CHAR *pcDest = NULL;
/* Uncached mapping: poison is a debug aid, no need for cache maintenance. */
158 eError = _MapAlloc(psDevNode,
162 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
164 if (eError != PVRSRV_OK)
/* Copy the poison pattern cyclically across the destination. */
171 for(uiDestByteIndex=0; uiDestByteIndex<uiAllocSize; uiDestByteIndex++)
173 pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
175 if (uiSrcByteIndex == uiPoisonSize)
181 _UnMapAlloc(psDevNode, uiAllocSize, bFwLocalAlloc, 0,pvKernLin);
186 PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation"));
/* Zero-fill one allocation: map it uncached, memset to 0, unmap.
 * Returns the _MapAlloc error on mapping failure (logged below). */
191 _ZeroAlloc(PVRSRV_DEVICE_NODE *psDevNode,
192 IMG_DEV_PHYADDR *psDevPAddr,
193 IMG_BOOL bFwLocalAlloc,
194 IMG_UINT32 uiAllocSize)
196 void *pvKernLin = NULL;
199 eError = _MapAlloc(psDevNode,
203 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
205 if (eError != PVRSRV_OK)
210 /* NOTE: 'CachedMemSet' means the operating system default memset, which
211 * we *assume* in the LMA code will be faster, and doesn't need to
214 OSCachedMemSet(pvKernLin, 0, uiAllocSize);
216 _UnMapAlloc(psDevNode, uiAllocSize, bFwLocalAlloc, 0, pvKernLin);
221 PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation"));
225 static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
226 static const IMG_UINT32 _AllocPoisonSize = 7;
227 static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
228 static const IMG_UINT32 _FreePoisonSize = 11;
/* Allocate and initialise the PMR_LMALLOCARRAY_DATA metadata for a new LMA
 * PMR. No device memory is allocated here — only the tracking structure and
 * the device-physical-address array, with every entry marked INVALID_PAGE.
 * On success *ppsPageArrayDataPtr receives the new structure.
 * NOTE(review): lines are missing from this extract (the bContig/else
 * branch split, error labels, final return). */
231 _AllocLMPageArray(PVRSRV_DEVICE_NODE *psDevNode,
233 PMR_SIZE_T uiChunkSize,
234 IMG_UINT32 ui32NumPhysChunks,
235 IMG_UINT32 ui32NumVirtChunks,
236 IMG_UINT32 *pabMappingTable,
237 IMG_UINT32 uiLog2PageSize,
239 IMG_BOOL bPoisonOnAlloc,
240 IMG_BOOL bPoisonOnFree,
243 IMG_BOOL bFwLocalAlloc,
244 PHYS_HEAP* psPhysHeap,
245 PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
246 PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr
249 PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL;
/* Zero-on-alloc and poison-on-alloc are mutually exclusive by contract. */
252 PVR_ASSERT(!bZero || !bPoisonOnAlloc);
/* Sanity cap: 0x1000000000 bytes == 64GB; a single request that large is
 * treated as a caller bug rather than honoured. */
254 if (uiSize >= 0x1000000000ULL)
256 PVR_DPF((PVR_DBG_ERROR,
257 "physmem_lma.c: Do you really want 64GB of physical memory in one go? This is likely a bug"));
258 eError = PVRSRV_ERROR_INVALID_PARAMS;
262 PVR_ASSERT(OSGetPageShift() <= uiLog2PageSize);
/* Size must be an exact multiple of the (possibly large) page size. */
264 if ((uiSize & ((1ULL << uiLog2PageSize) - 1)) != 0)
266 eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
270 psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA));
271 if (psPageArrayData == NULL)
273 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
274 goto errorOnAllocArray;
280 Some allocations require kernel mappings in which case in order
281 to be virtually contiguous we also have to be physically contiguous.
/* Contiguous case: one single allocation covering the whole size. */
283 psPageArrayData->uiPagesToAlloc = psPageArrayData->uiTotalNumPages = 1;
284 psPageArrayData->uiAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize);
285 psPageArrayData->uiLog2AllocSize = uiLog2PageSize;
/* Non-contiguous case: one allocation per page. */
289 IMG_UINT32 uiNumPages;
291 /* Use of cast below is justified by the assertion that follows to
292 prove that no significant bits have been truncated */
293 uiNumPages = (IMG_UINT32)(((uiSize-1)>>uiLog2PageSize) + 1);
294 PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2PageSize) == uiSize);
295 psPageArrayData->uiTotalNumPages = uiNumPages;
/* Dense (non-sparse) PMRs allocate everything now; sparse PMRs only the
 * physical chunks requested. */
296 if((1 == ui32NumPhysChunks) && (1 == ui32NumVirtChunks))
298 psPageArrayData->uiPagesToAlloc = uiNumPages;
300 psPageArrayData->uiPagesToAlloc = ui32NumPhysChunks;
302 psPageArrayData->uiAllocSize = 1 << uiLog2PageSize;
303 psPageArrayData->uiLog2AllocSize = uiLog2PageSize;
305 psPageArrayData->psDevNode = psDevNode;
306 psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR)*
307 psPageArrayData->uiTotalNumPages);
308 if (psPageArrayData->pasDevPAddr == NULL)
310 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
311 goto errorOnAllocAddr;
/* Mark every slot unbacked. NOTE(review): memset fills per-byte, so this
 * only yields INVALID_PAGE in each entry if every byte of INVALID_PAGE's
 * representation is identical (e.g. 0) — confirm against its definition. */
314 OSCachedMemSet(&psPageArrayData->pasDevPAddr[0], INVALID_PAGE, sizeof(IMG_DEV_PHYADDR)*
315 psPageArrayData->uiTotalNumPages);
317 psPageArrayData->iNumPagesAllocated = 0;
318 psPageArrayData->bZeroOnAlloc = bZero;
319 psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
320 psPageArrayData->bPoisonOnFree = bPoisonOnFree;
321 psPageArrayData->bHasLMPages = IMG_FALSE;
322 psPageArrayData->bOnDemand = bOnDemand;
323 psPageArrayData->bFwLocalAlloc = bFwLocalAlloc;
324 psPageArrayData->psPhysHeap = psPhysHeap;
325 psPageArrayData->uiAllocFlags = uiAllocFlags;
327 *ppsPageArrayDataPtr = psPageArrayData;
332 error exit paths follow:
336 OSFreeMem(psPageArrayData);
340 PVR_ASSERT(eError != PVRSRV_OK);
/* Allocate uiPagesToAlloc device pages from the appropriate RA arena and
 * record their addresses in pasDevPAddr. pui32MapTable (may be NULL for
 * dense PMRs) maps loop index -> page-array slot for sparse allocations.
 * Applies poison-on-alloc / zero-on-alloc per the PMR's flags and rolls
 * back already-allocated pages on failure. */
346 _AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable)
349 RA_BASE_T uiCardAddr;
350 RA_LENGTH_T uiActualSize;
351 IMG_UINT32 i,ui32Index=0;
352 IMG_UINT32 uiAllocSize;
353 IMG_UINT32 uiLog2AllocSize;
354 IMG_UINT32 uiRegionId;
355 PVRSRV_DEVICE_NODE *psDevNode;
356 IMG_BOOL bPoisonOnAlloc;
357 IMG_BOOL bZeroOnAlloc;
360 PVR_ASSERT(NULL != psPageArrayData);
361 PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
363 uiAllocSize = psPageArrayData->uiAllocSize;
364 uiLog2AllocSize = psPageArrayData->uiLog2AllocSize;
365 psDevNode = psPageArrayData->psDevNode;
366 bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc;
367 bZeroOnAlloc = psPageArrayData->bZeroOnAlloc;
/* Arena selection: FW-local allocations use the per-OS firmware arena
 * (uiKernelFwRAIdx is consumed and reset to 0), otherwise the local-memory
 * arena for the region chosen from the allocation flags. */
369 #if defined(SUPPORT_PVRSRV_GPUVIRT)
370 if (psPageArrayData->bFwLocalAlloc)
372 PVR_ASSERT(psDevNode->uiKernelFwRAIdx < RGXFW_NUM_OS);
373 pArena = psDevNode->psKernelFwMemArena[psDevNode->uiKernelFwRAIdx];
374 psDevNode->uiKernelFwRAIdx = 0;
379 /* Get suitable local memory region for this allocation */
380 uiRegionId = PhysHeapGetRegionId(psPageArrayData->psPhysHeap, psPageArrayData->uiAllocFlags);
382 PVR_ASSERT(uiRegionId < psDevNode->ui32NumOfLocalMemArenas);
383 pArena = psDevNode->apsLocalDevMemArenas[uiRegionId];
/* Refuse requests that would exceed the PMR's fixed page-table capacity. */
386 if(psPageArrayData->uiTotalNumPages < (psPageArrayData->iNumPagesAllocated + psPageArrayData->uiPagesToAlloc))
388 PVR_DPF((PVR_DBG_ERROR,"Pages requested to allocate larger than original PMR alloc Size"));
389 eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
/* GPU virtualisation validation: override the arena with the calling
 * process's OSid sub-arena. */
394 #if defined(SUPPORT_GPUVIRT_VALIDATION)
396 IMG_UINT32 ui32OSid=0, ui32OSidReg=0;
397 IMG_BOOL bOSidAxiProt;
400 pId=OSGetCurrentClientProcessIDKM();
401 RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
403 pArena=psDevNode->psOSidSubArena[ui32OSid];
404 PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Giving from OS slot %d",ui32OSid));
408 psPageArrayData->psArena = pArena;
410 for(i=0;i<psPageArrayData->uiPagesToAlloc;i++)
413 /*This part of index finding should happen before allocating page. Just avoiding intricate paths */
414 if(psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
/* Sparse path: the map table tells us which slot each new page fills. */
420 if(NULL == pui32MapTable)
422 PVR_DPF((PVR_DBG_MESSAGE,"Mapping table cannot be null"));
423 eError = PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY;
427 ui32Index = pui32MapTable[i];
428 if(ui32Index >= psPageArrayData->uiTotalNumPages)
430 PVR_DPF((PVR_DBG_MESSAGE, "%s: Page alloc request Index out of bounds for PMR @0x%p",__func__, psPageArrayData));
431 eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
435 if(INVALID_PAGE != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
437 PVR_DPF((PVR_DBG_MESSAGE,"Mapping already exists"));
438 eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
443 eError = RA_Alloc(pArena,
445 RA_NO_IMPORT_MULTIPLIER,
447 1ULL << uiLog2AllocSize,
451 NULL); /* No private handle */
453 #if defined(SUPPORT_GPUVIRT_VALIDATION)
455 PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Address: %llu \n",uiCardAddr));
459 if (PVRSRV_OK != eError)
461 PVR_DPF((PVR_DBG_ERROR,"Failed to Allocate the page @index:%d",ui32Index));
462 eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
466 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
467 #if !defined(PVRSRV_ENABLE_MEMORY_STATS)
468 /* Allocation is done a page at a time */
469 PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize);
472 IMG_CPU_PHYADDR sLocalCpuPAddr;
474 sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr;
475 PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
484 psPageArrayData->pasDevPAddr[ui32Index].uiAddr = uiCardAddr;
/* NOTE(review): the address was stored at slot ui32Index (line above) but
 * poison/zero below operate on slot i. For sparse allocations these can
 * differ — verify whether [i] here should be [ui32Index]. */
487 eError = _PoisonAlloc(psDevNode,
488 &psPageArrayData->pasDevPAddr[i],
489 psPageArrayData->bFwLocalAlloc,
493 if (eError !=PVRSRV_OK)
495 PVR_DPF((PVR_DBG_ERROR,"Failed to poison the page"));
502 eError = _ZeroAlloc(psDevNode,
503 &psPageArrayData->pasDevPAddr[i],
504 psPageArrayData->bFwLocalAlloc,
506 if (eError !=PVRSRV_OK)
508 PVR_DPF((PVR_DBG_ERROR,"Failed to zero the page"));
513 psPageArrayData->iNumPagesAllocated += psPageArrayData->uiPagesToAlloc;
514 if(psPageArrayData->iNumPagesAllocated)
516 psPageArrayData->bHasLMPages = IMG_TRUE;
522 error exit paths follow:
526 eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
528 PVR_DPF((PVR_DBG_ERROR,
529 "%s: alloc_pages failed to honour request %d @index: %d of %d pages: (%s)",__func__,
532 psPageArrayData->uiPagesToAlloc,
533 PVRSRVGetErrorStringKM(eError)));
/* Roll back pages allocated before the failure. The countdown relies on
 * IMG_UINT32 wraparound: when i was 0, --i wraps to UINT32_MAX which is
 * >= uiPagesToAlloc and terminates the loop. */
534 while (--i < psPageArrayData->uiPagesToAlloc)
536 if(psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
542 if(NULL != pui32MapTable)
543 ui32Index = pui32MapTable[i];
546 if(ui32Index < psPageArrayData->uiTotalNumPages)
548 RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
549 psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE;
552 PVR_ASSERT(eError != PVRSRV_OK);
/* Free the PMR metadata: the device-address array, then the tracking
 * structure itself. The pages themselves must already have been released
 * via _FreeLMPages (callers assert bHasLMPages is false first). */
557 _FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData)
559 OSFreeMem(psPageArrayData->pasDevPAddr);
561 PVR_DPF((PVR_DBG_MESSAGE, "physmem_lma.c: freed local memory array structure for PMR @0x%p", psPageArrayData));
563 OSFreeMem(psPageArrayData);
/* Release device pages back to their RA arena, optionally poisoning them
 * first. pui32FreeIndices may be NULL, meaning "free every page"; otherwise
 * it lists the ui32FreePageCount slots to free. Clears bHasLMPages once the
 * allocated-page count reaches zero.
 * NOTE(review): lines are missing from this extract (e.g. the increment of
 * ui32PagesFreed and the NULL-indices branch body are implied). */
569 _FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData,IMG_UINT32 *pui32FreeIndices, IMG_UINT32 ui32FreePageCount)
571 IMG_UINT32 uiAllocSize;
572 IMG_UINT32 i,ui32PagesToFree=0,ui32PagesFreed=0,ui32Index=0;
573 RA_ARENA *pArena = psPageArrayData->psArena;
/* FW-local allocations free back to the per-OS firmware arena instead. */
575 #if defined(SUPPORT_PVRSRV_GPUVIRT)
576 PVRSRV_DEVICE_NODE *psDevNode = psPageArrayData->psDevNode;
577 if (psPageArrayData->bFwLocalAlloc)
579 PVR_ASSERT(psDevNode->uiKernelFwRAIdx < RGXFW_NUM_OS);
580 pArena = psDevNode->psKernelFwMemArena[psDevNode->uiKernelFwRAIdx];
581 psDevNode->uiKernelFwRAIdx = 0;
585 PVR_ASSERT(psPageArrayData->bHasLMPages);
587 uiAllocSize = psPageArrayData->uiAllocSize;
589 ui32PagesToFree = (NULL == pui32FreeIndices)?psPageArrayData->uiTotalNumPages:ui32FreePageCount;
591 for (i = 0;i < ui32PagesToFree;i++)
593 if(NULL == pui32FreeIndices)
599 ui32Index = pui32FreeIndices[i];
/* Skip slots that were never backed (sparse holes). */
602 if (INVALID_PAGE != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
605 if (psPageArrayData->bPoisonOnFree)
607 _PoisonAlloc(psPageArrayData->psDevNode,
608 &psPageArrayData->pasDevPAddr[ui32Index],
609 psPageArrayData->bFwLocalAlloc,
615 RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
617 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
618 #if !defined(PVRSRV_ENABLE_MEMORY_STATS)
619 /* Allocation is done a page at a time */
620 PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiAllocSize);
623 PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
627 psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE;
630 psPageArrayData->iNumPagesAllocated -= ui32PagesFreed;
632 PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
634 if(0 == psPageArrayData->iNumPagesAllocated)
636 psPageArrayData->bHasLMPages = IMG_FALSE;
639 PVR_DPF((PVR_DBG_MESSAGE, "%s: freed %d local memory for PMR @0x%p",__func__,(ui32PagesFreed*uiAllocSize), psPageArrayData));
645 * Implementation of callback functions
649 /* destructor func is called after last reference disappears, but
650 before PMR itself is freed. */
/* PMR destructor callback: free any still-held pages, then the tracking
 * structure. Errors are only asserted — there is no caller to report to. */
652 PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv
656 PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
658 psLMAllocArrayData = pvPriv;
660 /* We can't free pages until now. */
661 if (psLMAllocArrayData->bHasLMPages)
663 eError = _FreeLMPages(psLMAllocArrayData,NULL,0);
664 PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
667 eError = _FreeLMPageArray(psLMAllocArrayData);
668 PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
673 /* callback function for locking the system physical page addresses.
674 As we are LMA there is nothing to do as we control physical memory. */
/* For on-demand (deferred) PMRs this is where the backing pages are
 * actually allocated; otherwise it is a no-op. */
676 PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
680 PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
682 psLMAllocArrayData = pvPriv;
684 if (psLMAllocArrayData->bOnDemand)
686 /* Allocate Memory for deferred allocation */
687 eError = _AllocLMPages(psLMAllocArrayData, NULL);
688 if (eError != PVRSRV_OK)
/* Counterpart of the lock callback: on-demand PMRs release their backing
 * pages here; otherwise a no-op returning PVRSRV_OK. */
699 PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv
702 PVRSRV_ERROR eError = PVRSRV_OK;
703 PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
705 psLMAllocArrayData = pvPriv;
707 if (psLMAllocArrayData->bOnDemand)
709 /* Free Memory for deferred allocation */
710 eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
711 if (eError != PVRSRV_OK)
717 PVR_ASSERT(eError == PVRSRV_OK);
721 /* N.B. It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! */
/* Translate PMR-relative byte offsets into device physical addresses.
 * Multi-allocation PMRs split each offset into (allocation index, offset
 * within allocation); single-allocation PMRs just add the offset to the
 * base. Fails if the caller requests finer contiguity than the PMR has. */
723 PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv,
724 IMG_UINT32 ui32Log2PageSize,
725 IMG_UINT32 ui32NumOfPages,
726 IMG_DEVMEM_OFFSET_T *puiOffset,
728 IMG_DEV_PHYADDR *psDevPAddr)
731 IMG_UINT32 uiLog2AllocSize;
732 IMG_UINT32 uiNumAllocs;
733 IMG_UINT64 uiAllocIndex;
734 IMG_DEVMEM_OFFSET_T uiInAllocOffset;
735 PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
737 if (psLMAllocArrayData->uiLog2AllocSize < ui32Log2PageSize)
739 PVR_DPF((PVR_DBG_ERROR,
740 "%s: Requested physical addresses from PMR "
741 "for incompatible contiguity %u!",
744 return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
747 uiNumAllocs = psLMAllocArrayData->uiTotalNumPages;
750 PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
751 uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize;
/* Multi-allocation path: index = offset / alloc size (via shifts). */
753 for (idx=0; idx < ui32NumOfPages; idx++)
757 uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize;
758 uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize);
760 PVR_ASSERT(uiAllocIndex < uiNumAllocs);
761 PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize));
763 psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset;
/* Single-allocation path: everything is offset from the one base address. */
769 for (idx=0; idx < ui32NumOfPages; idx++)
773 psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx];
/* Acquire a kernel-virtual mapping for a (offset, size) window of the PMR.
 * Multi-page PMRs can only map a window lying within a single page; the
 * returned handle is the raw linear address for the matching Release call. */
782 PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
785 void **ppvKernelAddressOut,
786 IMG_HANDLE *phHandleOut,
790 PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
791 void *pvKernLinAddr = NULL;
792 IMG_UINT32 ui32PageIndex = 0;
793 size_t uiOffsetMask = uiOffset;
795 psLMAllocArrayData = pvPriv;
797 /* Check that we can map this in contiguously */
798 if (psLMAllocArrayData->uiTotalNumPages != 1)
800 size_t uiStart = uiOffset;
801 size_t uiEnd = uiOffset + uiSize - 1;
/* NOTE(review): '1 <<' and '1U <<' below shift an int/unsigned int —
 * for uiLog2AllocSize >= 31/32 this is undefined; confirm the log2 size
 * is bounded well below that (page sizes normally are). */
802 size_t uiPageMask = ~((1 << psLMAllocArrayData->uiLog2AllocSize) - 1);
804 /* We can still map if only one page is required */
805 if ((uiStart & uiPageMask) != (uiEnd & uiPageMask))
807 eError = PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
811 /* Locate the desired physical page to map in */
812 ui32PageIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
813 uiOffsetMask = (1U << psLMAllocArrayData->uiLog2AllocSize) - 1;
816 PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiTotalNumPages);
818 eError = _MapAlloc(psLMAllocArrayData->psDevNode,
819 &psLMAllocArrayData->pasDevPAddr[ui32PageIndex],
820 psLMAllocArrayData->uiAllocSize,
821 psLMAllocArrayData->bFwLocalAlloc,
/* Return the address adjusted to the caller's offset within the page;
 * the unadjusted mapping address is the release handle. */
825 *ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & uiOffsetMask);
826 *phHandleOut = pvKernLinAddr;
831 error exit paths follow
835 PVR_ASSERT(eError != PVRSRV_OK);
/* Release a kernel mapping obtained from PMRAcquireKernelMappingDataLocalMem.
 * hHandle is the raw linear address returned as the acquire handle. */
839 static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
842 PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
843 void *pvKernLinAddr = NULL;
845 psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv;
846 pvKernLinAddr = (void *) hHandle;
848 _UnMapAlloc(psLMAllocArrayData->psDevNode,
849 psLMAllocArrayData->uiAllocSize,
850 psLMAllocArrayData->bFwLocalAlloc,
/* Core read/write engine shared by PMRReadBytesLocalMem and
 * PMRWriteBytesLocalMem. Maps each touched allocation uncached, applies
 * pfnCopyBytes (direction-specific memcpy wrapper) to the overlapping
 * portion, unmaps, and advances until uiBufSz bytes are processed.
 * NOTE(review): lines are missing from this extract — the initialisation
 * of uiBytesCopied/uiBufferOffset to 0 and the error labels are implied. */
857 CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
858 IMG_DEVMEM_OFFSET_T uiOffset,
862 void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer,
866 PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
867 size_t uiBytesCopied;
868 size_t uiBytesToCopy;
869 size_t uiBytesCopyableFromAlloc;
870 void *pvMapping = NULL;
871 IMG_UINT8 *pcKernelPointer = NULL;
872 size_t uiBufferOffset;
873 IMG_UINT64 uiAllocIndex;
874 IMG_DEVMEM_OFFSET_T uiInAllocOffset;
877 psLMAllocArrayData = pvPriv;
880 uiBytesToCopy = uiBufSz;
/* Multi-allocation PMR: walk allocation by allocation. */
883 if (psLMAllocArrayData->uiTotalNumPages > 1)
885 while (uiBytesToCopy > 0)
887 /* we have to map one alloc in at a time */
888 PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
889 uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
890 uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize);
/* Clamp this pass to what remains of the current allocation. */
891 uiBytesCopyableFromAlloc = uiBytesToCopy;
892 if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize))
894 uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset);
897 PVR_ASSERT(uiBytesCopyableFromAlloc != 0);
898 PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumPages);
899 PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize));
901 eError = _MapAlloc(psLMAllocArrayData->psDevNode,
902 &psLMAllocArrayData->pasDevPAddr[uiAllocIndex],
903 psLMAllocArrayData->uiAllocSize,
904 psLMAllocArrayData->bFwLocalAlloc,
905 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
907 if (eError != PVRSRV_OK)
911 pcKernelPointer = pvMapping;
912 pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc);
914 _UnMapAlloc(psLMAllocArrayData->psDevNode,
915 psLMAllocArrayData->uiAllocSize,
916 psLMAllocArrayData->bFwLocalAlloc,
920 uiBufferOffset += uiBytesCopyableFromAlloc;
921 uiBytesToCopy -= uiBytesCopyableFromAlloc;
922 uiOffset += uiBytesCopyableFromAlloc;
923 uiBytesCopied += uiBytesCopyableFromAlloc;
/* Single-allocation PMR: one map covers the whole request. */
928 PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiAllocSize);
929 PVR_ASSERT(psLMAllocArrayData->uiAllocSize != 0);
930 eError = _MapAlloc(psLMAllocArrayData->psDevNode,
931 &psLMAllocArrayData->pasDevPAddr[0],
932 psLMAllocArrayData->uiAllocSize,
933 psLMAllocArrayData->bFwLocalAlloc,
934 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
936 if (eError != PVRSRV_OK)
940 pcKernelPointer = pvMapping;
941 pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz);
943 _UnMapAlloc(psLMAllocArrayData->psDevNode,
944 psLMAllocArrayData->uiAllocSize,
945 psLMAllocArrayData->bFwLocalAlloc,
949 uiBytesCopied = uiBufSz;
951 *puiNumBytes = uiBytesCopied;
954 *puiNumBytes = uiBytesCopied;
/* Copy callback for reads: PMR memory (pcPMR) -> caller buffer (pcBuffer).
 * Passed to CopyBytesLocalMem by PMRReadBytesLocalMem. */
958 static void ReadLocalMem(IMG_UINT8 *pcBuffer,
962 /* NOTE: 'CachedMemCopy' means the operating system default memcpy, which
963 * we *assume* in the LMA code will be faster, and doesn't need to
966 OSCachedMemCopy(pcBuffer, pcPMR, uiSize);
/* PMR read callback: delegates to CopyBytesLocalMem with the ReadLocalMem
 * direction function (PMR -> caller buffer). */
970 PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
971 IMG_DEVMEM_OFFSET_T uiOffset,
976 return CopyBytesLocalMem(pvPriv,
/* Copy callback for writes: caller buffer (pcBuffer) -> PMR memory (pcPMR).
 * Passed to CopyBytesLocalMem by PMRWriteBytesLocalMem. */
984 static void WriteLocalMem(IMG_UINT8 *pcBuffer,
988 /* NOTE: 'CachedMemCopy' means the operating system default memcpy, which
989 * we *assume* in the LMA code will be faster, and doesn't need to
992 OSCachedMemCopy(pcPMR, pcBuffer, uiSize);
/* PMR write callback: delegates to CopyBytesLocalMem with the WriteLocalMem
 * direction function (caller buffer -> PMR). */
996 PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
997 IMG_DEVMEM_OFFSET_T uiOffset,
1000 size_t *puiNumBytes)
1002 return CopyBytesLocalMem(pvPriv,
1010 /*************************************************************************/ /*!
1011 @Function PMRChangeSparseMemLocalMem
1012 @Description This function Changes the sparse mapping by allocating & freeing
1013 of pages. It does also change the GPU maps accordingly
1014 @Return PVRSRV_ERROR failure code
1015 */ /**************************************************************************/
1017 PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv,
1019 IMG_UINT32 ui32AllocPageCount,
1020 IMG_UINT32 *pai32AllocIndices,
1021 IMG_UINT32 ui32FreePageCount,
1022 IMG_UINT32 *pai32FreeIndices,
1025 PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
1027 IMG_UINT32 ui32AdtnlAllocPages = 0;
1028 IMG_UINT32 ui32AdtnlFreePages = 0;
1029 IMG_UINT32 ui32CommonRequstCount = 0;
1030 IMG_UINT32 ui32Loop = 0;
1031 IMG_UINT32 ui32Index = 0;
1032 IMG_UINT32 uiAllocpgidx;
1033 IMG_UINT32 uiFreepgidx;
1035 PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
1036 IMG_DEV_PHYADDR sPhyAddr;
1039 IMG_BOOL bPoisonFail = IMG_FALSE;
1040 IMG_BOOL bZeroFail = IMG_FALSE;
1043 /* Fetch the Page table array represented by the PMR */
1044 IMG_DEV_PHYADDR *psPageArray = psPMRPageArrayData->pasDevPAddr;
/* "Mappig" (sic) — name as exported by the PMR layer; do not correct here. */
1045 PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
1047 /* The incoming request is classified into two operations independent of
1048 * each other: alloc & free pages.
1049 * These operations can be combined with two mapping operations as well
1050 * which are GPU & CPU space mappings.
1052 * From the alloc and free page requests, the net amount of pages to be
1053 * allocated or freed is computed. Pages that were requested to be freed
1054 * will be reused to fulfil alloc requests.
1056 * The order of operations is:
1057 * 1. Allocate new pages from the OS
1058 * 2. Move the free pages from free request to alloc positions.
1059 * 3. Free the rest of the pages not used for alloc
1061 * Alloc parameters are validated at the time of allocation
1062 * and any error will be handled then. */
1064 if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
1066 ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ?
1067 ui32FreePageCount : ui32AllocPageCount;
/* Swapping pages between alloc and free requests is unsupported at PDump. */
1069 PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
1072 if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
1074 ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequstCount;
1078 ui32AllocPageCount = 0;
1081 if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
1083 ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequstCount;
1087 ui32FreePageCount = 0;
/* Nothing to do at all is treated as an invalid request. */
1090 if (0 == (ui32CommonRequstCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
1092 eError = PVRSRV_ERROR_INVALID_PARAMS;
1097 /* Validate the free page indices */
1098 if (ui32FreePageCount)
1100 if (NULL != pai32FreeIndices)
1102 for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
1104 uiFreepgidx = pai32FreeIndices[ui32Loop];
/* NOTE(review): bounds check uses '>' — an index equal to
 * uiTotalNumPages slips through and would index one past the array;
 * this looks like it should be '>='. Same applies at the alloc-index
 * check below. Verify against the upstream driver before changing. */
1106 if (uiFreepgidx > psPMRPageArrayData->uiTotalNumPages)
1108 eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
1112 if (INVALID_PAGE == psPageArray[uiFreepgidx].uiAddr)
1114 eError = PVRSRV_ERROR_INVALID_PARAMS;
1119 eError = PVRSRV_ERROR_INVALID_PARAMS;
1124 /*The following block of code verifies any issues with common alloc page indices */
1125 for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
1127 uiAllocpgidx = pai32AllocIndices[ui32Loop];
1128 if (uiAllocpgidx > psPMRPageArrayData->uiTotalNumPages)
1130 eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
/* Without REMAP, an alloc target slot must currently be unbacked... */
1134 if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
1136 if ((INVALID_PAGE != psPageArray[uiAllocpgidx].uiAddr) ||
1137 (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
1139 eError = PVRSRV_ERROR_INVALID_PARAMS;
/* ...with REMAP, it must already be backed. */
1145 if ((INVALID_PAGE == psPageArray[uiAllocpgidx].uiAddr) ||
1146 (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]))
1148 eError = PVRSRV_ERROR_INVALID_PARAMS;
1157 /* Allocate new pages */
1158 if (0 != ui32AdtnlAllocPages)
1160 /* Say how many pages to allocate */
1161 psPMRPageArrayData->uiPagesToAlloc = ui32AdtnlAllocPages;
1163 eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices);
1164 if (PVRSRV_OK != eError)
1166 PVR_DPF((PVR_DBG_ERROR,
1167 "%s: New Addtl Allocation of pages failed",
1172 /* Mark the corresponding pages of translation table as valid */
1173 for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
1175 psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
1179 ui32Index = ui32Loop;
1181 /* Move the corresponding free pages to alloc request */
1182 for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++)
1185 uiAllocpgidx = pai32AllocIndices[ui32Index];
1186 uiFreepgidx = pai32FreeIndices[ui32Loop];
/* Swap the physical page from the free slot into the alloc slot. */
1187 sPhyAddr = psPageArray[uiAllocpgidx];
1188 psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
1190 /* Is remap mem used in real world scenario? Should it be turned to a
1191 * debug feature? The condition check needs to be out of loop, will be
1192 * done at later point though after some analysis */
1193 if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
1195 psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
1196 psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
1197 psPageArray[uiFreepgidx].uiAddr = INVALID_PAGE;
1201 psPageArray[uiFreepgidx] = sPhyAddr;
1202 psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
1203 psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
1206 /* Be sure to honour the attributes associated with the allocation
1207 * such as zeroing, poisoning etc. */
1208 if (psPMRPageArrayData->bPoisonOnAlloc)
1210 eError = _PoisonAlloc(psPMRPageArrayData->psDevNode,
1211 &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
1212 psPMRPageArrayData->bFwLocalAlloc,
1213 psPMRPageArrayData->uiAllocSize,
1217 /* Consider this as a soft failure and go ahead but log error to kernel log */
1218 if (eError != PVRSRV_OK)
1221 bPoisonFail = IMG_TRUE;
1227 if (psPMRPageArrayData->bZeroOnAlloc)
1229 eError = _ZeroAlloc(psPMRPageArrayData->psDevNode,
1230 &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
1231 psPMRPageArrayData->bFwLocalAlloc,
1232 psPMRPageArrayData->uiAllocSize);
1233 /* Consider this as a soft failure and go ahead but log error to kernel log */
1234 if (eError != PVRSRV_OK)
1237 /*Don't think we need to zero any pages further*/
1238 bZeroFail = IMG_TRUE;
1245 /*Free the additional free pages */
1246 if (0 != ui32AdtnlFreePages)
1248 ui32Index = ui32Loop;
1249 _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages);
1252 while(ui32Loop++ < ui32AdtnlFreePages)
1254 /*Set the corresponding mapping table entry to invalid address */
1255 psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID;
/* Poison/zero failures were soft — report them once here. */
1262 if(IMG_TRUE == bPoisonFail)
1264 PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the page", __FUNCTION__));
1267 if(IMG_TRUE == bZeroFail)
1269 PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the page", __FUNCTION__));
1273 /* Update the PMR memory holding information */
1281 /*************************************************************************/ /*!
1282 @Function PMRChangeSparseMemCPUMapLocalMem
1283 @Description This function Changes CPU maps accordingly
1284 @Return PVRSRV_ERROR failure code
1285 */ /**************************************************************************/
1287 PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv,
1289 IMG_UINT64 sCpuVAddrBase,
1290 IMG_UINT32 ui32AllocPageCount,
1291 IMG_UINT32 *pai32AllocIndices,
1292 IMG_UINT32 ui32FreePageCount,
1293 IMG_UINT32 *pai32FreeIndices)
1295 IMG_DEV_PHYADDR *psPageArray;
1296 PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
1297 uintptr_t sCpuVABase = sCpuVAddrBase;
1298 IMG_CPU_PHYADDR sCpuAddrPtr;
1301 /*Get the base address of the heap */
1302 PMR_CpuPhysAddr(psPMR,
1303 psPMRPageArrayData->uiLog2AllocSize,
1305 0, /* offset zero here mean first page in the PMR */
1309 /* Phys address of heap is computed here by subtracting the offset of this page
1310 * basically phys address of any page = Base address of heap + offset of the page */
1311 sCpuAddrPtr.uiAddr -= psPMRPageArrayData->pasDevPAddr[0].uiAddr;
1312 psPageArray = psPMRPageArrayData->pasDevPAddr;
/* Delegate the actual CPU page-table surgery to the OS layer. */
1314 return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
/* PMR implementation callback table for local (card) memory. Registered with
 * the PMR factory so that generic PMR code can lock/unlock, map, read/write
 * and finalize LMA-backed allocations through these callbacks. */
1325 static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = {
1326 /* pfnLockPhysAddresses */
1327 &PMRLockSysPhysAddressesLocalMem,
1328 /* pfnUnlockPhysAddresses */
1329 &PMRUnlockSysPhysAddressesLocalMem,
1330 /* pfnDevPhysAddr */
1331 &PMRSysPhysAddrLocalMem,
1332 /* pfnAcquireKernelMappingData */
1333 &PMRAcquireKernelMappingDataLocalMem,
1334 /* pfnReleaseKernelMappingData */
1335 &PMRReleaseKernelMappingDataLocalMem,
1336 #if defined(INTEGRITY_OS)
1337 /* pfnMapMemoryObject */
1339 /* pfnUnmapMemoryObject */
/* pfnReadBytes */
1343 &PMRReadBytesLocalMem,
/* pfnWriteBytes */
1345 &PMRWriteBytesLocalMem,
1350 /* pfnChangeSparseMem*/
1351 &PMRChangeSparseMemLocalMem,
1352 /* pfnChangeSparseMemCPUMap */
1353 &PMRChangeSparseMemCPUMapLocalMem,
/* pfnFinalize */
1357 &PMRFinalizeLocalMem
/*************************************************************************/ /*!
@Function PhysmemNewLocalRamBackedPMR
@Description Creates a new PMR backed by local card memory. Decodes the
             allocation flags (on-demand, FW-local, CPU-local, zero/poison),
             selects the appropriate physical heap, allocates the page
             array and (unless on-demand) the physical pages themselves,
             then wraps the result in a PMR via PMRCreatePMR(). On failure
             the partially built state is unwound via the error labels at
             the bottom of the function.
@Return PVRSRV_ERROR failure code
*/ /**************************************************************************/
1361 PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
1362 IMG_DEVMEM_SIZE_T uiSize,
1363 IMG_DEVMEM_SIZE_T uiChunkSize,
1364 IMG_UINT32 ui32NumPhysChunks,
1365 IMG_UINT32 ui32NumVirtChunks,
1366 IMG_UINT32 *pui32MappingTable,
1367 IMG_UINT32 uiLog2PageSize,
1368 PVRSRV_MEMALLOCFLAGS_T uiFlags,
1369 const IMG_CHAR *pszAnnotation,
1372 PVRSRV_ERROR eError;
1373 PVRSRV_ERROR eError2;
1375 PMR_LMALLOCARRAY_DATA *psPrivData = NULL;
1376 PMR_FLAGS_T uiPMRFlags;
1377 PHYS_HEAP *psPhysHeap;
1379 IMG_BOOL bPoisonOnAlloc;
1380 IMG_BOOL bPoisonOnFree;
1383 IMG_BOOL bFwLocalAlloc;
1384 IMG_BOOL bCpuLocalAlloc;
/* A kernel-CPU-mappable, fully populated (phys == virt chunk count)
 * allocation does not need to be treated as physically contiguous. */
1386 if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) &&
1387 (ui32NumPhysChunks == ui32NumVirtChunks))
1393 bContig = IMG_FALSE;
/* Decode the per-allocation behaviour flags into local booleans. */
1396 bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
1397 bFwLocalAlloc = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
1398 bCpuLocalAlloc = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
1399 bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
1400 bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
1401 bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
1403 if (bZero && bPoisonOnAlloc)
1405 /* Zero on Alloc and Poison on Alloc are mutually exclusive */
1406 eError = PVRSRV_ERROR_INVALID_PARAMS;
1410 /* Silently round up alignment/pagesize if request was less that
1411 PAGE_SHIFT, because it would never be harmful for memory to be
1412 _more_ contiguous that was desired */
1414 uiLog2PageSize = OSGetPageShift() > uiLog2PageSize
1418 /* In case we have a non-sparse allocation tolerate bad requests and round up.
1419 * For sparse allocations the users have to make sure to meet the right
1421 if (ui32NumPhysChunks == ui32NumVirtChunks &&
1422 ui32NumVirtChunks == 1)
1424 /* Round up allocation size to at least a full OSGetPageSize() */
1425 uiSize = PVR_ALIGN(uiSize, OSGetPageSize());
1426 uiChunkSize = uiSize;
/* Pick the physical heap: FW-local takes priority, then CPU-local,
 * otherwise the default GPU-local heap. */
1431 psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
1433 else if (bCpuLocalAlloc)
1435 psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
1439 psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
1442 /* Create Array structure that holds the physical pages */
1443 eError = _AllocLMPageArray(psDevNode,
1444 uiChunkSize * ui32NumVirtChunks,
1459 if (eError != PVRSRV_OK)
1461 goto errorOnAllocPageArray;
/* Unless the allocation is on-demand, back the PMR with physical pages
 * right away. */
1467 /* Allocate the physical pages */
1468 eError = _AllocLMPages(psPrivData,pui32MappingTable);
1469 if (eError != PVRSRV_OK)
1471 goto errorOnAllocPages;
1475 /* In this instance, we simply pass flags straight through.
1477 Generically, uiFlags can include things that control the PMR
1478 factory, but we don't need any such thing (at the time of
1479 writing!), and our caller specifies all PMR flags so we don't
1480 need to meddle with what was given to us.
1482 uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
1483 /* check no significant bits were lost in cast due to different
1484 bit widths for flags */
1485 PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
1489 PDUMPCOMMENT("Deferred Allocation PMR (LMA)");
1493 eError = PMRCreatePMR(psDevNode,
1508 if (eError != PVRSRV_OK)
1510 PVR_DPF((PVR_DBG_ERROR, "PhysmemNewLocalRamBackedPMR: Unable to create PMR (status=%d)", eError));
/* Error unwind: free physical pages (if any were committed) and then the
 * page array structure, in reverse order of acquisition. */
1518 if(!bOnDemand && psPrivData->bHasLMPages)
1520 eError2 = _FreeLMPages(psPrivData, NULL,0);
1521 PVR_ASSERT(eError2 == PVRSRV_OK);
1525 eError2 = _FreeLMPageArray(psPrivData);
1526 PVR_ASSERT(eError2 == PVRSRV_OK);
1528 errorOnAllocPageArray:
1530 PVR_ASSERT(eError != PVRSRV_OK);
1534 #if defined(SUPPORT_GPUVIRT_VALIDATION)
/* Singly-linked list node pairing a process ID with its OSid configuration,
 * used by the GPU virtualization validation support below. (The list is
 * searched by a pId member — its declaration line is not visible here.) */
1536 struct PidOSidCouplingList
/* OSid assigned to the process */
1539 IMG_UINT32 ui32OSid;
/* Value programmed into the OSid register for this process */
1540 IMG_UINT32 ui32OSidReg;
/* Whether AXI protection (secure) is enabled for this OSid */
1541 IMG_BOOL bOSidAxiProt;
/* Next node in the list, NULL at the tail */
1543 struct PidOSidCouplingList *psNext;
1545 typedef struct PidOSidCouplingList PidOSidCouplingList;
/* Head and tail of the PID<->OSid coupling list; tail is kept for O(1)
 * append in InsertPidOSidsCoupling(). Both NULL when the list is empty. */
1548 static PidOSidCouplingList *psPidOSidHead=NULL;
1549 static PidOSidCouplingList *psPidOSidTail=NULL;
/* Appends a new (PID, OSid, OSid register, AXI-protection) coupling to the
 * tail of the global list. Allocation failure is non-fatal: an error is
 * logged and the entry is simply not inserted. */
1550 void InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt)
1552 PidOSidCouplingList *psTmp;
1554 PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Inserting (PID/ OSid/ OSidReg/ IsSecure) (%d/ %d/ %d/ %s) into list",
1555 pId,ui32OSid, ui32OSidReg, (bOSidAxiProt)?"Yes":"No"));
1557 psTmp=OSAllocMem(sizeof(PidOSidCouplingList));
/* Allocation failed: log and bail out, leaving the list unchanged. */
1561 PVR_DPF((PVR_DBG_ERROR,"(GPU Virtualization Validation): Memory allocation failed. No list insertion => program will execute normally.\n"));
/* Populate the new node. */
1566 psTmp->ui32OSid=ui32OSid;
1567 psTmp->ui32OSidReg=ui32OSidReg;
1568 psTmp->bOSidAxiProt = bOSidAxiProt;
/* Empty list: new node becomes both head and tail; otherwise append at
 * the tail. */
1571 if (psPidOSidHead==NULL)
1573 psPidOSidHead=psTmp;
1574 psPidOSidTail=psTmp;
1578 psPidOSidTail->psNext=psTmp;
1579 psPidOSidTail=psTmp;
/* Linearly searches the coupling list for pId and writes the matching OSid,
 * OSid-register value and AXI-protection flag through the out-parameters.
 * When no entry matches, the AXI-protection flag is set to IMG_FALSE (the
 * not-found defaults for the other out-parameters are in elided lines). */
1585 void RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt)
1587 PidOSidCouplingList *psTmp;
1589 for (psTmp=psPidOSidHead;psTmp!=NULL;psTmp=psTmp->psNext)
1591 if (psTmp->pId==pId)
/* Match found: copy the stored values out. */
1593 (*pui32OSid) = psTmp->ui32OSid;
1594 (*pui32OSidReg) = psTmp->ui32OSidReg;
1595 (*pbOSidAxiProt) = psTmp->bOSidAxiProt;
/* Not found: report "no AXI protection". */
1603 (*pbOSidAxiProt) = IMG_FALSE;
1608 void RemovePidOSidCoupling(IMG_PID pId)
1610 PidOSidCouplingList *psTmp, *psPrev=NULL;
1612 for (psTmp=psPidOSidHead; psTmp!=NULL; psTmp=psTmp->psNext)
1614 if (psTmp->pId==pId) break;
1623 PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Deleting Pairing %d / (%d - %d) from list",psTmp->pId, psTmp->ui32OSid, psTmp->ui32OSidReg));
1625 if (psTmp==psPidOSidHead)
1627 if (psPidOSidHead->psNext==NULL)
1636 psPidOSidHead=psPidOSidHead->psNext;
1641 if (psPrev==NULL) return ;
1643 psPrev->psNext=psTmp->psNext;
1644 if (psTmp==psPidOSidTail)
1646 psPidOSidTail=psPrev;