1 /*************************************************************************/ /*!
3 @Title Implementation of PMR functions for OS managed memory
4 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @Description Part of the memory management. This module is responsible for
6 implementing the function callbacks for physical memory borrowed
7 from that normally managed by the operating system.
8 @License Dual MIT/GPLv2
10 The contents of this file are subject to the MIT license as set out below.
12 Permission is hereby granted, free of charge, to any person obtaining a copy
13 of this software and associated documentation files (the "Software"), to deal
14 in the Software without restriction, including without limitation the rights
15 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 copies of the Software, and to permit persons to whom the Software is
17 furnished to do so, subject to the following conditions:
19 The above copyright notice and this permission notice shall be included in
20 all copies or substantial portions of the Software.
22 Alternatively, the contents of this file may be used under the terms of
23 the GNU General Public License Version 2 ("GPL") in which case the provisions
24 of GPL are applicable instead of those above.
26 If you wish to allow use of your version of this file only under the terms of
27 GPL, and not to allow others to use your version of this file under the terms
28 of the MIT license, indicate your decision by deleting the provisions above
29 and replace them with the notice and other provisions required by GPL as set
30 out in the file called "GPL-COPYING" included in this distribution. If you do
31 not delete the provisions above, a recipient may use your version of this file
32 under the terms of either the MIT license or GPL.
34 This License is also included in this distribution in the file called
37 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
38 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
39 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
40 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
41 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
42 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
43 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
44 */ /**************************************************************************/
45 #include <linux/version.h>
46 #include <linux/device.h>
47 #include <linux/dma-mapping.h>
49 #include <linux/slab.h>
50 #include <linux/highmem.h>
51 #include <linux/mm_types.h>
52 #include <linux/vmalloc.h>
53 #include <linux/gfp.h>
54 #include <linux/sched.h>
55 #include <linux/atomic.h>
57 #if defined(CONFIG_X86)
58 #include <asm/cacheflush.h>
62 #include "img_types.h"
63 #include "pvr_debug.h"
64 #include "pvrsrv_error.h"
65 #include "pvrsrv_memallocflags.h"
66 #include "rgx_pdump_panics.h"
67 /* services/server/include/ */
73 #include "devicemem_server_utils.h"
76 #include "physmem_osmem.h"
77 #include "physmem_osmem_linux.h"
79 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
80 #include "process_stats.h"
83 #include "kernel_compatibility.h"
/* Maximum order used for alloc_pages(): split_page() (needed to hand back
 * sub-pages of a higher-order allocation) only exists on kernels >= 3.10,
 * so older kernels are forced down to order-0 allocations.
 * NOTE(review): the #else/#endif lines of this conditional appear to have
 * been dropped from this extraction -- restore before compiling. */
85 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
86 static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM;
88 /* split_page not available on older kernels */
89 #undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM
90 #define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0
91 static IMG_UINT32 g_uiMaxOrder = 0;
/* The low bit of a stored dma_addr_t is (ab)used as a tag recording whether
 * the entry was obtained from alloc_pages() rather than the DMA framework.
 * Addresses are at least page aligned, so bit 0 is otherwise always zero. */
#define DMA_GET_ADDR(x)         ((x) & ~((dma_addr_t)0x01)) /* strip tag; clearer than the old (((x)>>1)<<1) and identical for unsigned dma_addr_t */
#define DMA_SET_ALLOCPG_ADDR(x) ((x) |  ((dma_addr_t)0x01)) /* tag as alloc_pages() memory */
#define DMA_IS_ALLOCPG_ADDR(x)  ((x) &  ((dma_addr_t)0x01)) /* non-zero if tagged */
/* Per-PMR bookkeeping for physical pages borrowed from the OS.
 * NOTE(review): later code dereferences psPageArrayData->bZero and
 * psPageArrayData->dmavirtarray, but no such fields are visible below --
 * the corresponding lines appear to have been dropped from this
 * extraction. Confirm against the full source before relying on this
 * struct layout. */
99 typedef struct _PMR_OSPAGEARRAY_DATA_ {
100 /* Device for which this allocation has been made */
101 PVRSRV_DEVICE_NODE *psDevNode;
104 * iNumPagesAllocated:
105 * Number of pages allocated in this PMR so far.
106 * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR.
108 IMG_INT32 iNumPagesAllocated;
112 * Total number of pages supported by this PMR. (Fixed as of now due the fixed Page table array size)
113 * number of "pages" (a.k.a. macro pages, compound pages, higher order pages, etc...)
115 IMG_UINT32 uiTotalNumPages;
120 size of each "page" -- this would normally be the same as
121 PAGE_SHIFT, but we support the idea that we may allocate pages
122 in larger chunks for better contiguity, using order>0 in the
123 call to alloc_pages()
125 IMG_UINT32 uiLog2DevPageSize;
128 For non DMA/CMA allocation, pagearray references the pages
129 thus allocated; one entry per compound page when compound
130 pages are used. In addition, for DMA/CMA allocations, we
131 track the returned cpu virtual and device bus address.
133 struct page **pagearray;
134 dma_addr_t *dmaphysarray;
139 record at alloc time whether poisoning will be required when the
143 IMG_BOOL bPoisonOnFree;
144 IMG_BOOL bPoisonOnAlloc;
146 IMG_BOOL bUnpinned; /* Should be protected by page pool lock */
147 IMG_BOOL bIsCMA; /* Is CMA memory allocated via DMA framework */
150 The cache mode of the PMR. Additionally carrying the CPU-Cache-Clean
151 flag, advising us to do cache maintenance on behalf of the caller.
152 NOTE: For DMA/CMA allocations, memory is _always_ uncached.
154 Boolean used to track if we need to revert the cache attributes
155 of the pages used in this allocation. Depends on OS/architecture.
157 IMG_UINT32 ui32CPUCacheFlags;
158 IMG_BOOL bUnsetMemoryType;
159 } PMR_OSPAGEARRAY_DATA;
/* Forward declarations of the page-free and cache-maintenance helpers
 * defined later in the file (needed by the pool/shrinker code above them).
 * NOTE(review): the return-type lines of several prototypes (e.g.
 * `static void`, `static PVRSRV_ERROR`) appear to have been dropped from
 * this extraction -- only _ApplyOSPagesAttribute retains its full
 * signature below. */
161 /***********************************
162  * Page pooling for uncached pages *
163  ***********************************/
166 _FreeOSPage_CMA(struct device *dev,
171 struct page *psPage);
174 _FreeOSPage(IMG_UINT32 uiOrder,
175 IMG_BOOL bUnsetMemoryType,
176 struct page *psPage);
179 _FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
180 IMG_UINT32 *pai32FreeIndices,
181 IMG_UINT32 ui32FreePageCount);
184 _FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
185 IMG_UINT32 *puiPagesFreed);
188 _ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
189 struct page **ppsPage,
190 IMG_UINT32 uiNumPages,
193 static inline PVRSRV_ERROR
194 _ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
195 struct page **ppsPage,
196 IMG_UINT32 uiNumPages,
198 IMG_UINT32 ui32CPUCacheFlags);
/* NOTE(review): the `typedef struct ... {` openers of both structs and the
 * closing `} LinuxUnpinEntry;` appear to have been dropped from this
 * extraction. */
200 /* A struct for our page pool holding an array of pages.
201  * We always put units of page arrays to the pool but are
202  * able to take individual pages */
205 /* Linkage for page pool LRU list */
206 struct list_head sPagePoolItem;
208 /* How many items are still in the page array */
209 IMG_UINT32 uiItemsRemaining;
210 struct page **ppsPageArray;
212 } LinuxPagePoolEntry;
214 /* A struct for the unpinned items */
217 struct list_head sUnpinPoolItem;
218 PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr;
/* Global page-pool state. All of it (lists, counters, unpin list) is
 * protected by g_sPagePoolMutex below.
 * NOTE(review): the #else/#endif lines of the three conditionals and the
 * closing `};` of both g_aui32CPUCacheFlags initialisers appear to have
 * been dropped from this extraction. */
221 /* Caches to hold page pool and page array structures */
222 static struct kmem_cache *g_psLinuxPagePoolCache = NULL;
223 static struct kmem_cache *g_psLinuxPageArray = NULL;
225 /* Track what is live */
226 static IMG_UINT32 g_ui32UnpinPageCount = 0;
227 static IMG_UINT32 g_ui32PagePoolEntryCount = 0;
229 /* Pool entry limits */
230 #if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES)
231 static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES;
232 static const IMG_UINT32 g_ui32PagePoolMaxEntries_5Percent= PVR_LINUX_PHYSMEM_MAX_POOL_PAGES / 20;
234 static const IMG_UINT32 g_ui32PagePoolMaxEntries = 0;
235 static const IMG_UINT32 g_ui32PagePoolMaxEntries_5Percent = 0;
238 #if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES)
239 static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES;
241 static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = 0;
244 #if defined(CONFIG_X86)
245 #define PHYSMEM_OSMEM_NUM_OF_POOLS 3
246 static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
247 PVRSRV_MEMALLOCFLAG_CPU_CACHED,
248 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
249 PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE
252 #define PHYSMEM_OSMEM_NUM_OF_POOLS 2
253 static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
254 PVRSRV_MEMALLOCFLAG_CPU_CACHED,
255 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
259 /* Global structures we use to manage the page pool */
260 static DEFINE_MUTEX(g_sPagePoolMutex);
262 /* List holding the page array pointers: */
263 static LIST_HEAD(g_sPagePoolList_WB);
264 static LIST_HEAD(g_sPagePoolList_WC);
265 static LIST_HEAD(g_sPagePoolList_UC);
266 static LIST_HEAD(g_sUnpinList);
271 mutex_lock(&g_sPagePoolMutex);
275 _PagePoolTrylock(void)
277 return mutex_trylock(&g_sPagePoolMutex);
281 _PagePoolUnlock(void)
283 mutex_unlock(&g_sPagePoolMutex);
287 _AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
289 LinuxUnpinEntry *psUnpinEntry;
291 psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry));
294 PVR_DPF((PVR_DBG_ERROR,
295 "%s: OSAllocMem failed. Cannot add entry to unpin list.",
297 return PVRSRV_ERROR_OUT_OF_MEMORY;
300 psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData;
302 /* Add into pool that the shrinker can access easily*/
303 list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList);
305 g_ui32UnpinPageCount += psOSPageArrayData->iNumPagesAllocated;
311 _RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
313 LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
315 /* Remove from pool */
316 list_for_each_entry_safe(psUnpinEntry,
321 if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData)
323 list_del(&psUnpinEntry->sUnpinPoolItem);
328 OSFreeMem(psUnpinEntry);
330 g_ui32UnpinPageCount -= psOSPageArrayData->iNumPagesAllocated;
333 static inline IMG_BOOL
334 _GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags,
335 struct list_head **ppsPoolHead)
337 switch(PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
339 case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
340 #if defined(CONFIG_X86)
342 For x86 we need to keep different lists for uncached
343 and write-combined as we must always honour the PAT
344 setting which cares about this difference.
347 *ppsPoolHead = &g_sPagePoolList_WC;
351 case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
352 *ppsPoolHead = &g_sPagePoolList_UC;
355 case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
356 *ppsPoolHead = &g_sPagePoolList_WB;
360 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get pages from pool, "
361 "unknown CPU caching mode.", __func__));
/* Forward declaration: the shrinker object (defined further down for the
 * appropriate kernel API version) is asserted against inside the
 * count/scan callbacks. */
367 static struct shrinker g_sShrinker;
369 /* Returning the number of pages that still reside in the page pool.
370 * Do not count excess pages that will be freed by the defer free thread. */
372 _GetNumberOfPagesInPoolUnlocked(void)
374 unsigned int uiEntryCount;
376 uiEntryCount = (g_ui32PagePoolEntryCount > g_ui32PagePoolMaxEntries) ? g_ui32PagePoolMaxEntries : g_ui32PagePoolEntryCount;
377 return uiEntryCount + g_ui32UnpinPageCount;
380 /* Linux shrinker function that informs the OS about how many pages we are caching and
381 * it is able to reclaim. */
383 _CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
387 PVR_ASSERT(psShrinker == &g_sShrinker);
389 (void)psShrinkControl;
391 /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
392 if (_PagePoolTrylock() == 0)
394 remain = _GetNumberOfPagesInPoolUnlocked();
/* Linux shrinker "scan" callback: frees pages first from the page pool and
 * then, if more are requested, from unpinned PMR page arrays (LRU order).
 * NOTE(review): many structural lines (braces, early-return on trylock
 * failure, `break`/`continue` statements, loop arguments, labels) are
 * missing from this extraction -- the surviving statements below must not
 * be re-flowed without consulting the full source. */
400 /* Linux shrinker function to reclaim the pages from our page pool */
402 _ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
404 unsigned long uNumToScan = psShrinkControl->nr_to_scan;
405 unsigned long uSurplus = 0;
406 LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
407 IMG_UINT32 uiPagesFreed;
409 PVR_ASSERT(psShrinker == &g_sShrinker);
412 /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
413 if (_PagePoolTrylock() == 0)
416 _FreePagesFromPoolUnlocked(uNumToScan,
418 uNumToScan -= uiPagesFreed;
425 /* Free unpinned memory, starting with LRU entries */
426 list_for_each_entry_safe(psUnpinEntry,
431 PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr;
432 IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumPages > psPageArrayDataPtr->iNumPagesAllocated)?
433 psPageArrayDataPtr->iNumPagesAllocated:psPageArrayDataPtr->uiTotalNumPages;
436 /* Free associated pages */
437 eError = _FreeOSPages(psPageArrayDataPtr,
440 if (eError != PVRSRV_OK)
442 PVR_DPF((PVR_DBG_ERROR,
443 "%s: Shrinker is unable to free unpinned pages. Error: %s (%d)",
445 PVRSRVGetErrorStringKM(eError),
450 /* Remove item from pool */
451 list_del(&psUnpinEntry->sUnpinPoolItem);
453 g_ui32UnpinPageCount -= uiNumPages;
455 /* Check if there is more to free or if we already surpassed the limit */
456 if (uiNumPages < uNumToScan)
458 uNumToScan -= uiNumPages;
461 else if (uiNumPages > uNumToScan)
463 uSurplus += uiNumPages - uNumToScan;
469 uNumToScan -= uiNumPages;
475 if (list_empty(&g_sPagePoolList_WC) &&
476 list_empty(&g_sPagePoolList_UC) &&
477 list_empty(&g_sPagePoolList_WB))
479 PVR_ASSERT(g_ui32PagePoolEntryCount == 0);
481 if (list_empty(&g_sUnpinList))
483 PVR_ASSERT(g_ui32UnpinPageCount == 0);
486 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
489 remain = _GetNumberOfPagesInPoolUnlocked();
494 /* Returning the number of pages freed during the scan */
496 return psShrinkControl->nr_to_scan - uNumToScan + uSurplus;
500 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
502 _ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
504 if (psShrinkControl->nr_to_scan != 0)
506 return _ScanObjectsInPagePool(psShrinker, psShrinkControl);
510 /* No pages are being reclaimed so just return the page count */
511 return _CountObjectsInPagePool(psShrinker, psShrinkControl);
515 static struct shrinker g_sShrinker =
517 .shrink = _ShrinkPagePool,
518 .seeks = DEFAULT_SEEKS
521 static struct shrinker g_sShrinker =
523 .count_objects = _CountObjectsInPagePool,
524 .scan_objects = _ScanObjectsInPagePool,
525 .seeks = DEFAULT_SEEKS
529 /* Register the shrinker so Linux can reclaim cached pages */
530 void LinuxInitPhysmem(void)
532 g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL);
535 g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL);
536 if (g_psLinuxPagePoolCache)
538 /* Only create the shrinker if we created the cache OK */
539 register_shrinker(&g_sShrinker);
544 /* Unregister the shrinker and remove all pages from the pool that are still left */
545 void LinuxDeinitPhysmem(void)
547 IMG_UINT32 uiPagesFreed;
550 if (_FreePagesFromPoolUnlocked(g_ui32PagePoolEntryCount, &uiPagesFreed) != PVRSRV_OK)
552 PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when deinitialising."));
556 PVR_ASSERT(g_ui32PagePoolEntryCount == 0);
558 /* Free the page cache */
559 kmem_cache_destroy(g_psLinuxPagePoolCache);
561 unregister_shrinker(&g_sShrinker);
564 kmem_cache_destroy(g_psLinuxPageArray);
567 static void EnableOOMKiller(void)
569 current->flags &= ~PF_DUMPCORE;
572 static void DisableOOMKiller(void)
574 /* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled.
576 * As oom_killer_disable() is an inline, non-exported function, we
577 * can't use it from a modular driver. Furthermore, the OOM killer
578 * API doesn't look thread safe, which `current' is.
580 WARN_ON(current->flags & PF_DUMPCORE);
581 current->flags |= PF_DUMPCORE;
584 /* Prints out the addresses in a page array for debugging purposes
585 * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */
586 /* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */
588 _DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint)
590 #if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY)
594 printk("Array %p:\n", pagearray);
595 for (i = 0; i < uiPagesToPrint; i++)
597 printk("%p | ", (pagearray)[i]);
603 printk("Array is NULL:\n");
606 PVR_UNREFERENCED_PARAMETER(pagearray);
607 PVR_UNREFERENCED_PARAMETER(uiPagesToPrint);
/* Debugging function that dumps out the number of pages for every
 * page array that is currently in the page pool.
 * Not defined by default. Define locally to activate feature: */
/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */
static void
_DumpPoolStructure(void)
{
#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL)
	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
	struct list_head *psPoolHead = NULL;
	IMG_UINT32 j;

	printk("\n");
	/* Empty all pools */
	for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
	{
		printk("pool = %u \n", j);

		/* Get the correct list for this caching mode */
		if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead))
		{
			break;
		}

		list_for_each_entry_safe(psPagePoolEntry,
		                         psTempPoolEntry,
		                         psPoolHead,
		                         sPagePoolItem)
		{
			printk("%u | ", psPagePoolEntry->uiItemsRemaining);
		}
		printk("\n");
	}
#endif
}
/* Drains pages above g_ui32PagePoolMaxEntries: collects excess pool
 * entries onto a private list while holding the pool lock, then frees the
 * actual pages with the lock dropped (so the shrinker/allocators are not
 * blocked during the expensive __free_pages loop).
 * NOTE(review): the lock/unlock calls, loop arguments, `bDone` handling
 * and the end-of-function return are missing from this extraction -- do
 * not restructure without the full source. */
648 /* Will take excess pages from the pool with acquired pool lock and then free
649  * them without pool lock being held.
650  * Designed to run in the deferred free thread. */
652 _FreeExcessPagesFromPool(void)
654 PVRSRV_ERROR eError = PVRSRV_OK;
655 LIST_HEAD(sPagePoolFreeList);
656 LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
657 struct list_head *psPoolHead = NULL;
658 IMG_UINT32 i, j, uiPoolIdx;
659 static IMG_UINT8 uiPoolAccessRandomiser;
660 IMG_BOOL bDone = IMG_FALSE;
662 /* Make sure all pools are drained over time */
663 uiPoolAccessRandomiser++;
665 /* Empty all pools */
666 for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
668 uiPoolIdx = (j + uiPoolAccessRandomiser) % PHYSMEM_OSMEM_NUM_OF_POOLS;
670 /* Just lock down to collect pool entries and unlock again before freeing them */
673 /* Get the correct list for this caching mode */
674 if (!_GetPoolListHead(g_aui32CPUCacheFlags[uiPoolIdx], &psPoolHead))
680 /* Traverse pool in reverse order to remove items that exceeded
681  * the pool size first */
682 list_for_each_entry_safe_reverse(psPagePoolEntry,
687 /* Go to free the pages if we collected enough */
688 if (g_ui32PagePoolEntryCount <= g_ui32PagePoolMaxEntries)
694 /* Move item to free list so we can free it later without the pool lock */
695 list_del(&psPagePoolEntry->sPagePoolItem);
696 list_add(&psPagePoolEntry->sPagePoolItem, &sPagePoolFreeList);
698 /* Update counters */
699 g_ui32PagePoolEntryCount -= psPagePoolEntry->uiItemsRemaining;
701 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
702 /* MemStats usually relies on having the bridge lock held, however
703  * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
704  * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
705  * the page pool lock is used to ensure these calls are mutually
708 PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * psPagePoolEntry->uiItemsRemaining);
715 /* Free the pages that we removed from the pool */
716 list_for_each_entry_safe(psPagePoolEntry,
721 #if defined(CONFIG_X86)
722 /* Set the correct page caching attributes on x86 */
723 if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[uiPoolIdx]))
726 ret = set_pages_array_wb(psPagePoolEntry->ppsPageArray,
727 psPagePoolEntry->uiItemsRemaining);
730 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
731 eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES;
736 /* Free the actual pages */
737 for (i = 0; i < psPagePoolEntry->uiItemsRemaining; i++)
739 __free_pages(psPagePoolEntry->ppsPageArray[i], 0);
740 psPagePoolEntry->ppsPageArray[i] = NULL;
743 /* Free the pool entry and page array*/
744 list_del(&psPagePoolEntry->sPagePoolItem);
745 OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
746 kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
749 /* Stop if all excess pages were removed */
759 _DumpPoolStructure();
/* Frees up to uiMaxPagesToFree order-0 pages from the pool lists, partially
 * draining entries from the tail of their arrays and deleting entries once
 * exhausted. *puiPagesFreed reports how many were actually freed.
 * Caller must hold the page pool lock (despite the x86 attribute reset and
 * __free_pages work done inline -- contrast _FreeExcessPagesFromPool).
 * NOTE(review): braces, `break` statements, loop arguments and the final
 * return are missing from this extraction. */
763 /* Free a certain number of pages from the page pool.
764  * Mainly used in error paths or at deinitialisation to
765  * empty the whole pool. */
767 _FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
768 IMG_UINT32 *puiPagesFreed)
770 PVRSRV_ERROR eError = PVRSRV_OK;
771 LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
772 struct list_head *psPoolHead = NULL;
775 *puiPagesFreed = uiMaxPagesToFree;
777 /* Empty all pools */
778 for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
781 /* Get the correct list for this caching mode */
782 if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead))
787 /* Free the pages and remove page arrays from the pool if they are exhausted */
788 list_for_each_entry_safe(psPagePoolEntry,
793 IMG_UINT32 uiItemsToFree;
794 struct page **ppsPageArray;
796 /* Check if we are going to free the whole page array or just parts */
797 if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree)
799 uiItemsToFree = psPagePoolEntry->uiItemsRemaining;
800 ppsPageArray = psPagePoolEntry->ppsPageArray;
804 uiItemsToFree = uiMaxPagesToFree;
805 ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]);
808 #if defined(CONFIG_X86)
809 /* Set the correct page caching attributes on x86 */
810 if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j]))
813 ret = set_pages_array_wb(ppsPageArray, uiItemsToFree);
816 PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
817 eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES;
823 /* Free the actual pages */
824 for (i = 0; i < uiItemsToFree; i++)
826 __free_pages(ppsPageArray[i], 0);
827 ppsPageArray[i] = NULL;
830 /* Reduce counters */
831 uiMaxPagesToFree -= uiItemsToFree;
832 g_ui32PagePoolEntryCount -= uiItemsToFree;
833 psPagePoolEntry->uiItemsRemaining -= uiItemsToFree;
835 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
836 /* MemStats usually relies on having the bridge lock held, however
837  * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
838  * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
839  * the page pool lock is used to ensure these calls are mutually
842 PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree);
845 /* Is this pool entry exhausted, delete it */
846 if (psPagePoolEntry->uiItemsRemaining == 0)
848 OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
849 list_del(&psPagePoolEntry->sPagePoolItem);
850 kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
853 /* Return if we have all our pages */
854 if (uiMaxPagesToFree == 0)
862 *puiPagesFreed -= uiMaxPagesToFree;
863 _DumpPoolStructure();
/* Pops up to uiMaxNumPages order-0 pages from the pool list matching
 * ui32CPUCacheFlags straight into ppsPageArray, consuming entries from the
 * back of their arrays. *puiNumReceivedPages reports how many were taken.
 * Caller must hold the page pool lock.
 * NOTE(review): braces, `goto`/`break` exits, loop arguments and the final
 * return are missing from this extraction. */
867 /* Get a certain number of pages from the page pool and
868  * copy them directly into a given page array. */
870 _GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags,
871 IMG_UINT32 uiMaxNumPages,
872 struct page **ppsPageArray,
873 IMG_UINT32 *puiNumReceivedPages)
875 LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
876 struct list_head *psPoolHead = NULL;
879 *puiNumReceivedPages = 0;
881 /* Get the correct list for this caching mode */
882 if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead))
887 /* Check if there are actually items in the list */
888 if (list_empty(psPoolHead))
893 PVR_ASSERT(g_ui32PagePoolEntryCount > 0);
895 /* Receive pages from the pool */
896 list_for_each_entry_safe(psPagePoolEntry,
901 /* Get the pages from this pool entry */
902 for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--)
904 ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1];
905 (*puiNumReceivedPages)++;
906 psPagePoolEntry->uiItemsRemaining--;
909 /* Is this pool entry exhausted, delete it */
910 if (psPagePoolEntry->uiItemsRemaining == 0)
912 OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
913 list_del(&psPagePoolEntry->sPagePoolItem);
914 kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
917 /* Return if we have all our pages */
918 if (*puiNumReceivedPages == uiMaxNumPages)
926 /* Update counters */
927 g_ui32PagePoolEntryCount -= *puiNumReceivedPages;
929 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
930 /* MemStats usually relies on having the bridge lock held, however
931  * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
932  * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
933  * the page pool lock is used to ensure these calls are mutually
936 PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages));
939 _DumpPoolStructure();
/* Wrapper around _GetPagesFromPoolUnlocked that decides whether the pool
 * is worth using at all (order-0, non-zeroed requests only) and whether to
 * trylock (small requests) or block on the pool lock (large requests).
 * Cached allocations taken from the pool get cache maintenance so callers
 * may treat them as clean.
 * NOTE(review): two parameter lines (apparently `IMG_UINT32 uiOrder` and
 * `IMG_BOOL bZero`, both referenced below), braces, the unlock calls and
 * the trailing argument lists are missing from this extraction. */
943 /* When is it worth waiting for the page pool? */
944 #define PVR_LINUX_PHYSMEM_MIN_PAGES_TO_WAIT_FOR_POOL 64
946 /* Same as _GetPagesFromPoolUnlocked but handles locking and
947  * checks first whether pages from the pool are a valid option. */
949 _GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode,
950 IMG_UINT32 ui32CPUCacheFlags,
951 IMG_UINT32 uiPagesToAlloc,
954 struct page **ppsPageArray,
955 IMG_UINT32 *puiPagesFromPool)
957 /* The page pool stores only order 0 pages. If we need zeroed memory we
958  * directly allocate from the OS because it is faster than doing it ourselves. */
959 if (uiOrder == 0 && !bZero)
961 if (uiPagesToAlloc < PVR_LINUX_PHYSMEM_MIN_PAGES_TO_WAIT_FOR_POOL)
963 /* In case the request is a few pages, just try to acquire the pool lock */
964 if (_PagePoolTrylock() == 0)
971 /* It is worth waiting if many pages were requested.
972  * Freeing an item to the pool is very fast and
973  * the defer free thread will release the lock regularly. */
977 _GetPagesFromPoolUnlocked(ui32CPUCacheFlags,
983 /* Do cache maintenance so allocations from the pool can be
984  * considered clean */
985 if (PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags) &&
986 PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags))
988 _ApplyCacheMaintenance(psDevNode,
/* Work item run on the cleanup (defer-free) thread: drains excess pages
 * from the pool via _FreeExcessPagesFromPool.
 * NOTE(review): the tail of this function (freeing pvData, the success
 * return and apparently a retry path for the error case) is missing from
 * this extraction. */
998 /* Defer free function to remove excess pages from the page pool.
999  * We do not need the bridge lock for this function */
1001 _CleanupThread_FreePoolPages(void *pvData)
1003 PVRSRV_ERROR eError;
1005 /* Free all that is necessary */
1006 eError = _FreeExcessPagesFromPool();
1007 if(eError != PVRSRV_OK)
1009 PVR_DPF((PVR_DBG_ERROR, "%s: _FreeExcessPagesFromPool failed", __func__));
/* Queue a _CleanupThread_FreePoolPages work item on the cleanup thread.
 * If the work item cannot be allocated, falls back to draining the excess
 * pages synchronously under the pool lock.
 * NOTE(review): braces, the `goto`/early-return wiring between the alloc
 * failure path and the fallback code, and the lock/unlock calls around the
 * fallback are missing from this extraction. */
1019 /* Signal the defer free thread that there are pages in the pool to be cleaned up.
1020  * MUST NOT HOLD THE PAGE POOL LOCK! */
1022 _SignalDeferFree(void)
1024 PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn;
1025 psCleanupThreadFn = OSAllocMem(sizeof(*psCleanupThreadFn));
1027 if(!psCleanupThreadFn)
1029 PVR_DPF((PVR_DBG_ERROR,
1030 "%s: Failed to get memory for deferred page pool cleanup. "
1031 "Trying to free pages immediately",
1036 psCleanupThreadFn->pfnFree = _CleanupThread_FreePoolPages;
1037 psCleanupThreadFn->pvData = psCleanupThreadFn;
1038 psCleanupThreadFn->ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
1039 psCleanupThreadFn->bDependsOnHW = IMG_FALSE;
1040 /* We must not hold the pool lock when calling AddWork because it might call us back to
1041  * free pooled pages directly when unloading the driver */
1042 PVRSRVCleanupThreadAddWork(psCleanupThreadFn);
1048 /* In case we are not able to signal the defer free thread
1049  * we have to cleanup the pool now. */
1050 IMG_UINT32 uiPagesFreed;
1053 if (_FreePagesFromPoolUnlocked(g_ui32PagePoolEntryCount - g_ui32PagePoolMaxEntries,
1054 &uiPagesFreed) != PVRSRV_OK)
1056 PVR_DPF((PVR_DBG_ERROR,
1057 "%s: Unable to free pooled pages!",
1066 /* Moves a page array to the page pool.
1068 * If this function is successful the ppsPageArray is unusable and needs to be
1069 * reallocated in case the _PMR_OSPAGEARRAY_DATA_ will be reused.
1070 * This function expects cached pages to be not in the cache anymore,
1071 * invalidate them before, ideally without using the pool lock. */
1073 _PutPagesToPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags,
1074 struct page **ppsPageArray,
1075 IMG_UINT32 uiEntriesInArray)
1077 LinuxPagePoolEntry *psPagePoolEntry;
1078 struct list_head *psPoolHead = NULL;
1080 /* Check if there is still space in the pool */
1081 if ( (g_ui32PagePoolEntryCount + uiEntriesInArray) >=
1082 (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) )
1087 /* Get the correct list for this caching mode */
1088 if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead))
1093 /* Fill the new pool entry structure and add it to the pool list */
1094 psPagePoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL);
1095 psPagePoolEntry->ppsPageArray = ppsPageArray;
1096 psPagePoolEntry->uiItemsRemaining = uiEntriesInArray;
1098 list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead);
1100 /* Update counters */
1101 g_ui32PagePoolEntryCount += uiEntriesInArray;
1103 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
1104 /* MemStats usually relies on having the bridge lock held, however
1105 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
1106 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
1107 * the page pool lock is used to ensure these calls are mutually
1110 PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * uiEntriesInArray);
1113 _DumpPoolStructure();
/* Wrapper around _PutPagesToPoolUnlocked that takes the pool lock and
 * first checks whether the pages are worth pooling at all (enough of
 * them), then signals the defer-free thread when the pool has grown past
 * its soft limit plus 5% slack.
 * NOTE(review): the full condition at line 1131 (apparently including an
 * order/pinned check), braces, the unlock calls, the boolean returns and
 * the remaining argument lists are missing from this extraction. */
1117 /* Minimal amount of pages that will go to the pool, everything below is freed directly */
1118 #define PVR_LINUX_PHYSMEM_MIN_PAGES_TO_ADD_TO_POOL 16
1120 /* Same as _PutPagesToPoolUnlocked but handles locking and checks whether the pages are
1121  * suitable to be stored in the page pool. */
1122 static inline IMG_BOOL
1123 _PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags,
1124 struct page **ppsPageArray,
1127 IMG_UINT32 uiNumPages)
1131 uiNumPages >= PVR_LINUX_PHYSMEM_MIN_PAGES_TO_ADD_TO_POOL)
1135 /* Try to quickly move page array to the pool */
1136 if (_PutPagesToPoolUnlocked(ui32CPUCacheFlags,
1140 if (g_ui32PagePoolEntryCount > (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxEntries_5Percent))
1142 /* Signal defer free to clean up excess pages from pool.
1143  * Allow a little excess before signalling to avoid oscillating behaviour */
1156 /* Could not move pages to pool, continue and free them now */
1163 /* Get the GFP flags that we pass to the page allocator */
1164 static inline unsigned int
1165 _GetGFPFlags(PMR_OSPAGEARRAY_DATA *psPageArrayData)
1167 struct device *psDev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice;
1168 unsigned int gfp_flags = 0;
1169 gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC;
1171 if (*psDev->dma_mask == DMA_BIT_MASK(32))
1174 * Achieved by NOT setting __GFP_HIGHMEM for 32 bit systems and
1175 * setting __GFP_DMA32 for 64 bit systems */
1176 gfp_flags |= __GFP_DMA32;
1180 /* If our system is able to handle large addresses use highmem */
1181 gfp_flags |= __GFP_HIGHMEM;
1184 if (psPageArrayData->bZero)
1186 gfp_flags |= __GFP_ZERO;
1192 /* Poison a page of order uiOrder with string taken from pacPoisonData*/
1194 _PoisonPages(struct page *page,
1196 const IMG_CHAR *pacPoisonData,
1197 size_t uiPoisonSize)
1200 IMG_UINT32 uiSrcByteIndex;
1201 IMG_UINT32 uiDestByteIndex;
1202 IMG_UINT32 uiSubPageIndex;
1206 for (uiSubPageIndex = 0; uiSubPageIndex < (1U << uiOrder); uiSubPageIndex++)
1208 kvaddr = kmap(page + uiSubPageIndex);
1211 for(uiDestByteIndex=0; uiDestByteIndex<PAGE_SIZE; uiDestByteIndex++)
1213 pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
1215 if (uiSrcByteIndex == uiPoisonSize)
1221 flush_dcache_page(page);
1222 kunmap(page + uiSubPageIndex);
1226 static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
1227 static const IMG_UINT32 _AllocPoisonSize = 7;
1228 static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
1229 static const IMG_UINT32 _FreePoisonSize = 11;
/* Allocate and initialise the structure to hold the metadata of the allocation */
/* Creates and populates a PMR_OSPAGEARRAY_DATA tracking structure:
 * validates the requested size, allocates the page-pointer array and the
 * DMA/CMA shadow arrays (untracked, see comment below), and records the
 * allocation parameters. On success *ppsPageArrayDataPtr receives the
 * new structure. Error paths unwind via the e_free_* labels at the end. */
_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode,
PMR_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 uiLog2DevPageSize,
IMG_BOOL bPoisonOnAlloc,
IMG_BOOL bPoisonOnFree,
IMG_UINT32 ui32CPUCacheFlags,
PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr)
PVRSRV_ERROR eError;
/* Total virtual size covered by the PMR */
PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks;
IMG_UINT32 uiNumOSPageSizeVirtPages;
IMG_UINT32 uiNumDevPageSizeVirtPages;
PMR_OSPAGEARRAY_DATA *psPageArrayData;
PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
/* Sanity check of the alloc size */
if (uiSize >= 0x1000000000ULL)
PVR_DPF((PVR_DBG_ERROR,
"%s: Do you really want 64GB of physical memory in one go? "
"This is likely a bug", __func__));
eError = PVRSRV_ERROR_INVALID_PARAMS;
/* Check that we allocate the correct contiguity */
PVR_ASSERT(PAGE_SHIFT <= uiLog2DevPageSize);
/* Size must be a whole multiple of the device page size */
if ((uiSize & ((1ULL << uiLog2DevPageSize) - 1)) != 0)
PVR_DPF((PVR_DBG_ERROR,
"Allocation size " PMR_SIZE_FMTSPEC " is not multiple of page size 2^%u !",
uiLog2DevPageSize));
eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
/* Use of cast below is justified by the assertion that follows to
prove that no significant bits have been truncated */
uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1);
PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize);
uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2DevPageSize - PAGE_SHIFT);
/* Allocate the struct to hold the metadata */
psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL);
if (psPageArrayData == NULL)
PVR_DPF((PVR_DBG_ERROR,
"%s: OS refused the memory allocation for the private data.",
eError = PVRSRV_ERROR_OUT_OF_MEMORY;
* Allocate the page array
* We avoid tracking this memory because this structure might go into the page pool.
* The OS can drain the pool asynchronously and when doing that we have to avoid
* any potential deadlocks.
* In one scenario the process stats vmalloc hash table lock is held and then
* the oom-killer softirq is trying to call _ScanObjectsInPagePool(), it must not
* try to acquire the vmalloc hash table lock again.
psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages);
if (psPageArrayData->pagearray == NULL)
eError = PVRSRV_ERROR_OUT_OF_MEMORY;
goto e_free_kmem_cache;
/* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */
psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages);
if (psPageArrayData->dmavirtarray == NULL)
eError = PVRSRV_ERROR_OUT_OF_MEMORY;
goto e_free_pagearray;
psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages);
if (psPageArrayData->dmaphysarray == NULL)
eError = PVRSRV_ERROR_OUT_OF_MEMORY;
goto e_free_cpuvirtaddrarray;
/* Record allocation parameters for later alloc/free/map paths */
psPageArrayData->psDevNode = psDevNode;
psPageArrayData->iNumPagesAllocated = 0;
psPageArrayData->uiTotalNumPages = uiNumOSPageSizeVirtPages;
psPageArrayData->uiLog2DevPageSize = uiLog2DevPageSize;
psPageArrayData->bZero = bZero;
psPageArrayData->bIsCMA = bIsCMA;
psPageArrayData->bOnDemand = bOnDemand;
psPageArrayData->bUnpinned = IMG_FALSE;
psPageArrayData->bPoisonOnFree = bPoisonOnFree;
psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags;
/* Indicate whether this is an allocation with default caching attribute (i.e cached) or not */
if (PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags) ||
PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags))
psPageArrayData->bUnsetMemoryType = IMG_TRUE;
psPageArrayData->bUnsetMemoryType = IMG_FALSE;
*ppsPageArrayDataPtr = psPageArrayData;
/* Error unwind: free in reverse order of allocation */
e_free_cpuvirtaddrarray:
OSFreeMemNoStats(psPageArrayData->dmavirtarray);
OSFreeMemNoStats(psPageArrayData->pagearray);
kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
PVR_DPF((PVR_DBG_ERROR,
"%s: OS refused the memory allocation for the page pointer table. "
"Did you ask for too much?",
PVR_ASSERT(eError != PVRSRV_OK);
/* Flush/invalidate the CPU cache for a list of pages. For large requests
 * (>= PVR_DIRTY_BYTES_FLUSH_THRESHOLD bytes) a whole-cache flush is tried
 * first; if that fails (eError != PVRSRV_OK) each page is flushed or
 * invalidated individually via a temporary kmap() mapping.
 * NOTE(review): the bFlushComplete/bZero selection between flush and
 * invalidate is partially elided here — confirm against full source. */
_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
struct page **ppsPage,
IMG_UINT32 uiNumPages,
/* Start pessimistic: RETRY forces the per-page fallback below unless the
 * global flush succeeds */
PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY;
if ((uiNumPages << PAGE_SHIFT) >= PVR_DIRTY_BYTES_FLUSH_THRESHOLD)
{
/* May fail so fallback to range-based flush */
eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
}
if (eError != PVRSRV_OK)
for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx)
IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
pvPageVAddr = kmap(ppsPage[ui32Idx]);
sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]);
sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
/* If we're zeroing, we need to make sure the cleared memory is pushed out
of the cache before the cache lines are invalidated */
OSFlushCPUCacheRangeKM(psDevNode,
pvPageVAddr + PAGE_SIZE,
OSInvalidateCPUCacheRangeKM(psDevNode,
pvPageVAddr + PAGE_SIZE,
kunmap(ppsPage[ui32Idx]);
/* Change the caching attribute of pages on x86 systems and takes care of
 * cache maintenance. This function is supposed to be called once for pages that
 * came from alloc_pages().
 * Flush/Invalidate pages in case the allocation is not cached. Necessary to
 * remove pages from the cache that might be flushed later and corrupt memory. */
static inline PVRSRV_ERROR
_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
struct page **ppsPage,
IMG_UINT32 uiNumPages,
IMG_UINT32 ui32CPUCacheFlags)
{
PVRSRV_ERROR eError = PVRSRV_OK;
/* Decode the requested CPU caching mode from the flags */
IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags);
IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags);
IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags);
if (ppsPage != NULL)
#if defined (CONFIG_X86)
/* On x86 we have to set page cache attributes for non-cached pages.
 * The call is implicitly taking care of all flushing/invalidating
 * and therefore we can skip the usual cache maintenance after this. */
if (bCPUUncached || bCPUWriteCombine)
/* On X86 if we already have a mapping (e.g. low memory) we need to change the mode of
current mapping before we map it ourselves */
int ret = IMG_FALSE;
PVR_UNREFERENCED_PARAMETER(bFlush);
switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
ret = set_pages_array_uc(ppsPage, uiNumPages);
eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret));
case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
ret = set_pages_array_wc(ppsPage, uiNumPages);
eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret));
case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
/* Cache maintenance if:
 * cached && (cleanFlag || bFlush)
 * uncached || write-combine
 */
if ( (bCPUCached && (PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags) || bFlush)) ||
bCPUUncached || bCPUWriteCombine )
/* We can be given pages which still remain in the cache.
In order to make sure that the data we write through our mappings
doesn't get overwritten by later cache evictions we invalidate the
pages that are given to us.
This still seems to be true if we request cold pages, it's just less
likely to be in the cache. */
_ApplyCacheMaintenance(psDevNode,
/* Same as _AllocOSPage except it uses DMA framework to perform allocation */
/* Allocates one device page (PAGE_SIZE << ui32MinOrder bytes) via
 * dma_alloc_coherent(), falling back to alloc_pages() on failure, and
 * stores virt/bus/page handles in the three parallel arrays at the
 * device-page-granular index (uiPageIndex >> ui32MinOrder).
 * Returns PVRSRV_ERROR_OUT_OF_MEMORY when both paths fail. */
_AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData,
unsigned int gfp_flags,
IMG_UINT32 ui32AllocOrder,
IMG_UINT32 ui32MinOrder,
IMG_UINT32 uiPageIndex)
dma_addr_t bus_addr;
size_t alloc_size = PAGE_SIZE << ui32MinOrder;
PVR_UNREFERENCED_PARAMETER(ui32AllocOrder);
/* CMA path only supports alloc order == min order */
PVR_ASSERT(ui32AllocOrder == ui32MinOrder);
virt_addr = dma_alloc_coherent(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
if (virt_addr == NULL)
/* The idea here is primarily to support some older kernels with
broken or non-functioning DMA/CMA implementations (< Linux-3.4)
and to also handle DMA/CMA allocation failures by attempting a
normal page allocation though we expect dma_alloc_coherent()
already attempts this internally also before failing but
nonetheless it does no harm to retry allocation ourself */
page = alloc_pages(gfp_flags, ui32AllocOrder);
/* Taint bus_addr as alloc_page, needed when freeing;
also acquire the low memory page address only, this
prevents mapping possible high memory pages into
kernel virtual address space which might exhaust
the VMALLOC address space */
bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
virt_addr = page_address(page);
/* Both DMA and fallback allocation failed */
return PVRSRV_ERROR_OUT_OF_MEMORY;
/* Recover the struct page from the bus address for the DMA path */
page = pfn_to_page(bus_addr >> PAGE_SHIFT);
/* Convert OSPageSize-based index into DevicePageSize-based index */
psPageArrayData->dmavirtarray[uiPageIndex >> ui32MinOrder] = virt_addr;
psPageArrayData->dmaphysarray[uiPageIndex >> ui32MinOrder] = bus_addr;
psPageArrayData->pagearray[uiPageIndex >> ui32MinOrder] = page;
/* Allocate a page of order uiAllocOrder and stores it in the page array ppsPage at
 * position uiPageIndex.
 * If the order is higher than 0, it splits the page into multiples and
 * stores them at position uiPageIndex to uiPageIndex+(1<<uiAllocOrder). */
_AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData,
unsigned int gfp_flags,
IMG_UINT32 uiAllocOrder,
IMG_UINT32 uiMinOrder,
IMG_UINT32 uiPageIndex)
struct page *psPage;
IMG_UINT32 ui32Count;
/* Allocate the page */
psPage = alloc_pages(gfp_flags, uiAllocOrder);
/* alloc_pages() failed; propagate as OOM */
return PVRSRV_ERROR_OUT_OF_MEMORY;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
/* In case we need to, split the higher order page;
this should only be used for order-0 allocations
as higher order allocations should use DMA/CMA */
if (uiAllocOrder != 0)
{
/* split_page() turns one order-N page into 2^N independent order-0 pages */
split_page(psPage, uiAllocOrder);
}
/* Store the page (or multiple split pages) in the page array */
for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++)
psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]);
/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons.
 * Firstly to support device pages which are larger than OS. By asking the OS for 2^N
 * order OS pages at a time we guarantee the device page is contiguous.
 * Secondly for performance where we may ask for 2^N order pages to reduce the number
 * of calls to alloc_pages, and thus reduce time for huge allocations.
 * Regardless of page order requested, we need to break them down to track _OS pages.
 * The maximum order requested is increased if all max order allocations were successful.
 * If any request fails we reduce the max order.
 */
/* Non-sparse ("fast") allocation path: fills the whole page array, first
 * from the page pool, then from alloc_pages()/DMA with an adaptive order.
 * On any failure all pages acquired so far are returned/freed. */
_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
PVRSRV_ERROR eError;
IMG_UINT32 uiArrayIndex = 0;
IMG_UINT32 ui32Order;
/* Minimum order needed so one request covers one device page */
IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
IMG_BOOL bIncreaseMaxOrder = IMG_TRUE;
IMG_UINT32 ui32NumPageReq;
IMG_UINT32 uiPagesToAlloc;
IMG_UINT32 uiPagesFromPool = 0;
unsigned int gfp_flags = _GetGFPFlags(psPageArrayData);
IMG_UINT32 ui32GfpFlags;
/* High-order requests are opportunistic: don't reclaim/retry for them */
IMG_UINT32 ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY);
struct page **ppsPageArray = psPageArrayData->pagearray;
struct page **ppsPageAttributeArray = NULL;
uiPagesToAlloc = psPageArrayData->uiTotalNumPages;
/* Try to get pages from the pool since it is faster;
the page pool currently only supports zero-order pages
thus currently excludes all DMA/CMA allocated memory */
_GetPagesFromPoolLocked(psPageArrayData->psDevNode,
psPageArrayData->ui32CPUCacheFlags,
psPageArrayData->bZero,
uiArrayIndex = uiPagesFromPool;
if ((uiPagesToAlloc - uiPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD)
{ /* Small allocations: Ask for one device page at a time */
ui32Order = ui32MinOrder;
bIncreaseMaxOrder = IMG_FALSE;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
/* Large zero-order or none zero-order allocations, ask for
MAX(max-order,min-order) order pages at a time; alloc
failures throttles this down to ZeroOrder allocations */
ui32Order = MAX(g_uiMaxOrder, ui32MinOrder);
/* Because split_pages() is not available on older kernels
we cannot mix-and-match any-order pages in the PMR;
only same-order pages must be present in page array.
So we unconditionally force it to use ui32MinOrder on
these older kernels */
ui32Order = ui32MinOrder;
/* Only if asking for more contiguity than we actually need, let it fail */
ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
ui32NumPageReq = (1 << ui32Order);
/* Main allocation loop: one request of 2^ui32Order OS pages per pass */
while (uiArrayIndex < uiPagesToAlloc)
IMG_UINT32 ui32PageRemain = uiPagesToAlloc - uiArrayIndex;
while (ui32NumPageReq > ui32PageRemain)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
/* Pages to request is larger than that remaining
so ask for less so never over allocate */
ui32Order = MAX(ui32Order >> 1,ui32MinOrder);
/* Pages to request is larger than that remaining so
do nothing thus over allocate as we do not support
mix/match of any-order pages in PMR page-array in
older kernels (simplifies page free logic) */
PVR_ASSERT(ui32Order == ui32MinOrder);
ui32NumPageReq = (1 << ui32Order);
ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
if (psPageArrayData->bIsCMA)
/* As the DMA/CMA framework rounds-up request to the
next power-of-two, we request multiple uiMinOrder
pages to satisfy allocation request in order to
minimise wasting memory */
eError = _AllocOSPage_CMA(psPageArrayData,
/* Allocate uiOrder pages at uiArrayIndex */
eError = _AllocOSPage(psPageArrayData,
if (eError == PVRSRV_OK)
/* Successful request. Move onto next. */
uiArrayIndex += ui32NumPageReq;
if (ui32Order > ui32MinOrder)
/* Last request failed. Let's ask for less next time */
ui32Order = MAX(ui32Order >> 1,ui32MinOrder);
bIncreaseMaxOrder = IMG_FALSE;
ui32NumPageReq = (1 << ui32Order);
ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
/* Remember the reduced order globally for future allocations */
g_uiMaxOrder = ui32Order;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
/* We should not trigger this code path in older kernels,
this is enforced by ensuring ui32Order == ui32MinOrder */
PVR_ASSERT(ui32Order == ui32MinOrder);
/* Failed to alloc pages at required contiguity. Failed allocation */
PVR_DPF((PVR_DBG_ERROR, "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u (%s)",
PVRSRVGetErrorStringKM(eError)));
eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM))
{ /* All successful allocations on max order. Let's ask for more next time */
/* Construct table of page pointers to apply attributes */
ppsPageAttributeArray = &ppsPageArray[uiPagesFromPool];
if (psPageArrayData->bIsCMA)
IMG_UINT32 uiIdx, uiIdy, uiIdz;
/* CMA stores one entry per device page; expand to per-OS-page table */
ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiPagesToAlloc);
if (ppsPageAttributeArray == NULL)
PVR_DPF((PVR_DBG_ERROR, "Failed OSAllocMem() for page attributes table"));
eError = PVRSRV_ERROR_OUT_OF_MEMORY;
for (uiIdx = 0; uiIdx < uiPagesToAlloc; uiIdx += ui32NumPageReq)
uiIdy = uiIdx >> ui32Order;
for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++)
ppsPageAttributeArray[uiIdx+uiIdz] = psPageArrayData->pagearray[uiIdy];
ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz;
/* Do the cache management as required */
eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
ppsPageAttributeArray,
uiPagesToAlloc - uiPagesFromPool,
psPageArrayData->bZero,
psPageArrayData->ui32CPUCacheFlags);
if (eError != PVRSRV_OK)
PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
if (psPageArrayData->bIsCMA)
OSFreeMem(ppsPageAttributeArray);
/* Update metadata */
psPageArrayData->iNumPagesAllocated = psPageArrayData->uiTotalNumPages;
/* ---- Error unwind: free everything acquired so far ---- */
IMG_UINT32 ui32PageToFree;
if (psPageArrayData->bIsCMA)
IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order;
IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order;
PVR_ASSERT(ui32Order == ui32MinOrder);
if (ppsPageAttributeArray)
OSFreeMem(ppsPageAttributeArray);
for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++)
_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
psPageArrayData->dmavirtarray[ui32PageToFree],
psPageArrayData->dmaphysarray[ui32PageToFree],
ppsPageArray[ui32PageToFree]);
psPageArrayData->dmaphysarray[ui32PageToFree]= (dma_addr_t)0;
psPageArrayData->dmavirtarray[ui32PageToFree] = NULL;
ppsPageArray[ui32PageToFree] = INVALID_PAGE;
/* Free the pages we got from the pool */
for(ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
_FreeOSPage(ui32MinOrder,
psPageArrayData->bUnsetMemoryType,
ppsPageArray[ui32PageToFree]);
ppsPageArray[ui32PageToFree] = INVALID_PAGE;
/* Free pages allocated from the OS after the pool pages */
for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++)
_FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]);
ppsPageArray[ui32PageToFree] = INVALID_PAGE;
/* Allocation of OS pages: This function is used for sparse allocations.
 * Sparse allocations provide only a proportion of sparse physical backing within the total
 * virtual range. Currently we only support sparse allocations on device pages that are OS
 */
/* Allocates uiPagesToAlloc pages at the array positions named in
 * puiAllocIndices, drawing from the page pool first and alloc_pages()
 * otherwise, then applies caching attributes to the newly obtained pages.
 * On failure all pages acquired in this call are freed again. */
_AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
IMG_UINT32 *puiAllocIndices,
IMG_UINT32 uiPagesToAlloc)
PVRSRV_ERROR eError;
struct page **ppsPageArray = psPageArrayData->pagearray;
IMG_UINT32 uiPagesFromPool = 0;
unsigned int gfp_flags = _GetGFPFlags(psPageArrayData);
/* We use this page array to receive pages from the pool and then reuse it afterwards to
 * store pages that need their cache attribute changed on x86*/
struct page **ppsTempPageArray;
IMG_UINT32 uiTempPageArrayIndex = 0;
/* Allocate the temporary page array that we need here to receive pages
 * from the pool and to store pages that need their caching attributes changed */
ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiPagesToAlloc);
if (ppsTempPageArray == NULL)
PVR_DPF((PVR_DBG_ERROR, "%s: Failed metadata allocation", __FUNCTION__));
eError = PVRSRV_ERROR_OUT_OF_MEMORY;
uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
/* Check the requested number of pages if they fit in the page array */
if(psPageArrayData->uiTotalNumPages < \
(psPageArrayData->iNumPagesAllocated + uiPagesToAlloc))
PVR_DPF((PVR_DBG_ERROR,
"%s: Trying to allocate more pages than this buffer can handle, "
"Request + Allocated < Max! Request %u, Allocated %u, Max %u.",
psPageArrayData->iNumPagesAllocated,
psPageArrayData->uiTotalNumPages));
eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
goto e_free_temp_array;
/* Try to get pages from the pool since it is faster */
_GetPagesFromPoolLocked(psPageArrayData->psDevNode,
psPageArrayData->ui32CPUCacheFlags,
psPageArrayData->bZero,
/* Allocate pages from the OS or move the pages that we got from the pool
 * to the page array */
for (i = 0; i < uiPagesToAlloc; i++)
/* Check if the indices we are allocating are in range */
if (puiAllocIndices[i] >= psPageArrayData->uiTotalNumPages)
PVR_DPF((PVR_DBG_ERROR,
"%s: Given alloc index %u at %u is larger than page array %u.",
psPageArrayData->uiTotalNumPages));
eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
/* Check if there is not already a page allocated at this position */
if (INVALID_PAGE != ppsPageArray[puiAllocIndices[i]])
PVR_DPF((PVR_DBG_ERROR,
"%s: Mapping number %u at page array index %u already exists",
puiAllocIndices[i]));
eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
/* Finally assign a page to the array.
 * Either from the pool or allocate a new one. */
if (uiPagesFromPool != 0)
ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[uiPagesFromPool];
ppsPageArray[puiAllocIndices[i]] = alloc_pages(gfp_flags, uiOrder);
if(ppsPageArray[puiAllocIndices[i]] != NULL)
/* Reusing the temp page array if it has no pool pages anymore */
ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]];
uiTempPageArrayIndex++;
/* Failed to alloc pages at required contiguity. Failed allocation */
PVR_DPF((PVR_DBG_ERROR,
"%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u",
eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
/* Do the cache management as required */
eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
uiTempPageArrayIndex,
psPageArrayData->bZero,
psPageArrayData->ui32CPUCacheFlags);
if (eError != PVRSRV_OK)
PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
/* Update metadata */
psPageArrayData->iNumPagesAllocated += uiPagesToAlloc;
/* Free temporary page array */
OSFreeMem(ppsTempPageArray);
/* ---- Error unwind ---- */
IMG_UINT32 ui32PageToFree;
/* Free the pages we got from the pool */
for(ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
psPageArrayData->bUnsetMemoryType,
ppsTempPageArray[ui32PageToFree]);
/* Free the pages we just allocated from the OS */
for(ui32PageToFree = uiPagesFromPool; ui32PageToFree < i; ui32PageToFree++)
ppsPageArray[puiAllocIndices[ui32PageToFree]]);
ppsPageArray[puiAllocIndices[ui32PageToFree]] = (struct page *) INVALID_PAGE;
OSFreeMem(ppsTempPageArray);
/* Allocate pages for a given page array.
 * The executed allocation path depends whether an array with allocation
 * indices has been passed or not */
/* Entry point: dispatches to _AllocOSPages_Sparse (indices given) or
 * _AllocOSPages_Fast (whole array), then handles poison-on-alloc and
 * process-stats accounting for the new pages. */
_AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
IMG_UINT32 *puiAllocIndices,
IMG_UINT32 uiPagesToAlloc)
PVRSRV_ERROR eError;
struct page **ppsPageArray;
/* Precondition checks on the metadata structure */
PVR_ASSERT(NULL != psPageArrayData);
if (psPageArrayData->bIsCMA)
PVR_ASSERT(psPageArrayData->dmaphysarray != NULL);
PVR_ASSERT(psPageArrayData->dmavirtarray != NULL);
PVR_ASSERT(psPageArrayData->pagearray != NULL);
PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
ppsPageArray = psPageArrayData->pagearray;
/* Go the sparse alloc path if we have an array with alloc indices.*/
if (puiAllocIndices != NULL)
eError = _AllocOSPages_Sparse(psPageArrayData,
eError = _AllocOSPages_Fast(psPageArrayData);
if (eError != PVRSRV_OK)
if (psPageArrayData->bPoisonOnAlloc)
for (i = 0; i < uiPagesToAlloc; i++)
/* Sparse path indexes indirectly; fast path is linear */
IMG_UINT32 uiIdx = puiAllocIndices ? puiAllocIndices[i] : i;
_PoisonPages(ppsPageArray[uiIdx],
_DumpPageArray(ppsPageArray, psPageArrayData->uiTotalNumPages);
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if defined(PVRSRV_ENABLE_MEMORY_STATS)
/* Per-allocation record keeping (full memory stats build) */
for (i = 0; i < uiPagesToAlloc; i++)
IMG_CPU_PHYADDR sCPUPhysAddr;
IMG_UINT32 uiIdx = puiAllocIndices ? puiAllocIndices[i] : i;
sCPUPhysAddr.uiAddr = page_to_phys(ppsPageArray[uiIdx]);
PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
1 << psPageArrayData->uiLog2DevPageSize,
/* Aggregate-only accounting when full memory stats are disabled */
PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, uiPagesToAlloc * PAGE_SIZE);
PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData));
/* Same as _FreeOSPage except free memory using DMA framework */
/* Frees one device page: if dev_addr carries the "alloc_pages" taint set by
 * _AllocOSPage_CMA's fallback it is released with __free_pages (restoring
 * WB caching on x86 first), otherwise it goes back via dma_free_coherent. */
_FreeOSPage_CMA(struct device *dev,
dma_addr_t dev_addr,
struct page *psPage)
if (DMA_IS_ALLOCPG_ADDR(dev_addr))
#if defined(CONFIG_X86)
void *pvPageVAddr = page_address(psPage);
/* Restore default write-back attribute before returning page to the OS */
int ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
PVR_DPF((PVR_DBG_ERROR,
"%s: Failed to reset page attribute",
__free_pages(psPage, uiOrder);
/* DMA-allocated: strip the taint bits and free through the DMA API */
dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr));
/* Free a single page back to the OS.
 * Make sure the cache type is set back to the default value.
 *
 * We must _only_ check bUnsetMemoryType in the case where we need to free
 * the page back to the OS since we may have to revert the cache properties
 * of the page to the default as given by the OS when it was allocated. */
_FreeOSPage(IMG_UINT32 uiOrder,
IMG_BOOL bUnsetMemoryType,
struct page *psPage)
#if defined(CONFIG_X86)
pvPageVAddr = page_address(psPage);
/* Only lowmem pages (non-NULL page_address) with a changed attribute
 * need reverting to write-back */
if (pvPageVAddr && bUnsetMemoryType == IMG_TRUE)
ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute", __FUNCTION__));
/* Non-x86: attribute restore not needed */
PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType);
__free_pages(psPage, uiOrder);
/* Free the struct holding the metadata */
_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData)
PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData));
/* Check if the page array actually still exists.
 * It might be the case that has been moved to the page pool */
if (psPageArrayData->pagearray != NULL)
OSFreeMemNoStats(psPageArrayData->pagearray);
/* Return the metadata struct to its slab cache */
kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* _FreeOSPages_MemStats: Depends on the bridge lock already being held */
/* Removes the process-stats accounting for ui32NumPages pages that are about
 * to be freed. pai32FreeIndices selects the entries for sparse frees; NULL
 * means the first ui32NumPages entries. */
_FreeOSPages_MemStats(PMR_OSPAGEARRAY_DATA *psPageArrayData,
IMG_UINT32 *pai32FreeIndices,
IMG_UINT32 ui32NumPages)
struct page **ppsPageArray;
#if defined(PVRSRV_ENABLE_MEMORY_STATS)
IMG_UINT32 ui32PageIndex;
PVR_DPF((PVR_DBG_MESSAGE, "%s: psPageArrayData %p, ui32NumPages %u", __FUNCTION__, psPageArrayData, ui32NumPages));
PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
ppsPageArray = psPageArrayData->pagearray;
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
/* Aggregate-only path: one bulk decrement */
PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, ui32NumPages * PAGE_SIZE);
/* Full memory-stats path: remove the per-page records by physical address */
for(ui32PageIndex = 0; ui32PageIndex < ui32NumPages; ui32PageIndex++)
IMG_CPU_PHYADDR sCPUPhysAddr;
IMG_UINT32 uiArrayIndex = (pai32FreeIndices) ? pai32FreeIndices[ui32PageIndex] : ui32PageIndex;
sCPUPhysAddr.uiAddr = page_to_phys(ppsPageArray[uiArrayIndex]);
PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, sCPUPhysAddr.uiAddr);
#endif /* PVRSRV_ENABLE_PROCESS_STATS */
/* Free all or some pages from a sparse page array */
/* Collects the selected valid pages into a temporary contiguous array,
 * poisons them if configured, offers them to the page pool, and frees the
 * remainder back to the OS (restoring WB caching on x86 first).
 * pai32FreeIndices == NULL means "free everything allocated". */
_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
IMG_UINT32 *pai32FreeIndices,
IMG_UINT32 ui32FreePageCount)
IMG_UINT32 uiPageIndex, i = 0, uiTempIdx;
struct page **ppsPageArray;
IMG_UINT32 uiNumPages;
struct page **ppsTempPageArray;
IMG_UINT32 uiTempArraySize;
/* We really should have something to free before we call this */
PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
if(pai32FreeIndices == NULL)
/* Free everything: scan the whole array, temp array sized to what is
 * actually allocated */
uiNumPages = psPageArrayData->uiTotalNumPages;
uiTempArraySize = psPageArrayData->iNumPagesAllocated;
uiNumPages = ui32FreePageCount;
uiTempArraySize = ui32FreePageCount;
/* OSAllocMemNoStats required because this code may be run without the bridge lock held */
ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize);
if (ppsTempPageArray == NULL)
PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __FUNCTION__));
return PVRSRV_ERROR_OUT_OF_MEMORY;
ppsPageArray = psPageArrayData->pagearray;
uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
/* Poison if necessary */
if (psPageArrayData->bPoisonOnFree)
for (i = 0; i < uiNumPages; i ++)
uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i ;
if(INVALID_PAGE != ppsPageArray[uiPageIndex])
_PoisonPages(ppsPageArray[uiPageIndex],
/* Put pages in a contiguous array so further processing is easier */
for (i = 0; i < uiNumPages; i++)
uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i;
if(INVALID_PAGE != ppsPageArray[uiPageIndex])
ppsTempPageArray[uiTempIdx] = ppsPageArray[uiPageIndex];
/* Mark the slot empty in the sparse array */
ppsPageArray[uiPageIndex] = (struct page *) INVALID_PAGE;
/* Try to move the temp page array to the pool */
bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
psPageArrayData->bUnpinned,
/* Free pages and reset page caching attributes on x86 */
#if defined(CONFIG_X86)
if (uiTempIdx != 0 && psPageArrayData->bUnsetMemoryType == IMG_TRUE)
iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx);
PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
/* Free the pages */
for (i = 0; i < uiTempIdx; i++)
__free_pages(ppsTempPageArray[i], uiOrder);
/* Free the temp page array here if it did not move to the pool */
OSFreeMemNoStats(ppsTempPageArray);
/* Update metadata */
psPageArrayData->iNumPagesAllocated -= uiTempIdx;
PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
/* Free all the pages in a page array */
/* Non-sparse free path: poisons if configured, tries to hand the whole
 * array to the page pool, otherwise frees via the CMA/DMA path or
 * __free_pages (with x86 attribute restore). Resets the allocated count. */
_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumPages;
struct page **ppsPageArray = psPageArrayData->pagearray;
uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
/* We really should have something to free before we call this */
PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
/* Poison pages if necessary */
if (psPageArrayData->bPoisonOnFree)
for (i = 0; i < uiNumPages; i++)
_PoisonPages(ppsPageArray[i],
/* Try to move the page array to the pool */
bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
psPageArrayData->bUnpinned,
/* Pool took ownership of the array; drop our reference to it */
psPageArrayData->pagearray = NULL;
if (psPageArrayData->bIsCMA)
/* CMA path: free one device page (2^uiOrder OS pages) at a time */
IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder;
IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
for (i = 0; i < uiDevNumPages; i++)
_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
psPageArrayData->dmavirtarray[i],
psPageArrayData->dmaphysarray[i],
psPageArrayData->dmaphysarray[i] = (dma_addr_t)0;
psPageArrayData->dmavirtarray[i] = NULL;
ppsPageArray[i] = INVALID_PAGE;
#if defined(CONFIG_X86)
if (psPageArrayData->bUnsetMemoryType == IMG_TRUE)
/* Restore write-back attribute for the whole array in one call */
ret = set_pages_array_wb(ppsPageArray, uiNumPages);
PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
for (i = 0; i < uiNumPages; i++)
_FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]);
ppsPageArray[i] = INVALID_PAGE;
/* Update metadata */
psPageArrayData->iNumPagesAllocated = 0;
/* Free pages from a page array.
 * Takes care of mem stats and chooses correct free path depending on parameters. */
_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
IMG_UINT32 *pai32FreeIndices,
IMG_UINT32 ui32FreePageCount)
PVRSRV_ERROR eError;
IMG_UINT32 uiNumPages;
/* Check how many pages do we have to free */
if(pai32FreeIndices == NULL)
/* No index list: free everything currently allocated */
uiNumPages = psPageArrayData->iNumPagesAllocated;
uiNumPages = ui32FreePageCount;
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Update accounting before the pages disappear */
_FreeOSPages_MemStats(psPageArrayData, pai32FreeIndices, uiNumPages);
/* Go the sparse or non-sparse path */
if (psPageArrayData->iNumPagesAllocated != psPageArrayData->uiTotalNumPages
|| pai32FreeIndices != NULL)
eError = _FreeOSPages_Sparse(psPageArrayData,
eError = _FreeOSPages_Fast(psPageArrayData);
if(eError != PVRSRV_OK)
PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed"));
_DumpPageArray(psPageArrayData->pagearray, psPageArrayData->uiTotalNumPages);
2491 * Implementation of callback functions
2495 /* destructor func is called after last reference disappears, but
2496    before PMR itself is freed.
 *
 * Releases any pages still held by the PMR (removing the allocation from
 * the unpin list first if it was unpinned) and then frees the page-array
 * bookkeeping structure itself. Failures are asserted on rather than
 * returned — by this point there is no caller left to handle them. */
2498 PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv)
2500 	PVRSRV_ERROR eError;
2501 	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
2504 	/* We can't free pages until now. */
2505 	if (psOSPageArrayData->iNumPagesAllocated != 0)
	/* An unpinned allocation sits on the global unpin list; it must be
	 * delisted before its pages are freed. */
2508 		if (psOSPageArrayData->bUnpinned == IMG_TRUE)
2510 			_RemoveUnpinListEntryUnlocked(psOSPageArrayData);
2514 		eError = _FreeOSPages(psOSPageArrayData,
2517 		PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
	/* Free the page-pointer array (and CMA side arrays) themselves */
2520 	eError = _FreeOSPagesArray(psOSPageArrayData);
2521 	PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
2525 /* callback function for locking the system physical page addresses.
2526    This function must be called before the lookup address func.
 *
 * For on-demand (deferred) allocations this is the point where physical
 * pages are actually obtained from the OS; for eager allocations there is
 * nothing to do here beyond the refcounting handled by the PMR layer. */
2528 PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
2530 	PVRSRV_ERROR eError;
2531 	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
2533 	if (psOSPageArrayData->bOnDemand)
2535 		/* Allocate Memory for deferred allocation */
2536 		eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumPages);
2537 		if (eError != PVRSRV_OK)
/* Counterpart of PMRLockSysPhysAddressesOSMem: for on-demand allocations
 * the physical pages are handed back to the OS when the last lock is
 * dropped; otherwise this is effectively a no-op (refcount handled by the
 * PMR layer). */
2548 PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
2550 	/* Just drops the refcount. */
2551 	PVRSRV_ERROR eError = PVRSRV_OK;
2552 	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
2554 	if (psOSPageArrayData->bOnDemand)
2556 		/* Free Memory for deferred allocation */
2557 		eError = _FreeOSPages(psOSPageArrayData,
2560 		if (eError != PVRSRV_OK)
2566 	PVR_ASSERT (eError == PVRSRV_OK);
2570 /* N.B. It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function!
 *
 * Translates an array of byte offsets into the PMR into device physical
 * addresses. Rejects the request if the caller asks for a contiguity
 * (ui32Log2PageSize) larger than the PMR's own device page size. */
2572 PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv,
2573 					IMG_UINT32 ui32Log2PageSize,
2574 					IMG_UINT32 ui32NumOfPages,
2575 					IMG_DEVMEM_OFFSET_T *puiOffset,
2577 					IMG_DEV_PHYADDR *psDevPAddr)
2579 	const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
2580 	IMG_UINT32 uiPageSize = 1U << psOSPageArrayData->uiLog2DevPageSize;
2581 	IMG_UINT32 uiInPageOffset;
2582 	IMG_UINT32 uiPageIndex;
2585 	if (psOSPageArrayData->uiLog2DevPageSize < ui32Log2PageSize)
2587 		PVR_DPF((PVR_DBG_ERROR,
2588 				 "%s: Requested physical addresses from PMR "
2589 				 "for incompatible contiguity %u!",
2592 		return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
2595 	for (uiIdx=0; uiIdx < ui32NumOfPages; uiIdx++)
	/* Split each offset into a page index and an intra-page offset */
2599 		uiPageIndex = puiOffset[uiIdx] >> psOSPageArrayData->uiLog2DevPageSize;
2600 		uiInPageOffset = puiOffset[uiIdx] - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << psOSPageArrayData->uiLog2DevPageSize);
2602 		PVR_ASSERT(uiPageIndex < psOSPageArrayData->uiTotalNumPages);
2603 		PVR_ASSERT(uiInPageOffset < uiPageSize);
	/* NOTE(review): CPU physical is used as device physical here — this
	 * assumes a 1:1 CPU/device address mapping for UMA; confirm against
	 * the heap's address translation callbacks. */
2605 		psDevPAddr[uiIdx].uiAddr = page_to_phys(psOSPageArrayData->pagearray[uiPageIndex]);
2606 		psDevPAddr[uiIdx].uiAddr += uiInPageOffset;
/* Per-mapping handle returned by PMRAcquireKernelMappingDataOSMem and
 * consumed by PMRReleaseKernelMappingDataOSMem: records what is needed
 * to undo the kernel virtual mapping later. */
2613 typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ {
	/* Number of pages mapped — required by vm_unmap_ram() on release */
2615 	IMG_UINT32 ui32PageCount;
2616 } PMR_OSPAGEARRAY_KERNMAP_DATA;
/* Maps (part of) the PMR's pages into kernel virtual address space and
 * returns the mapped address plus an opaque handle for the matching
 * release call. Page protection is chosen to match the allocation's CPU
 * cache mode (CMA buffers are always mapped uncached). */
2619 PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
2622 								 void **ppvKernelAddressOut,
2623 								 IMG_HANDLE *phHandleOut,
2624 								 PMR_FLAGS_T ulFlags)
2626 	PVRSRV_ERROR eError;
2627 	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
2629 	pgprot_t prot = PAGE_KERNEL;
2630 	IMG_UINT32 ui32PageOffset;
2632 	IMG_UINT32 ui32PageCount;
2633 	IMG_UINT32 uiLog2DevPageSize = psOSPageArrayData->uiLog2DevPageSize;
2634 	PMR_OSPAGEARRAY_KERNMAP_DATA *psData;
2637 	   Zero offset and size as a special meaning which means map in the
2638 	   whole of the PMR, this is due to fact that the places that call
2639 	   this callback might not have access to be able to determine the
2642 	if ((uiOffset == 0) && (uiSize == 0))
2646 		ui32PageCount = psOSPageArrayData->iNumPagesAllocated;
	/* Partial map: compute first page, offset within it, and page count */
2652 		ui32PageOffset = uiOffset >> uiLog2DevPageSize;
2653 		uiMapOffset = uiOffset - (ui32PageOffset << uiLog2DevPageSize);
2654 		uiEndoffset = uiOffset + uiSize - 1;
2655 		// Add one as we want the count, not the offset
2656 		ui32PageCount = (uiEndoffset >> uiLog2DevPageSize) + 1;
2657 		ui32PageCount -= ui32PageOffset;
2660 	if (psOSPageArrayData->bIsCMA)
	/* DMA/CMA buffers: always map uncached on the CPU */
2662 		prot = pgprot_noncached(prot);
	/* Otherwise derive the kernel mapping attributes from the CPU cache
	 * mode requested at allocation time */
2666 	switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags))
2668 		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
2669 			prot = pgprot_noncached(prot);
2672 		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
2673 			prot = pgprot_writecombine(prot);
2676 		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
2680 			eError = PVRSRV_ERROR_INVALID_PARAMS;
2685 	psData = OSAllocMem(sizeof(*psData));
2688 		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
	/* vm_map_ram() is the faster path but is only taken on 64-bit builds
	 * (unless explicitly disabled); 32-bit builds fall back to vmap() */
2692 #if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
2693 	pvAddress = vmap(&psOSPageArrayData->pagearray[ui32PageOffset],
2698 	pvAddress = vm_map_ram(&psOSPageArrayData->pagearray[ui32PageOffset],
2703 	if (pvAddress == NULL)
2705 		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
	/* Hand back the address adjusted for the intra-page offset; the
	 * handle records the unadjusted base for the release call */
2709 	*ppvKernelAddressOut = pvAddress + uiMapOffset;
2710 	psData->pvBase = pvAddress;
2711 	psData->ui32PageCount = ui32PageCount;
2712 	*phHandleOut = psData;
2717 	   error exit paths follow
2722 	PVR_ASSERT(eError != PVRSRV_OK);
/* Undoes a mapping created by PMRAcquireKernelMappingDataOSMem, using the
 * same 32/64-bit build split: vunmap() pairs with vmap(), vm_unmap_ram()
 * (which needs the page count) pairs with vm_map_ram(). */
2726 static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
2729 	PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle;
2730 	PVR_UNREFERENCED_PARAMETER(pvPriv);
2732 #if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
2733 	vunmap(psData->pvBase);
2735 	vm_unmap_ram(psData->pvBase, psData->ui32PageCount);
/* Marks the allocation as unpinned: its pages become eligible for
 * reclamation by adding the page array to the global unpin list. Only
 * valid for allocations that are currently pinned and not on-demand
 * (both asserted below). */
2741 PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv)
2743 	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
2744 	PVRSRV_ERROR eError = PVRSRV_OK;
2746 	/* Lock down the pool and add the array to the unpin list */
2750 	PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_FALSE);
2751 	PVR_ASSERT(psOSPageArrayData->bOnDemand == IMG_FALSE);
2753 	eError = _AddUnpinListEntryUnlocked(psOSPageArrayData);
2755 	if (eError != PVRSRV_OK)
2757 		PVR_DPF((PVR_DBG_ERROR,
2758 				 "%s: Not able to add allocation to unpinned list (%d).",
	/* Only flip the flag once the list insertion has succeeded */
2765 	psOSPageArrayData->bUnpinned = IMG_TRUE;
/* Re-pins a previously unpinned allocation.
 *
 * If the pages survived (still on the unpin list with pages allocated)
 * they are simply delisted and reused. If they were reclaimed in the
 * meantime, fresh pages are allocated — for a sparse allocation only at
 * the indices marked valid in psMappingTable — and the function reports
 * PVRSRV_ERROR_PMR_NEW_MEMORY so the caller knows the old content is
 * gone. */
2773 PVRSRV_ERROR PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv,
2774 						PMR_MAPPING_TABLE *psMappingTable)
2776 	PVRSRV_ERROR eError;
2777 	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
2778 	IMG_UINT32 *pui32MapTable = NULL;
2779 	IMG_UINT32 i,j=0, ui32Temp=0;
2784 	PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_TRUE);
2786 	psOSPageArrayData->bUnpinned = IMG_FALSE;
2788 	/* If there are still pages in the array remove entries from the pool */
2789 	if (psOSPageArrayData->iNumPagesAllocated != 0)
2791 		_RemoveUnpinListEntryUnlocked(psOSPageArrayData);
	/* Pages intact: nothing to (re)allocate, exit via the common path */
2795 		goto e_exit_mapalloc_failure;
2799 	/* If pages were reclaimed we allocate new ones and
2800 	 * return PVRSRV_ERROR_PMR_NEW_MEMORY */
2801 	if (psMappingTable->ui32NumVirtChunks == 1)
	/* Non-sparse: refill the whole page array */
2803 		eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumPages);
	/* Sparse: build a compact index list of the valid (mapped) chunks and
	 * allocate only those */
2807 		pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks);
2808 		if(NULL == pui32MapTable)
2810 			eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
2811 			PVR_DPF((PVR_DBG_ERROR,
2812 					 "%s: Not able to Alloc Map Table.",
2814 			goto e_exit_mapalloc_failure;
2817 		for (i = 0,j=0; i < psMappingTable->ui32NumVirtChunks; i++)
2819 			ui32Temp = psMappingTable->aui32Translation[i];
2820 			if (TRANSLATION_INVALID != ui32Temp)
2822 				pui32MapTable[j++] = ui32Temp;
2825 		eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks);
2828 	if (eError != PVRSRV_OK)
2830 		PVR_DPF((PVR_DBG_ERROR,
2831 				 "%s: Not able to get new pages for unpinned allocation.",
2834 		eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
2838 	PVR_DPF((PVR_DBG_MESSAGE,
2839 			 "%s: Allocating new pages for unpinned allocation. "
2840 			 "Old content is lost!",
	/* Success, but with fresh (uninitialised w.r.t. old data) memory */
2843 	eError = PVRSRV_ERROR_PMR_NEW_MEMORY;
2846 	OSFreeMem(pui32MapTable);
2847 e_exit_mapalloc_failure:
2851 /*************************************************************************/ /*!
2852 @Function PMRChangeSparseMemOSMem
2853 @Description    Changes the sparse mapping by allocating and freeing pages.
2854                 It also updates the GPU and CPU mappings accordingly.
2855 @Return PVRSRV_ERROR failure code
2856 */ /**************************************************************************/
2858 PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
2860 						IMG_UINT32 ui32AllocPageCount,
2861 						IMG_UINT32 *pai32AllocIndices,
2862 						IMG_UINT32 ui32FreePageCount,
2863 						IMG_UINT32 *pai32FreeIndices,
2866 	PVRSRV_ERROR eError;
2868 	PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
2869 	PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
2870 	struct page **psPageArray = psPMRPageArrayData->pagearray;
2871 	struct page *psPage;
2873 	IMG_UINT32 ui32AdtnlAllocPages = 0; /*<! Number of pages to alloc from the OS */
2874 	IMG_UINT32 ui32AdtnlFreePages = 0; /*<! Number of pages to free back to the OS */
2875 	IMG_UINT32 ui32CommonRequestCount = 0; /*<! Number of pages to move position in the page array */
2876 	IMG_UINT32 ui32Loop = 0;
2877 	IMG_UINT32 ui32Index = 0;
2878 	IMG_UINT32 uiAllocpgidx ;
2879 	IMG_UINT32 uiFreepgidx;
2880 	IMG_UINT32 ui32Order = psPMRPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
2882 	/* Check SPARSE flags and calculate pages to allocate and free */
	/* When both alloc and free are requested, the overlap is satisfied by
	 * moving freed pages to the alloc positions instead of hitting the OS */
2883 	if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
2885 		ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ?
2886 				ui32FreePageCount : ui32AllocPageCount;
2888 		PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
2891 	if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
2893 		ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount;
2897 		ui32AllocPageCount = 0;
2900 	if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
2902 		ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount;
2906 		ui32FreePageCount = 0;
	/* A request that results in no work at all is an error */
2909 	if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
2911 		eError = PVRSRV_ERROR_INVALID_PARAMS;
2915 	/* The incoming request is classified into two operations independent of
2916 	 * each other: alloc & free pages.
2917 	 * These operations can be combined with two mapping operations as well
2918 	 * which are GPU & CPU space mappings.
2920 	 * From the alloc and free page requests, the net amount of pages to be
2921 	 * allocated or freed is computed. Pages that were requested to be freed
2922 	 * will be reused to fulfil alloc requests.
2924 	 * The order of operations is:
2925 	 * 1. Allocate new pages from the OS
2926 	 * 2. Move the free pages from free request to alloc positions.
2927 	 * 3. Free the rest of the pages not used for alloc
2929 	 * Alloc parameters are validated at the time of allocation
2930 	 * and any error will be handled then. */
2932 	/* Validate the free indices */
2933 	if (ui32FreePageCount)
2935 		if (NULL != pai32FreeIndices){
2937 			for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
2939 				uiFreepgidx = pai32FreeIndices[ui32Loop];
	/* NOTE(review): valid indices appear to be 0..uiTotalNumPages-1, so
	 * '>' lets uiFreepgidx == uiTotalNumPages through — confirm whether
	 * this should be '>='. */
2941 				if (uiFreepgidx > psPMRPageArrayData->uiTotalNumPages)
2943 					eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
	/* Can only free a slot that actually holds a page */
2947 				if (INVALID_PAGE == psPageArray[uiFreepgidx])
2949 					eError = PVRSRV_ERROR_INVALID_PARAMS;
	/* Free count given but no index list: invalid request */
2956 			eError = PVRSRV_ERROR_INVALID_PARAMS;
2961 	/* Validate the alloc indices */
	/* Only the tail of the alloc list (the entries that will be satisfied
	 * by moved free pages) is validated here; the leading ui32AdtnlAllocPages
	 * entries are validated inside _AllocOSPages */
2962 	for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
2964 		uiAllocpgidx = pai32AllocIndices[ui32Loop];
	/* NOTE(review): same suspected off-by-one as the free-index check above
	 * ('>' vs '>=') — confirm. */
2966 		if (uiAllocpgidx > psPMRPageArrayData->uiTotalNumPages)
2968 			eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
	/* Normal mode: target slot must be empty; remap mode: must be full */
2972 		if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
2974 			if ((INVALID_PAGE != psPageArray[uiAllocpgidx]) ||
2975 			    (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
2977 				eError = PVRSRV_ERROR_INVALID_PARAMS;
2983 			if ((INVALID_PAGE == psPageArray[uiAllocpgidx]) ||
2984 			    (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]) )
2986 				eError = PVRSRV_ERROR_INVALID_PARAMS;
2994 	/* Allocate new pages from the OS */
2995 	if (0 != ui32AdtnlAllocPages)
2997 		eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages);
2998 		if (PVRSRV_OK != eError)
3000 			PVR_DPF((PVR_DBG_MESSAGE,
3001 					 "%s: New Addtl Allocation of pages failed",
3006 		/*Mark the corresponding pages of translation table as valid */
3007 		for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
3009 			psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
	/* Continue walking the alloc list from where the OS allocations ended */
3014 	ui32Index = ui32Loop;
3016 	/* Move the corresponding free pages to alloc request */
3017 	for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++)
3019 		uiAllocpgidx = pai32AllocIndices[ui32Index];
3020 		uiFreepgidx = pai32FreeIndices[ui32Loop];
3021 		psPage = psPageArray[uiAllocpgidx];
3022 		psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
3024 		/* Is remap mem used in real world scenario? Should it be turned to a
3025 		 * debug feature? The condition check needs to be out of loop, will be
3026 		 * done at later point though after some analysis */
3027 		if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
	/* Normal move: source slot becomes empty/invalid */
3029 			psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
3030 			psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
3031 			psPageArray[uiFreepgidx] = (struct page *)INVALID_PAGE;
	/* Remap: swap the two slots, both remain valid */
3035 			psPageArray[uiFreepgidx] = psPage;
3036 			psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
3037 			psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
3040 		/* Be sure to honour the attributes associated with the allocation
3041 		 * such as zeroing, poisoning etc. */
3042 		if (psPMRPageArrayData->bPoisonOnAlloc)
3044 			_PoisonPages(psPageArray[uiAllocpgidx],
3051 		if (psPMRPageArrayData->bZero)
	/* NOTE(review): zeroing is implemented via _PoisonPages here —
	 * presumably with a zero pattern (arguments elided from this view);
	 * verify the fill value. */
3054 			_PoisonPages(psPageArray[uiAllocpgidx],
3062 	/* Free the additional free pages */
3063 	if (0 != ui32AdtnlFreePages)
3065 		eError = _FreeOSPages(psPMRPageArrayData,
3066 		                      &pai32FreeIndices[ui32Loop],
3067 		                      ui32AdtnlFreePages);
3068 		if (eError != PVRSRV_OK)
	/* Invalidate the translation entries of everything just freed */
3072 		while (ui32Loop < ui32FreePageCount)
3074 			psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID;
3085 /*************************************************************************/ /*!
3086 @Function PMRChangeSparseMemCPUMapOSMem
3087 @Description    Changes the CPU mapping of a sparse allocation to match
                the requested page allocations and frees.
3088 @Return PVRSRV_ERROR failure code
3089 */ /**************************************************************************/
/* Thin wrapper: delegates the CPU-side remapping of a sparse allocation
 * to the OS layer, passing the PMR's page array as the backing store. */
3091 PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv,
3093 										   IMG_UINT64 sCpuVAddrBase,
3094 										   IMG_UINT32 ui32AllocPageCount,
3095 										   IMG_UINT32 *pai32AllocIndices,
3096 										   IMG_UINT32 ui32FreePageCount,
3097 										   IMG_UINT32 *pai32FreeIndices)
3099 	struct page **psPageArray;
3100 	PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
3101 	IMG_CPU_PHYADDR sCPUPAddr;
3103 	sCPUPAddr.uiAddr = 0;
3104 	psPageArray = psPMRPageArrayData->pagearray;
3106 	return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
/* PMR implementation function table for OS-RAM-backed memory. Registered
 * with the PMR framework at creation time (see PhysmemNewOSRamBackedPMR);
 * pfnReadBytes/pfnWriteBytes are NULL so the framework falls back to its
 * generic kernel-mapping-based access path. */
3116 static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = {
3117     .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem,
3118     .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem,
3119     .pfnDevPhysAddr = &PMRSysPhysAddrOSMem,
3120     .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem,
3121     .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem,
3122     .pfnReadBytes = NULL,
3123     .pfnWriteBytes = NULL,
3124     .pfnUnpinMem = &PMRUnpinOSMem,
3125     .pfnPinMem = &PMRPinOSMem,
3126     .pfnChangeSparseMem = &PMRChangeSparseMemOSMem,
3127     .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem,
3128     .pfnFinalize = &PMRFinalizeOSMem,
3132 PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
3133 IMG_DEVMEM_SIZE_T uiSize,
3134 IMG_DEVMEM_SIZE_T uiChunkSize,
3135 IMG_UINT32 ui32NumPhysChunks,
3136 IMG_UINT32 ui32NumVirtChunks,
3137 IMG_UINT32 *puiAllocIndices,
3138 IMG_UINT32 uiLog2PageSize,
3139 PVRSRV_MEMALLOCFLAGS_T uiFlags,
3140 const IMG_CHAR *pszAnnotation,
3143 PVRSRV_ERROR eError;
3144 PVRSRV_ERROR eError2;
3146 struct _PMR_OSPAGEARRAY_DATA_ *psPrivData;
3147 PMR_FLAGS_T uiPMRFlags;
3148 PHYS_HEAP *psPhysHeap;
3151 IMG_BOOL bPoisonOnAlloc;
3152 IMG_BOOL bPoisonOnFree;
3156 IMG_UINT32 ui32CPUCacheFlags = DevmemCPUCacheMode(psDevNode, uiFlags);
3157 if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags))
3159 ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN;
3162 #if defined(PVRSRV_GPUVIRT_GUESTDRV)
3164 * The host driver (but not guest) can still use this factory for firmware
3167 PVR_ASSERT(!PVRSRV_CHECK_FW_LOCAL(uiFlags));
3171 * Silently round up alignment/pagesize if request was less that PAGE_SHIFT
3172 * because it would never be harmful for memory to be _more_ contiguous that
3175 uiLog2PageSize = PAGE_SHIFT > uiLog2PageSize ? PAGE_SHIFT : uiLog2PageSize;
3177 /* In case we have a non-sparse allocation tolerate bad requests and round up.
3178 * For sparse allocations the users have to make sure to meet the right
3180 if (ui32NumPhysChunks == ui32NumVirtChunks &&
3181 ui32NumVirtChunks == 1)
3183 /* Round up allocation size to at least a full PAGE_SIZE */
3184 uiSize = PVR_ALIGN(uiSize, PAGE_SIZE);
3185 uiChunkSize = uiSize;
3189 * Use CMA framework if order is greater than OS page size; please note
3190 * that OSMMapPMRGeneric() has the same expectation as well.
3192 bIsCMA = uiLog2PageSize > PAGE_SHIFT ? IMG_TRUE : IMG_FALSE;
3193 bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
3194 bCpuLocal = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
3195 bFwLocal = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
3196 bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
3197 bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
3198 bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
3200 if (bZero && bPoisonOnAlloc)
3202 /* Zero on Alloc and Poison on Alloc are mutually exclusive */
3203 eError = PVRSRV_ERROR_INVALID_PARAMS;
3207 /* Create Array structure that hold the physical pages */
3208 eError = _AllocOSPageArray(psDevNode,
3220 if (eError != PVRSRV_OK)
3222 goto errorOnAllocPageArray;
3227 /* Do we fill the whole page array or just parts (sparse)? */
3228 if (ui32NumPhysChunks == ui32NumVirtChunks)
3230 /* Allocate the physical pages */
3231 eError = _AllocOSPages(psPrivData, NULL, psPrivData->uiTotalNumPages);
3235 if (ui32NumPhysChunks != 0)
3237 /* Calculate the number of pages we want to allocate */
3238 IMG_UINT32 uiPagesToAlloc =
3239 (IMG_UINT32) ((((ui32NumPhysChunks * uiChunkSize) - 1) >> uiLog2PageSize) + 1);
3241 /* Make sure calculation is correct */
3242 PVR_ASSERT(((PMR_SIZE_T) uiPagesToAlloc << uiLog2PageSize) ==
3243 (ui32NumPhysChunks * uiChunkSize) );
3245 /* Allocate the physical pages */
3246 eError = _AllocOSPages(psPrivData, puiAllocIndices,
3251 if (eError != PVRSRV_OK)
3253 goto errorOnAllocPages;
3258 * In this instance, we simply pass flags straight through.
3260 * Generically, uiFlags can include things that control the PMR factory, but
3261 * we don't need any such thing (at the time of writing!), and our caller
3262 * specifies all PMR flags so we don't need to meddle with what was given to
3265 uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
3268 * Check no significant bits were lost in cast due to different bit widths
3271 PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
3275 PDUMPCOMMENT("Deferred Allocation PMR (UMA)");
3280 PDUMPCOMMENT("FW_LOCAL allocation requested");
3281 psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
3285 PDUMPCOMMENT("CPU_LOCAL allocation requested");
3286 psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
3290 psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
3293 eError = PMRCreatePMR(psDevNode,
3308 if (eError != PVRSRV_OK)
3320 eError2 = _FreeOSPages(psPrivData, NULL, 0);
3321 PVR_ASSERT(eError2 == PVRSRV_OK);
3325 eError2 = _FreeOSPagesArray(psPrivData);
3326 PVR_ASSERT(eError2 == PVRSRV_OK);
3328 errorOnAllocPageArray:
3330 PVR_ASSERT(eError != PVRSRV_OK);