RK3368 GPU: Rogue N Init.
[firefly-linux-kernel-4.4.55.git] / drivers / staging / imgtec / rogue / physmem_osmem_linux.c
1 /*************************************************************************/ /*!
2 @File
3 @Title          Implementation of PMR functions for OS managed memory
4 @Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @Description    Part of the memory management.  This module is responsible for
6                 implementing the function callbacks for physical memory borrowed
7                 from that normally managed by the operating system.
8 @License        Dual MIT/GPLv2
9
10 The contents of this file are subject to the MIT license as set out below.
11
12 Permission is hereby granted, free of charge, to any person obtaining a copy
13 of this software and associated documentation files (the "Software"), to deal
14 in the Software without restriction, including without limitation the rights
15 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 copies of the Software, and to permit persons to whom the Software is
17 furnished to do so, subject to the following conditions:
18
19 The above copyright notice and this permission notice shall be included in
20 all copies or substantial portions of the Software.
21
22 Alternatively, the contents of this file may be used under the terms of
23 the GNU General Public License Version 2 ("GPL") in which case the provisions
24 of GPL are applicable instead of those above.
25
26 If you wish to allow use of your version of this file only under the terms of
27 GPL, and not to allow others to use your version of this file under the terms
28 of the MIT license, indicate your decision by deleting the provisions above
29 and replace them with the notice and other provisions required by GPL as set
30 out in the file called "GPL-COPYING" included in this distribution. If you do
31 not delete the provisions above, a recipient may use your version of this file
32 under the terms of either the MIT license or GPL.
33
34 This License is also included in this distribution in the file called
35 "MIT-COPYING".
36
37 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
38 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
39 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
40 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
41 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
42 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
43 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
44 */ /**************************************************************************/
45 #include <linux/version.h>
46 #include <linux/device.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/mm.h>
49 #include <linux/slab.h>
50 #include <linux/highmem.h>
51 #include <linux/mm_types.h>
52 #include <linux/vmalloc.h>
53 #include <linux/gfp.h>
54 #include <linux/sched.h>
55 #include <linux/atomic.h>
56 #include <asm/io.h>
57 #if defined(CONFIG_X86)
58 #include <asm/cacheflush.h>
59 #endif
60
61 /* include5/ */
62 #include "img_types.h"
63 #include "pvr_debug.h"
64 #include "pvrsrv_error.h"
65 #include "pvrsrv_memallocflags.h"
66 #include "rgx_pdump_panics.h"
67 /* services/server/include/ */
68 #include "allocmem.h"
69 #include "osfunc.h"
70 #include "pdump_km.h"
71 #include "pmr.h"
72 #include "pmr_impl.h"
73 #include "devicemem_server_utils.h"
74
75 /* ourselves */
76 #include "physmem_osmem.h"
77 #include "physmem_osmem_linux.h"
78
79 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
80 #include "process_stats.h"
81 #endif
82
83 #include "kernel_compatibility.h"
84
85 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
86 static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM;
87 #else
88 /* split_page not available on older kernels */
89 #undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM
90 #define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0
91 static IMG_UINT32 g_uiMaxOrder = 0;
92 #endif
93
94 /* Get/Set/Mask out alloc_page/dma_alloc flag */
95 #define DMA_GET_ADDR(x)                 (((x) >> 1) << 1)
96 #define DMA_SET_ALLOCPG_ADDR(x) ((x) | ((dma_addr_t)0x01))
97 #define DMA_IS_ALLOCPG_ADDR(x)  ((x) & ((dma_addr_t)0x01))
98
/* Per-PMR bookkeeping for OS-memory-backed allocations. One instance is
 * created per PMR and tracks the backing pages plus the alloc-time options
 * the free path needs to honour. */
typedef struct _PMR_OSPAGEARRAY_DATA_ {
	/* Device for which this allocation has been made */
	PVRSRV_DEVICE_NODE *psDevNode;

	/*
	 * iNumPagesAllocated:
	 * Number of pages allocated in this PMR so far.
	 * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR.
	 */
	IMG_INT32 iNumPagesAllocated;

	/*
	 * uiTotalNumPages:
	 * Total number of "pages" (a.k.a. macro pages, compound pages, higher
	 * order pages, ...) supported by this PMR. Fixed as of now due to the
	 * fixed page table array size.
	 */
	IMG_UINT32 uiTotalNumPages;

	/*
	 * uiLog2DevPageSize:
	 * Log2 size of each "page" -- this would normally be the same as
	 * PAGE_SHIFT, but we support the idea that we may allocate pages
	 * in larger chunks for better contiguity, using order > 0 in the
	 * call to alloc_pages().
	 */
	IMG_UINT32 uiLog2DevPageSize;

	/*
	 * For non DMA/CMA allocations, pagearray references the pages
	 * thus allocated; one entry per compound page when compound
	 * pages are used. In addition, for DMA/CMA allocations, we
	 * track the returned cpu virtual and device bus address.
	 */
	struct page **pagearray;
	dma_addr_t *dmaphysarray;
	void **dmavirtarray;

	/*
	 * Alloc-time options recorded here so that the free path knows,
	 * among other things, whether poisoning will be required when the
	 * PMR is freed.
	 */
	IMG_BOOL bZero;          /* Zero the pages at allocation time */
	IMG_BOOL bPoisonOnFree;  /* Poison page contents when freeing */
	IMG_BOOL bPoisonOnAlloc; /* Poison page contents when allocating */
	IMG_BOOL bOnDemand;      /* Physical backing is allocated on demand */
	IMG_BOOL bUnpinned;      /* Should be protected by page pool lock */
	IMG_BOOL bIsCMA;         /* Is CMA memory allocated via DMA framework */

	/*
	 * The cache mode of the PMR. Additionally carrying the CPU-Cache-Clean
	 * flag, advising us to do cache maintenance on behalf of the caller.
	 * NOTE: For DMA/CMA allocations, memory is _always_ uncached.
	 */
	IMG_UINT32 ui32CPUCacheFlags;
	/*
	 * Boolean used to track if we need to revert the cache attributes
	 * of the pages used in this allocation. Depends on OS/architecture.
	 */
	IMG_BOOL bUnsetMemoryType;
} PMR_OSPAGEARRAY_DATA;
160
161 /***********************************
162  * Page pooling for uncached pages *
163  ***********************************/
164
165 static INLINE void
166 _FreeOSPage_CMA(struct device *dev,
167                                 size_t alloc_size,
168                                 IMG_UINT32 uiOrder,
169                                 void *virt_addr,
170                                 dma_addr_t dev_addr,
171                                 struct page *psPage);
172
173 static void
174 _FreeOSPage(IMG_UINT32 uiOrder,
175                         IMG_BOOL bUnsetMemoryType,
176                         struct page *psPage);
177
178 static PVRSRV_ERROR
179 _FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
180                         IMG_UINT32 *pai32FreeIndices,
181                         IMG_UINT32 ui32FreePageCount);
182
183 static PVRSRV_ERROR
184 _FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
185                                                    IMG_UINT32 *puiPagesFreed);
186
187 static inline void
188 _ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
189                                         struct page **ppsPage,
190                                         IMG_UINT32 uiNumPages,
191                                         IMG_BOOL bFlush);
192
193 static inline PVRSRV_ERROR
194 _ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
195                                         struct page **ppsPage,
196                                         IMG_UINT32 uiNumPages,
197                                         IMG_BOOL bFlush,
198                                         IMG_UINT32 ui32CPUCacheFlags);
199
/* A struct for our page pool holding an array of pages.
 * We always put units of page arrays to the pool but are
 * able to take individual pages. An entry is removed from the
 * pool once its page array is exhausted. */
typedef struct
{
	/* Linkage for page pool LRU list */
	struct list_head sPagePoolItem;

	/* How many items (pages) are still available in ppsPageArray */
	IMG_UINT32 uiItemsRemaining;
	/* The page array itself; owned by this entry while pooled */
	struct page **ppsPageArray;

} LinuxPagePoolEntry;
213
/* A struct for the unpinned items: links a PMR page array onto the
 * global unpin list so the shrinker can reclaim its pages. */
typedef struct
{
	/* Linkage for the global unpin LRU list (g_sUnpinList) */
	struct list_head sUnpinPoolItem;
	/* The unpinned allocation this entry refers to */
	PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr;
} LinuxUnpinEntry;
220
221 /* Caches to hold page pool and page array structures */
222 static struct kmem_cache *g_psLinuxPagePoolCache = NULL;
223 static struct kmem_cache *g_psLinuxPageArray = NULL;
224
225 /* Track what is live */
226 static IMG_UINT32 g_ui32UnpinPageCount = 0;
227 static IMG_UINT32 g_ui32PagePoolEntryCount = 0;
228
229 /* Pool entry limits */
230 #if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES)
231 static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES;
232 static const IMG_UINT32 g_ui32PagePoolMaxEntries_5Percent= PVR_LINUX_PHYSMEM_MAX_POOL_PAGES / 20;
233 #else
234 static const IMG_UINT32 g_ui32PagePoolMaxEntries = 0;
235 static const IMG_UINT32 g_ui32PagePoolMaxEntries_5Percent = 0;
236 #endif
237
238 #if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES)
239 static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES;
240 #else
241 static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = 0;
242 #endif
243
244 #if defined(CONFIG_X86)
245 #define PHYSMEM_OSMEM_NUM_OF_POOLS 3
246 static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
247         PVRSRV_MEMALLOCFLAG_CPU_CACHED,
248         PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
249         PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE
250 };
251 #else
252 #define PHYSMEM_OSMEM_NUM_OF_POOLS 2
253 static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
254         PVRSRV_MEMALLOCFLAG_CPU_CACHED,
255         PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
256 };
257 #endif
258
259 /* Global structures we use to manage the page pool */
260 static DEFINE_MUTEX(g_sPagePoolMutex);
261
262 /* List holding the page array pointers: */
263 static LIST_HEAD(g_sPagePoolList_WB);
264 static LIST_HEAD(g_sPagePoolList_WC);
265 static LIST_HEAD(g_sPagePoolList_UC);
266 static LIST_HEAD(g_sUnpinList);
267
/* Acquire the global page pool mutex, serialising access to the page
 * pools, the unpin list and their counters. */
static inline void
_PagePoolLock(void)
{
	mutex_lock(&g_sPagePoolMutex);
}
273
/* Non-blocking acquire of the page pool mutex; returns non-zero on
 * success, 0 if the lock is contended. Used from the shrinker callbacks
 * to avoid deadlocking under direct reclaim. */
static inline int
_PagePoolTrylock(void)
{
	return mutex_trylock(&g_sPagePoolMutex);
}
279
/* Release the global page pool mutex. */
static inline void
_PagePoolUnlock(void)
{
	mutex_unlock(&g_sPagePoolMutex);
}
285
286 static PVRSRV_ERROR
287 _AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
288 {
289         LinuxUnpinEntry *psUnpinEntry;
290
291         psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry));
292         if (!psUnpinEntry)
293         {
294                 PVR_DPF((PVR_DBG_ERROR,
295                                 "%s: OSAllocMem failed. Cannot add entry to unpin list.",
296                                 __func__));
297                 return PVRSRV_ERROR_OUT_OF_MEMORY;
298         }
299
300         psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData;
301
302         /* Add into pool that the shrinker can access easily*/
303         list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList);
304
305         g_ui32UnpinPageCount += psOSPageArrayData->iNumPagesAllocated;
306
307         return PVRSRV_OK;
308 }
309
310 static void
311 _RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
312 {
313         LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
314
315         /* Remove from pool */
316         list_for_each_entry_safe(psUnpinEntry,
317                                  psTempUnpinEntry,
318                                  &g_sUnpinList,
319                                  sUnpinPoolItem)
320         {
321                 if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData)
322                 {
323                         list_del(&psUnpinEntry->sUnpinPoolItem);
324                         break;
325                 }
326         }
327
328         OSFreeMem(psUnpinEntry);
329
330         g_ui32UnpinPageCount -= psOSPageArrayData->iNumPagesAllocated;
331 }
332
/* Map a CPU cache mode onto the page pool list that holds pages with the
 * matching caching attributes.
 *
 * On return *ppsPoolHead points at the selected list. Returns IMG_TRUE on
 * success, IMG_FALSE for an unknown caching mode (in which case
 * *ppsPoolHead is left untouched). */
static inline IMG_BOOL
_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags,
				 struct list_head **ppsPoolHead)
{
	switch(PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
	{
		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
#if defined(CONFIG_X86)
		/*
			For x86 we need to keep different lists for uncached
			and write-combined as we must always honour the PAT
			setting which cares about this difference.
		*/

			*ppsPoolHead = &g_sPagePoolList_WC;
			break;
#endif
		/* fallthrough (non-x86 only): write-combined shares the
		 * uncached pool because the break above is compiled out */

		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
			*ppsPoolHead = &g_sPagePoolList_UC;
			break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
			*ppsPoolHead = &g_sPagePoolList_WB;
			break;

		default:
			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get pages from pool, "
					 "unknown CPU caching mode.", __func__));
			return IMG_FALSE;
	}
	return IMG_TRUE;
}
366
367 static struct shrinker g_sShrinker;
368
369 /* Returning the number of pages that still reside in the page pool.
370  * Do not count excess pages that will be freed by the defer free thread. */
371 static unsigned long
372 _GetNumberOfPagesInPoolUnlocked(void)
373 {
374         unsigned int uiEntryCount;
375
376         uiEntryCount = (g_ui32PagePoolEntryCount > g_ui32PagePoolMaxEntries) ? g_ui32PagePoolMaxEntries : g_ui32PagePoolEntryCount;
377         return uiEntryCount + g_ui32UnpinPageCount;
378 }
379
380 /* Linux shrinker function that informs the OS about how many pages we are caching and
381  * it is able to reclaim. */
382 static unsigned long
383 _CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
384 {
385         int remain;
386
387         PVR_ASSERT(psShrinker == &g_sShrinker);
388         (void)psShrinker;
389         (void)psShrinkControl;
390
391         /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
392         if (_PagePoolTrylock() == 0)
393                 return 0;
394         remain = _GetNumberOfPagesInPoolUnlocked();
395         _PagePoolUnlock();
396
397         return remain;
398 }
399
/* Linux shrinker "scan" callback to reclaim the pages from our page pool.
 * First drains the pool itself, then frees whole unpinned allocations
 * (LRU order) until nr_to_scan pages have been reclaimed.
 *
 * Return value semantics differ by kernel version (see the #if at the
 * bottom): pre-3.12 returns the remaining pool size, 3.12+ returns the
 * number of pages actually freed during this scan. */
static unsigned long
_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
{
	unsigned long uNumToScan = psShrinkControl->nr_to_scan;
	unsigned long uSurplus = 0;
	LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
	IMG_UINT32 uiPagesFreed;

	PVR_ASSERT(psShrinker == &g_sShrinker);
	(void)psShrinker;

	/* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
	if (_PagePoolTrylock() == 0)
		return SHRINK_STOP;

	/* Phase 1: release pages held by the pool itself */
	_FreePagesFromPoolUnlocked(uNumToScan,
							   &uiPagesFreed);
	uNumToScan -= uiPagesFreed;

	if (uNumToScan == 0)
	{
		goto e_exit;
	}

	/* Phase 2: free unpinned memory, starting with LRU entries */
	list_for_each_entry_safe(psUnpinEntry,
							 psTempUnpinEntry,
							 &g_sUnpinList,
							 sUnpinPoolItem)
	{
		PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr;
		/* Number of pages actually backed by this allocation; never more
		 * than its fixed total. NOTE(review): compares IMG_UINT32 with
		 * IMG_INT32 — relies on iNumPagesAllocated being non-negative. */
		IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumPages > psPageArrayDataPtr->iNumPagesAllocated)?
								psPageArrayDataPtr->iNumPagesAllocated:psPageArrayDataPtr->uiTotalNumPages;
		PVRSRV_ERROR eError;

		/* Free associated pages (NULL/0 => free the whole allocation) */
		eError = _FreeOSPages(psPageArrayDataPtr,
							  NULL,
							  0);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
					"%s: Shrinker is unable to free unpinned pages. Error: %s (%d)",
					 __FUNCTION__,
					 PVRSRVGetErrorStringKM(eError),
					 eError));
			goto e_exit;
		}

		/* Remove item from pool */
		list_del(&psUnpinEntry->sUnpinPoolItem);

		g_ui32UnpinPageCount -= uiNumPages;

		/* Check if there is more to free or if we already surpassed the limit.
		 * Only the first branch continues the loop; freeing more pages than
		 * requested records the overshoot in uSurplus so the freed-count
		 * returned below stays equal to nr_to_scan. */
		if (uiNumPages < uNumToScan)
		{
			uNumToScan -= uiNumPages;

		}
		else if (uiNumPages > uNumToScan)
		{
			uSurplus += uiNumPages - uNumToScan;
			uNumToScan = 0;
			goto e_exit;
		}
		else
		{
			uNumToScan -= uiNumPages;
			goto e_exit;
		}
	}

e_exit:
	/* Sanity: empty lists must mean zero counters */
	if (list_empty(&g_sPagePoolList_WC) &&
		list_empty(&g_sPagePoolList_UC) &&
		list_empty(&g_sPagePoolList_WB))
	{
		PVR_ASSERT(g_ui32PagePoolEntryCount == 0);
	}
	if (list_empty(&g_sUnpinList))
	{
		PVR_ASSERT(g_ui32UnpinPageCount == 0);
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
	/* Old .shrink contract: return the number of objects remaining */
	{
		int remain;
		remain = _GetNumberOfPagesInPoolUnlocked();
		_PagePoolUnlock();
		return remain;
	}
#else
	/* Returning the  number of pages freed during the scan */
	_PagePoolUnlock();
	return psShrinkControl->nr_to_scan - uNumToScan + uSurplus;
#endif
}
499
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
/* Pre-3.12 kernels use a single .shrink callback that both counts and
 * scans: nr_to_scan == 0 means "just report the reclaimable count". */
static int
_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
{
	if (psShrinkControl->nr_to_scan != 0)
	{
		return _ScanObjectsInPagePool(psShrinker, psShrinkControl);
	}
	else
	{
		/* No pages are being reclaimed so just return the page count */
		return _CountObjectsInPagePool(psShrinker, psShrinkControl);
	}
}

static struct shrinker g_sShrinker =
{
	.shrink = _ShrinkPagePool,
	.seeks = DEFAULT_SEEKS
};
#else
/* 3.12+ kernels split the shrinker contract into separate
 * count_objects/scan_objects callbacks. */
static struct shrinker g_sShrinker =
{
	.count_objects = _CountObjectsInPagePool,
	.scan_objects = _ScanObjectsInPagePool,
	.seeks = DEFAULT_SEEKS
};
#endif
528
529 /* Register the shrinker so Linux can reclaim cached pages */
530 void LinuxInitPhysmem(void)
531 {
532         g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL);
533
534         _PagePoolLock();
535         g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL);
536         if (g_psLinuxPagePoolCache)
537         {
538                 /* Only create the shrinker if we created the cache OK */
539                 register_shrinker(&g_sShrinker);
540         }
541         _PagePoolUnlock();
542 }
543
544 /* Unregister the shrinker and remove all pages from the pool that are still left */
545 void LinuxDeinitPhysmem(void)
546 {
547         IMG_UINT32 uiPagesFreed;
548
549         _PagePoolLock();
550         if (_FreePagesFromPoolUnlocked(g_ui32PagePoolEntryCount, &uiPagesFreed) != PVRSRV_OK)
551         {
552                 PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when deinitialising."));
553                 PVR_ASSERT(0);
554         }
555
556         PVR_ASSERT(g_ui32PagePoolEntryCount == 0);
557
558         /* Free the page cache */
559         kmem_cache_destroy(g_psLinuxPagePoolCache);
560
561         unregister_shrinker(&g_sShrinker);
562         _PagePoolUnlock();
563
564         kmem_cache_destroy(g_psLinuxPageArray);
565 }
566
/* Re-enable the OOM killer for the current task by clearing the
 * PF_DUMPCORE flag set by DisableOOMKiller() (see the rationale there). */
static void EnableOOMKiller(void)
{
	current->flags &= ~PF_DUMPCORE;
}
571
/* Prevent the OOM killer from targeting the current task while we perform
 * speculative/optional allocations. Must be paired with EnableOOMKiller(). */
static void DisableOOMKiller(void)
{
	/* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled.
	 *
	 * As oom_killer_disable() is an inline, non-exported function, we
	 * can't use it from a modular driver. Furthermore, the OOM killer
	 * API doesn't look thread safe, which `current' is.
	 */
	WARN_ON(current->flags & PF_DUMPCORE);
	current->flags |= PF_DUMPCORE;
}
583
584 /* Prints out the addresses in a page array for debugging purposes
585  * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */
586 /* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */
587 static inline void
588 _DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint)
589 {
590 #if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY)
591         IMG_UINT32 i;
592         if (pagearray)
593         {
594                 printk("Array %p:\n", pagearray);
595                 for (i = 0; i < uiPagesToPrint; i++)
596                 {
597                         printk("%p | ", (pagearray)[i]);
598                 }
599                 printk("\n");
600         }
601         else
602         {
603                 printk("Array is NULL:\n");
604         }
605 #else
606         PVR_UNREFERENCED_PARAMETER(pagearray);
607         PVR_UNREFERENCED_PARAMETER(uiPagesToPrint);
608 #endif
609 }
610
/* Debugging function that dumps out the number of pages for every
 * page array that is currently in the page pool.
 * Not defined by default. Define locally to activate feature: */
/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */
static void
_DumpPoolStructure(void)
{
#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL)
	LinuxPagePoolEntry *psEntry, *psNextEntry;
	struct list_head *psListHead = NULL;
	IMG_UINT32 uiPool;

	printk("\n");
	/* Walk every caching-mode pool in turn */
	for (uiPool = 0; uiPool < PHYSMEM_OSMEM_NUM_OF_POOLS; uiPool++)
	{
		printk("pool = %u \n", uiPool);

		/* Get the correct list for this caching mode */
		if (!_GetPoolListHead(g_aui32CPUCacheFlags[uiPool], &psListHead))
		{
			break;
		}

		list_for_each_entry_safe(psEntry, psNextEntry, psListHead, sPagePoolItem)
		{
			printk("%u | ", psEntry->uiItemsRemaining);
		}
		printk("\n");
	}
#endif
}
647
/* Will take excess pages from the pool with acquired pool lock and then free
 * them without pool lock being held.
 * Designed to run in the deferred free thread.
 *
 * Two-phase protocol per pool: under the lock, entries beyond the pool
 * size limit are moved from the pool onto a local free list and the
 * counters are updated; the lock is then dropped and the pages are
 * actually released. A per-call rotation of the starting pool index makes
 * sure all pools are drained over time. */
static PVRSRV_ERROR
_FreeExcessPagesFromPool(void)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	LIST_HEAD(sPagePoolFreeList);
	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
	struct list_head *psPoolHead = NULL;
	IMG_UINT32 i, j, uiPoolIdx;
	static IMG_UINT8 uiPoolAccessRandomiser;
	IMG_BOOL bDone = IMG_FALSE;

	/* Make sure all pools are drained over time */
	uiPoolAccessRandomiser++;

	/* Empty all pools */
	for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
	{
		uiPoolIdx = (j + uiPoolAccessRandomiser) % PHYSMEM_OSMEM_NUM_OF_POOLS;

		/* Just lock down to collect pool entries and unlock again before freeing them */
		_PagePoolLock();

		/* Get the correct list for this caching mode */
		if (!_GetPoolListHead(g_aui32CPUCacheFlags[uiPoolIdx], &psPoolHead))
		{
			_PagePoolUnlock();
			break;
		}

		/* Traverse pool in reverse order to remove items that exceeded
		 * the pool size first */
		list_for_each_entry_safe_reverse(psPagePoolEntry,
										 psTempPoolEntry,
										 psPoolHead,
										 sPagePoolItem)
		{
			/* Go to free the pages if we collected enough */
			if (g_ui32PagePoolEntryCount <= g_ui32PagePoolMaxEntries)
			{
				bDone = IMG_TRUE;
				break;
			}

			/* Move item to free list so we can free it later without the pool lock */
			list_del(&psPagePoolEntry->sPagePoolItem);
			list_add(&psPagePoolEntry->sPagePoolItem, &sPagePoolFreeList);

			/* Update counters */
			g_ui32PagePoolEntryCount -= psPagePoolEntry->uiItemsRemaining;

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
	/* MemStats usually relies on having the bridge lock held, however
	 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
	 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
	 * the page pool lock is used to ensure these calls are mutually
	 * exclusive
	 */
	/* NOTE(review): this statement is part of the inner loop despite its
	 * indentation — it executes once per moved pool entry. */
	PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * psPagePoolEntry->uiItemsRemaining);
#endif
		}

		_PagePoolUnlock();


		/* Free the pages that we removed from the pool */
		list_for_each_entry_safe(psPagePoolEntry,
								 psTempPoolEntry,
								 &sPagePoolFreeList,
								 sPagePoolItem)
		{
#if defined(CONFIG_X86)
			/* Set the correct page caching attributes on x86 */
			if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[uiPoolIdx]))
			{
				int ret;
				ret = set_pages_array_wb(psPagePoolEntry->ppsPageArray,
										 psPagePoolEntry->uiItemsRemaining);
				if (ret)
				{
					PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
					eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES;
					/* NOTE(review): bailing out here leaves any remaining
					 * entries on sPagePoolFreeList unfreed (they are no
					 * longer in the pool either) — confirm whether this
					 * leak on the x86 error path is acceptable. */
					goto e_exit;
				}
			}
#endif
			/* Free the actual pages */
			for (i = 0; i < psPagePoolEntry->uiItemsRemaining; i++)
			{
				__free_pages(psPagePoolEntry->ppsPageArray[i], 0);
				psPagePoolEntry->ppsPageArray[i] = NULL;
			}

			/* Free the pool entry and page array*/
			list_del(&psPagePoolEntry->sPagePoolItem);
			OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
			kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
		}

		/* Stop if all excess pages were removed */
		if (bDone)
		{
			eError = PVRSRV_OK;
			goto e_exit;
		}

	}

e_exit:
	_DumpPoolStructure();
	return eError;
}
762
/* Free a certain number of pages from the page pool.
 * Mainly used in error paths or at deinitialisation to
 * empty the whole pool.
 *
 * The caller must hold the page pool lock.
 *
 * uiMaxPagesToFree - upper bound on the number of pages to release.
 * puiPagesFreed    - out: number of pages actually freed; may be less than
 *                    requested if the pool runs empty or an error occurs.
 *
 * Returns PVRSRV_OK, or PVRSRV_ERROR_FAILED_TO_FREE_PAGES if resetting the
 * x86 page attributes fails (in which case freeing stops early). */
static PVRSRV_ERROR
_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
						   IMG_UINT32 *puiPagesFreed)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
	struct list_head *psPoolHead = NULL;
	IMG_UINT32 i, j;

	/* Assume the full request will be satisfied; corrected at e_exit by
	 * subtracting whatever is still outstanding in uiMaxPagesToFree. */
	*puiPagesFreed = uiMaxPagesToFree;

	/* Empty all pools */
	for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
	{

		/* Get the correct list for this caching mode */
		if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead))
		{
			break;
		}

		/* Free the pages and remove page arrays from the pool if they are exhausted */
		list_for_each_entry_safe(psPagePoolEntry,
								 psTempPoolEntry,
								 psPoolHead,
								 sPagePoolItem)
		{
			IMG_UINT32 uiItemsToFree;
			struct page **ppsPageArray;

			/* Check if we are going to free the whole page array or just parts */
			if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree)
			{
				uiItemsToFree = psPagePoolEntry->uiItemsRemaining;
				ppsPageArray = psPagePoolEntry->ppsPageArray;
			}
			else
			{
				/* Partial free: consume from the tail of the entry's array so
				 * its remaining pages stay contiguous at the front. */
				uiItemsToFree = uiMaxPagesToFree;
				ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]);
			}

#if defined(CONFIG_X86)
			/* Set the correct page caching attributes on x86. Pages that were
			 * pooled uncached/write-combined must go back to write-back before
			 * being returned to the OS. */
			if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j]))
			{
				int ret;
				ret = set_pages_array_wb(ppsPageArray, uiItemsToFree);
				if (ret)
				{
					PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
					eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES;
					goto e_exit;
				}
			}
#endif

			/* Free the actual pages back to the OS (pool stores order-0 pages) */
			for (i = 0; i < uiItemsToFree; i++)
			{
				__free_pages(ppsPageArray[i], 0);
				ppsPageArray[i] = NULL;
			}

			/* Reduce counters */
			uiMaxPagesToFree -= uiItemsToFree;
			g_ui32PagePoolEntryCount -= uiItemsToFree;
			psPagePoolEntry->uiItemsRemaining -= uiItemsToFree;

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
	/* MemStats usually relies on having the bridge lock held, however
	 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
	 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
	 * the page pool lock is used to ensure these calls are mutually
	 * exclusive
	 */
	PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree);
#endif

			/* Is this pool entry exhausted, delete it */
			if (psPagePoolEntry->uiItemsRemaining == 0)
			{
				OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
				list_del(&psPagePoolEntry->sPagePoolItem);
				kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
			}

			/* Return if we have all our pages */
			if (uiMaxPagesToFree == 0)
			{
				goto e_exit;
			}
		}
	}

e_exit:
	/* Report how many pages were actually freed */
	*puiPagesFreed -= uiMaxPagesToFree;
	_DumpPoolStructure();
	return eError;
}
866
/* Get a certain number of pages from the page pool and
 * copy them directly into a given page array.
 *
 * The caller must hold the page pool lock.
 *
 * ui32CPUCacheFlags   - caching mode, selects which pool list to draw from.
 * uiMaxNumPages       - number of pages requested.
 * ppsPageArray        - destination array, filled from index 0.
 * puiNumReceivedPages - out: number of pages actually provided, in the
 *                       range 0..uiMaxNumPages. */
static void
_GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags,
						  IMG_UINT32 uiMaxNumPages,
						  struct page **ppsPageArray,
						  IMG_UINT32 *puiNumReceivedPages)
{
	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
	struct list_head *psPoolHead = NULL;
	IMG_UINT32 i;

	*puiNumReceivedPages = 0;

	/* Get the correct list for this caching mode */
	if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead))
	{
		return;
	}

	/* Check if there are actually items in the list */
	if (list_empty(psPoolHead))
	{
		return;
	}

	PVR_ASSERT(g_ui32PagePoolEntryCount > 0);

	/* Receive pages from the pool */
	list_for_each_entry_safe(psPagePoolEntry,
							 psTempPoolEntry,
							 psPoolHead,
							 sPagePoolItem)
	{
		/* Get the pages from this pool entry, consuming from the tail of its
		 * array so the entry's remaining pages stay contiguous at the front */
		for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--)
		{
			ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1];
			(*puiNumReceivedPages)++;
			psPagePoolEntry->uiItemsRemaining--;
		}

		/* Is this pool entry exhausted, delete it */
		if (psPagePoolEntry->uiItemsRemaining == 0)
		{
			OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
			list_del(&psPagePoolEntry->sPagePoolItem);
			kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
		}

		/* Return if we have all our pages */
		if (*puiNumReceivedPages == uiMaxNumPages)
		{
			goto exit_ok;
		}
	}

exit_ok:

	/* Update counters */
	g_ui32PagePoolEntryCount -= *puiNumReceivedPages;

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
	/* MemStats usually relies on having the bridge lock held, however
	 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
	 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
	 * the page pool lock is used to ensure these calls are mutually
	 * exclusive
	 */
	PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages));
#endif

	_DumpPoolStructure();
	return;
}
942
943 /* When is it worth waiting for the page pool? */
944 #define PVR_LINUX_PHYSMEM_MIN_PAGES_TO_WAIT_FOR_POOL 64
945
946 /* Same as _GetPagesFromPoolUnlocked but handles locking and
947  * checks first whether pages from the pool are a valid option. */
948 static inline void
949 _GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode,
950                                                 IMG_UINT32 ui32CPUCacheFlags,
951                                                 IMG_UINT32 uiPagesToAlloc,
952                                                 IMG_UINT32 uiOrder,
953                                                 IMG_BOOL bZero,
954                                                 struct page **ppsPageArray,
955                                                 IMG_UINT32 *puiPagesFromPool)
956 {
957         /* The page pool stores only order 0 pages. If we need zeroed memory we
958          * directly allocate from the OS because it is faster than doing it ourselves. */
959         if (uiOrder == 0 && !bZero)
960         {
961                 if (uiPagesToAlloc < PVR_LINUX_PHYSMEM_MIN_PAGES_TO_WAIT_FOR_POOL)
962                 {
963                         /* In case the request is a few pages, just try to acquire the pool lock */
964                         if (_PagePoolTrylock() == 0)
965                         {
966                                 return;
967                         }
968                 }
969                 else
970                 {
971                         /* It is worth waiting if many pages were requested.
972                          * Freeing an item to the pool is very fast and
973                          * the defer free thread will release the lock regularly. */
974                         _PagePoolLock();
975                 }
976
977                 _GetPagesFromPoolUnlocked(ui32CPUCacheFlags,
978                                                                   uiPagesToAlloc,
979                                                                   ppsPageArray,
980                                                                   puiPagesFromPool);
981                 _PagePoolUnlock();
982
983                 /* Do cache maintenance so allocations from the pool can be
984                  * considered clean */
985                 if (PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags) &&
986                     PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags))
987                 {
988                         _ApplyCacheMaintenance(psDevNode,
989                                                                    ppsPageArray,
990                                                                    *puiPagesFromPool,
991                                                                    IMG_FALSE);
992                 }
993         }
994
995         return;
996 }
997
998 /* Defer free function to remove excess pages from the page pool.
999  * We do not need the bridge lock for this function */
1000 static PVRSRV_ERROR
1001 _CleanupThread_FreePoolPages(void *pvData)
1002 {
1003         PVRSRV_ERROR eError;
1004
1005         /* Free all that is necessary */
1006         eError = _FreeExcessPagesFromPool();
1007         if(eError != PVRSRV_OK)
1008         {
1009                 PVR_DPF((PVR_DBG_ERROR, "%s: _FreeExcessPagesFromPool failed", __func__));
1010                 goto e_exit;
1011         }
1012
1013         OSFreeMem(pvData);
1014
1015 e_exit:
1016         return eError;
1017 }
1018
1019 /* Signal the defer free thread that there are pages in the pool to be cleaned up.
1020  * MUST NOT HOLD THE PAGE POOL LOCK! */
1021 static void
1022 _SignalDeferFree(void)
1023 {
1024         PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn;
1025         psCleanupThreadFn = OSAllocMem(sizeof(*psCleanupThreadFn));
1026
1027         if(!psCleanupThreadFn)
1028         {
1029                 PVR_DPF((PVR_DBG_ERROR,
1030                                  "%s: Failed to get memory for deferred page pool cleanup. "
1031                                  "Trying to free pages immediately",
1032                                  __FUNCTION__));
1033                 goto e_oom_exit;
1034         }
1035
1036         psCleanupThreadFn->pfnFree = _CleanupThread_FreePoolPages;
1037         psCleanupThreadFn->pvData = psCleanupThreadFn;
1038         psCleanupThreadFn->ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
1039         psCleanupThreadFn->bDependsOnHW = IMG_FALSE;
1040         /* We must not hold the pool lock when calling AddWork because it might call us back to
1041          * free pooled pages directly when unloading the driver  */
1042         PVRSRVCleanupThreadAddWork(psCleanupThreadFn);
1043
1044         return;
1045
1046 e_oom_exit:
1047         {
1048                 /* In case we are not able to signal the defer free thread
1049                  * we have to cleanup the pool now. */
1050                 IMG_UINT32 uiPagesFreed;
1051
1052                 _PagePoolLock();
1053                 if (_FreePagesFromPoolUnlocked(g_ui32PagePoolEntryCount - g_ui32PagePoolMaxEntries,
1054                                                                            &uiPagesFreed) != PVRSRV_OK)
1055                 {
1056                         PVR_DPF((PVR_DBG_ERROR,
1057                                          "%s: Unable to free pooled pages!",
1058                                          __FUNCTION__));
1059                 }
1060                 _PagePoolUnlock();
1061
1062                 return;
1063         }
1064 }
1065
1066 /* Moves a page array to the page pool.
1067  *
1068  * If this function is successful the ppsPageArray is unusable and needs to be
1069  * reallocated in case the _PMR_OSPAGEARRAY_DATA_ will be reused.
1070  * This function expects cached pages to be not in the cache anymore,
1071  * invalidate them before, ideally without using the pool lock. */
1072 static IMG_BOOL
1073 _PutPagesToPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags,
1074                                                 struct page **ppsPageArray,
1075                                                 IMG_UINT32 uiEntriesInArray)
1076 {
1077         LinuxPagePoolEntry *psPagePoolEntry;
1078         struct list_head *psPoolHead = NULL;
1079
1080         /* Check if there is still space in the pool */
1081         if ( (g_ui32PagePoolEntryCount + uiEntriesInArray) >=
1082                  (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) )
1083         {
1084                 return IMG_FALSE;
1085         }
1086
1087         /* Get the correct list for this caching mode */
1088         if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead))
1089         {
1090                 return IMG_FALSE;
1091         }
1092
1093         /* Fill the new pool entry structure and add it to the pool list */
1094         psPagePoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL);
1095         psPagePoolEntry->ppsPageArray = ppsPageArray;
1096         psPagePoolEntry->uiItemsRemaining = uiEntriesInArray;
1097
1098         list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead);
1099
1100         /* Update counters */
1101         g_ui32PagePoolEntryCount += uiEntriesInArray;
1102
1103 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
1104         /* MemStats usually relies on having the bridge lock held, however
1105          * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
1106          * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
1107          * the page pool lock is used to ensure these calls are mutually
1108          * exclusive
1109          */
1110         PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * uiEntriesInArray);
1111 #endif
1112
1113         _DumpPoolStructure();
1114         return IMG_TRUE;
1115 }
1116
1117 /* Minimal amount of pages that will go to the pool, everything below is freed directly */
1118 #define PVR_LINUX_PHYSMEM_MIN_PAGES_TO_ADD_TO_POOL 16
1119
1120 /* Same as _PutPagesToPoolUnlocked but handles locking and checks whether the pages are
1121  * suitable to be stored in the page pool. */
1122 static inline IMG_BOOL
1123 _PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags,
1124                                           struct page **ppsPageArray,
1125                                           IMG_BOOL bUnpinned,
1126                                           IMG_UINT32 uiOrder,
1127                                           IMG_UINT32 uiNumPages)
1128 {
1129         if (uiOrder == 0 &&
1130                 !bUnpinned &&
1131                 uiNumPages >= PVR_LINUX_PHYSMEM_MIN_PAGES_TO_ADD_TO_POOL)
1132         {
1133                 _PagePoolLock();
1134
1135                 /* Try to quickly move page array to the pool */
1136                 if (_PutPagesToPoolUnlocked(ui32CPUCacheFlags,
1137                                                                         ppsPageArray,
1138                                                                         uiNumPages) )
1139                 {
1140                         if (g_ui32PagePoolEntryCount > (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxEntries_5Percent))
1141                         {
1142                                 /* Signal defer free to clean up excess pages from pool.
1143                                  * Allow a little excess before signalling to avoid oscillating behaviour */
1144                                 _PagePoolUnlock();
1145                                 _SignalDeferFree();
1146                         }
1147                         else
1148                         {
1149                                 _PagePoolUnlock();
1150                         }
1151
1152                         /* All done */
1153                         return IMG_TRUE;
1154                 }
1155
1156                 /* Could not move pages to pool, continue and free them now  */
1157                 _PagePoolUnlock();
1158         }
1159
1160         return IMG_FALSE;
1161 }
1162
1163 /* Get the GFP flags that we pass to the page allocator */
1164 static inline unsigned int
1165 _GetGFPFlags(PMR_OSPAGEARRAY_DATA *psPageArrayData)
1166 {
1167         struct device *psDev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice;
1168         unsigned int gfp_flags = 0;
1169         gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC;
1170
1171         if (*psDev->dma_mask == DMA_BIT_MASK(32))
1172         {
1173                 /* Limit to 32 bit.
1174                  * Achieved by NOT setting __GFP_HIGHMEM for 32 bit systems and
1175          * setting __GFP_DMA32 for 64 bit systems */
1176                 gfp_flags |= __GFP_DMA32;
1177         }
1178         else
1179         {
1180                 /* If our system is able to handle large addresses use highmem */
1181                 gfp_flags |= __GFP_HIGHMEM;
1182         }
1183
1184         if (psPageArrayData->bZero)
1185         {
1186                 gfp_flags |= __GFP_ZERO;
1187         }
1188
1189         return gfp_flags;
1190 }
1191
1192 /* Poison a page of order uiOrder with string taken from pacPoisonData*/
1193 static void
1194 _PoisonPages(struct page *page,
1195                          IMG_UINT32 uiOrder,
1196                          const IMG_CHAR *pacPoisonData,
1197                          size_t uiPoisonSize)
1198 {
1199         void *kvaddr;
1200         IMG_UINT32 uiSrcByteIndex;
1201         IMG_UINT32 uiDestByteIndex;
1202         IMG_UINT32 uiSubPageIndex;
1203         IMG_CHAR *pcDest;
1204
1205         uiSrcByteIndex = 0;
1206         for (uiSubPageIndex = 0; uiSubPageIndex < (1U << uiOrder); uiSubPageIndex++)
1207         {
1208                 kvaddr = kmap(page + uiSubPageIndex);
1209                 pcDest = kvaddr;
1210
1211                 for(uiDestByteIndex=0; uiDestByteIndex<PAGE_SIZE; uiDestByteIndex++)
1212                 {
1213                         pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
1214                         uiSrcByteIndex++;
1215                         if (uiSrcByteIndex == uiPoisonSize)
1216                         {
1217                                 uiSrcByteIndex = 0;
1218                         }
1219                 }
1220
1221                 flush_dcache_page(page);
1222                 kunmap(page + uiSubPageIndex);
1223         }
1224 }
1225
1226 static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
1227 static const IMG_UINT32 _AllocPoisonSize = 7;
1228 static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
1229 static const IMG_UINT32 _FreePoisonSize = 11;
1230
/* Allocate and initialise the structure to hold the metadata of the allocation.
 *
 * psDevNode          - device the allocation belongs to.
 * uiChunkSize        - size of one virtual chunk in bytes.
 * ui32NumPhysChunks  - unused here (referenced only to silence warnings).
 * ui32NumVirtChunks  - number of virtual chunks; total size is
 *                      uiChunkSize * ui32NumVirtChunks.
 * uiLog2DevPageSize  - log2 of the device page size; must be >= PAGE_SHIFT.
 * bZero/bIsCMA/bPoisonOnAlloc/bPoisonOnFree/bOnDemand
 *                    - allocation behaviour flags, stored for later phases.
 * ui32CPUCacheFlags  - CPU caching mode for the allocation.
 * ppsPageArrayDataPtr- out: the newly allocated metadata structure.
 *
 * Returns PVRSRV_OK on success; on failure everything allocated here is
 * released again and an error code is returned. */
static PVRSRV_ERROR
_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode,
				  PMR_SIZE_T uiChunkSize,
				  IMG_UINT32 ui32NumPhysChunks,
				  IMG_UINT32 ui32NumVirtChunks,
				  IMG_UINT32 uiLog2DevPageSize,
				  IMG_BOOL bZero,
				  IMG_BOOL bIsCMA,
				  IMG_BOOL bPoisonOnAlloc,
				  IMG_BOOL bPoisonOnFree,
				  IMG_BOOL bOnDemand,
				  IMG_UINT32 ui32CPUCacheFlags,
				  PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr)
{
	PVRSRV_ERROR eError;
	PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks;
	IMG_UINT32 uiNumOSPageSizeVirtPages;
	IMG_UINT32 uiNumDevPageSizeVirtPages;
	PMR_OSPAGEARRAY_DATA *psPageArrayData;
	PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);

	/* Sanity check of the alloc size: reject requests of 64GB or more */
	if (uiSize >= 0x1000000000ULL)
	{
		PVR_DPF((PVR_DBG_ERROR,
				 "%s: Do you really want 64GB of physical memory in one go? "
				 "This is likely a bug", __func__));
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto e_freed_none;
	}

	/* Check that we allocate the correct contiguity: the total size must be
	 * a multiple of the device page size */
	PVR_ASSERT(PAGE_SHIFT <= uiLog2DevPageSize);
	if ((uiSize & ((1ULL << uiLog2DevPageSize) - 1)) != 0)
	{
		PVR_DPF((PVR_DBG_ERROR,
				"Allocation size " PMR_SIZE_FMTSPEC " is not multiple of page size 2^%u !",
				 uiSize,
				 uiLog2DevPageSize));

		eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
		goto e_freed_none;
	}

	/* Use of cast below is justified by the assertion that follows to
	   prove that no significant bits have been truncated */
	uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1);
	PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize);
	uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2DevPageSize - PAGE_SHIFT);

	/* Allocate the struct to hold the metadata */
	psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL);
	if (psPageArrayData == NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,
				 "%s: OS refused the memory allocation for the private data.",
				 __func__));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto e_freed_none;
	}

	/*
	 * Allocate the page array
	 *
	 * We avoid tracking this memory because this structure might go into the page pool.
	 * The OS can drain the pool asynchronously and when doing that we have to avoid
	 * any potential deadlocks.
	 *
	 * In one scenario the process stats vmalloc hash table lock is held and then
	 * the oom-killer softirq is trying to call _ScanObjectsInPagePool(), it must not
	 * try to acquire the vmalloc hash table lock again.
	 */
	psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages);
	if (psPageArrayData->pagearray == NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto e_free_kmem_cache;
	}
	else
	{
		if (bIsCMA)
		{
			/* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */
			psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages);
			if (psPageArrayData->dmavirtarray == NULL)
			{
				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto e_free_pagearray;
			}

			psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages);
			if (psPageArrayData->dmaphysarray == NULL)
			{
				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto e_free_cpuvirtaddrarray;
			}
		}
	}

	/* Init metadata */
	psPageArrayData->psDevNode = psDevNode;
	psPageArrayData->iNumPagesAllocated = 0;
	psPageArrayData->uiTotalNumPages = uiNumOSPageSizeVirtPages;
	psPageArrayData->uiLog2DevPageSize = uiLog2DevPageSize;
	psPageArrayData->bZero = bZero;
	psPageArrayData->bIsCMA = bIsCMA;
	psPageArrayData->bOnDemand = bOnDemand;
	psPageArrayData->bUnpinned = IMG_FALSE;
	psPageArrayData->bPoisonOnFree = bPoisonOnFree;
	psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
	psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags;

	/* Indicate whether this is an allocation with default caching attribute (i.e cached) or not */
	if (PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags) ||
		PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags))
	{
		psPageArrayData->bUnsetMemoryType = IMG_TRUE;
	}
	else
	{
		psPageArrayData->bUnsetMemoryType = IMG_FALSE;
	}

	*ppsPageArrayDataPtr = psPageArrayData;
	return PVRSRV_OK;

/* Error path: labels cascade so each later failure unwinds all earlier
 * allocations. NOTE(review): the "page pointer table" message below is
 * printed for all three array-allocation failures, not just the first. */
e_free_cpuvirtaddrarray:
	OSFreeMemNoStats(psPageArrayData->dmavirtarray);

e_free_pagearray:
	OSFreeMemNoStats(psPageArrayData->pagearray);

e_free_kmem_cache:
	kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
	PVR_DPF((PVR_DBG_ERROR,
			 "%s: OS refused the memory allocation for the page pointer table. "
			 "Did you ask for too much?", 
			 __func__));

e_freed_none:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
1376
/* Perform CPU cache maintenance on a set of pages.
 *
 * For large requests a global cache flush is attempted first; if that is
 * unavailable or fails, fall back to per-page range-based maintenance.
 *
 * bFlush - IMG_TRUE: clean+invalidate (needed e.g. after zeroing so the
 *          cleared data reaches memory before invalidation);
 *          IMG_FALSE: invalidate only. */
static inline void
_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
					   struct page **ppsPage,
					   IMG_UINT32 uiNumPages,
					   IMG_BOOL bFlush)
{
	/* Initialised to RETRY so the per-page path below runs unless the
	 * global flush succeeds */
	PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY;
	IMG_UINT32 ui32Idx;

	/* Above the threshold a single full flush is cheaper than walking pages */
	if ((uiNumPages << PAGE_SHIFT) >= PVR_DIRTY_BYTES_FLUSH_THRESHOLD)
	{
		/* May fail so fallback to range-based flush */
		eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
	}

	if (eError != PVRSRV_OK)
	{
		for (ui32Idx = 0; ui32Idx < uiNumPages;  ++ui32Idx)
		{
			IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
			void *pvPageVAddr;

			/* kmap gives a kernel virtual address for the range operations;
			 * unmapped again below after the maintenance call */
			pvPageVAddr = kmap(ppsPage[ui32Idx]);
			sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]);
			sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;

			/* If we're zeroing, we need to make sure the cleared memory is pushed out
			   of the cache before the cache lines are invalidated */
			if (bFlush)
			{
				OSFlushCPUCacheRangeKM(psDevNode,
									   pvPageVAddr,
									   pvPageVAddr + PAGE_SIZE,
									   sCPUPhysAddrStart,
									   sCPUPhysAddrEnd);
			}
			else
			{
				OSInvalidateCPUCacheRangeKM(psDevNode,
											pvPageVAddr,
											pvPageVAddr + PAGE_SIZE,
											sCPUPhysAddrStart,
											sCPUPhysAddrEnd);
			}

			kunmap(ppsPage[ui32Idx]);
		}
	}
}
1426
/* Change the caching attribute of pages on x86 systems and takes care of
 * cache maintenance. This function is supposed to be called once for pages that
 * came from alloc_pages().
 *
 * Flush/Invalidate pages in case the allocation is not cached. Necessary to
 * remove pages from the cache that might be flushed later and corrupt memory.
 *
 * On x86 the set_pages_array_* calls perform the flushing implicitly, so the
 * explicit cache maintenance branch is skipped there.
 *
 * Returns PVRSRV_OK, or PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE if changing
 * the x86 page attributes fails. */
static inline PVRSRV_ERROR
_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
					   struct page **ppsPage,
					   IMG_UINT32 uiNumPages,
					   IMG_BOOL bFlush,
					   IMG_UINT32 ui32CPUCacheFlags)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags);
	IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags);
	IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags);

	if (ppsPage != NULL)
	{
#if defined (CONFIG_X86)
		/* On x86 we have to set page cache attributes for non-cached pages.
		 * The call is implicitly taking care of all flushing/invalidating
		 * and therefore we can skip the usual cache maintenance after this. */
		if (bCPUUncached || bCPUWriteCombine)
		{
			/*  On X86 if we already have a mapping (e.g. low memory) we need to change the mode of
				current mapping before we map it ourselves	*/
			int ret = IMG_FALSE;
			PVR_UNREFERENCED_PARAMETER(bFlush);

			switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
			{
				case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
					ret = set_pages_array_uc(ppsPage, uiNumPages);
					if (ret)
					{
						eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
						PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret));
					}
					break;

				case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
					ret = set_pages_array_wc(ppsPage, uiNumPages);
					if (ret)
					{
						eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
						PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret));
					}
					break;

				case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
					break;

				default:
					break;
			}
		}
		else
#endif
		/* Cache maintenance if:
		 *     cached && (cleanFlag || bFlush)
		 * OR
		 *     uncached || write-combine
		 * (On x86 this 'if' is the body of the 'else' above and only runs
		 * for cached allocations.)
		 */
		if ( (bCPUCached && (PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags) || bFlush)) ||
		      bCPUUncached || bCPUWriteCombine )
		{
			/*  We can be given pages which still remain in the cache.
				In order to make sure that the data we write through our mappings
				doesn't get overwritten by later cache evictions we invalidate the
				pages that are given to us.

				Note:
				This still seems to be true if we request cold pages, it's just less
				likely to be in the cache. */
			_ApplyCacheMaintenance(psDevNode,
								   ppsPage,
								   uiNumPages,
								   bFlush);
		}
	}

	return eError;
}
1512
1513 /* Same as _AllocOSPage except it uses DMA framework to perform allocation */
1514 static PVRSRV_ERROR
1515 _AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData,
1516                                 unsigned int gfp_flags,
1517                                 IMG_UINT32 ui32AllocOrder,
1518                                 IMG_UINT32 ui32MinOrder,
1519                                 IMG_UINT32 uiPageIndex)
1520 {
1521         void *virt_addr;
1522         struct page *page;
1523         dma_addr_t bus_addr;
1524         size_t alloc_size = PAGE_SIZE << ui32MinOrder;
1525         PVR_UNREFERENCED_PARAMETER(ui32AllocOrder);
1526         PVR_ASSERT(ui32AllocOrder == ui32MinOrder);
1527
1528         DisableOOMKiller();
1529         virt_addr = dma_alloc_coherent(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
1530                                                                   alloc_size,
1531                                                                   &bus_addr,
1532                                                                   gfp_flags);
1533         if (virt_addr == NULL)
1534         {
1535                 /* The idea here is primarily to support some older kernels with
1536                    broken or non-functioning DMA/CMA implementations (< Linux-3.4)
1537                    and to also handle DMA/CMA allocation failures by attempting a
1538                    normal page allocation though we expect dma_alloc_coherent()
1539                    already attempts this internally also before failing but
1540                    nonetheless it does no harm to retry allocation ourself */
1541                 page = alloc_pages(gfp_flags, ui32AllocOrder);
1542                 if (page)
1543                 {
1544                         /* Taint bus_addr as alloc_page, needed when freeing;
1545                            also acquire the low memory page address only, this
1546                            prevents mapping possible high memory pages into
1547                            kernel virtual address space which might exhaust
1548                            the VMALLOC address space */
1549                         bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
1550                         virt_addr = page_address(page);
1551                 }
1552                 else
1553                 {
1554                         return PVRSRV_ERROR_OUT_OF_MEMORY;
1555                 }
1556         }
1557         else
1558         {
1559                 page = pfn_to_page(bus_addr >> PAGE_SHIFT);
1560         }
1561         EnableOOMKiller();
1562
1563         /* Convert OSPageSize-based index into DevicePageSize-based index */
1564         psPageArrayData->dmavirtarray[uiPageIndex >> ui32MinOrder] = virt_addr;
1565         psPageArrayData->dmaphysarray[uiPageIndex >> ui32MinOrder] = bus_addr;
1566         psPageArrayData->pagearray[uiPageIndex >> ui32MinOrder] = page;
1567
1568         return PVRSRV_OK;
1569 }
1570
1571 /* Allocate a page of order uiAllocOrder and stores it in the page array ppsPage at
1572  * position uiPageIndex.
1573  *
1574  * If the order is higher than 0, it splits the page into multiples and
1575  * stores them at position uiPageIndex to uiPageIndex+(1<<uiAllocOrder). */
1576 static PVRSRV_ERROR
1577 _AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData,
1578                         unsigned int gfp_flags,
1579                         IMG_UINT32 uiAllocOrder,
1580                         IMG_UINT32 uiMinOrder,
1581                         IMG_UINT32 uiPageIndex)
1582 {
1583         struct page *psPage;
1584         IMG_UINT32 ui32Count;
1585
1586         /* Allocate the page */
1587         DisableOOMKiller();
1588         psPage = alloc_pages(gfp_flags, uiAllocOrder);
1589         EnableOOMKiller();
1590
1591         if (psPage == NULL)
1592         {
1593                 return PVRSRV_ERROR_OUT_OF_MEMORY;
1594         }
1595
1596 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
1597         /* In case we need to, split the higher order page;
1598            this should only be used for order-0 allocations
1599            as higher order allocations should use DMA/CMA */
1600         if (uiAllocOrder != 0)
1601         {
1602                 split_page(psPage, uiAllocOrder);
1603         }
1604 #endif
1605
1606         /* Store the page (or multiple split pages) in the page array */
1607         for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++)
1608         {
1609                 psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]);
1610         }
1611
1612         return PVRSRV_OK;
1613 }
1614
/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons.
 *
 * Firstly to support device pages which are larger than OS. By asking the OS for 2^N
 * order OS pages at a time we guarantee the device page is contiguous.
 *
 * Secondly for performance where we may ask for 2^N order pages to reduce the number
 * of calls to alloc_pages, and thus reduce time for huge allocations.
 *
 * Regardless of page order requested, we need to break them down to track _OS pages.
 * The maximum order requested is increased if all max order allocations were successful.
 * If any request fails we reduce the max order.
 */
static PVRSRV_ERROR
_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
{
	PVRSRV_ERROR eError;
	IMG_UINT32 uiArrayIndex = 0;             /* Next free slot in the page array */
	IMG_UINT32 ui32Order;                    /* Current allocation order (adaptive) */
	IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
	IMG_BOOL bIncreaseMaxOrder = IMG_TRUE;   /* Cleared on any throttled/small allocation */

	IMG_UINT32 ui32NumPageReq;               /* Pages obtained per alloc call (1 << ui32Order) */
	IMG_UINT32 uiPagesToAlloc;
	IMG_UINT32 uiPagesFromPool = 0;

	unsigned int gfp_flags = _GetGFPFlags(psPageArrayData);
	IMG_UINT32 ui32GfpFlags;
	/* Opportunistic high-order requests must fail fast rather than trigger
	   reclaim/retry, since we can always fall back to a lower order */
	IMG_UINT32 ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY);

	struct page **ppsPageArray = psPageArrayData->pagearray;
	struct page **ppsPageAttributeArray = NULL;

	uiPagesToAlloc = psPageArrayData->uiTotalNumPages;

	/* Try to get pages from the pool since it is faster;
	   the page pool currently only supports zero-order pages
	   thus currently excludes all DMA/CMA allocated memory */
	_GetPagesFromPoolLocked(psPageArrayData->psDevNode,
							psPageArrayData->ui32CPUCacheFlags,
							uiPagesToAlloc,
							ui32MinOrder,
							psPageArrayData->bZero,
							ppsPageArray,
							&uiPagesFromPool);

	uiArrayIndex = uiPagesFromPool;

	if ((uiPagesToAlloc - uiPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD)
	{	/* Small allocations: Ask for one device page at a time */
		ui32Order = ui32MinOrder;
		bIncreaseMaxOrder = IMG_FALSE;
	}
	else
	{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
		/* Large zero-order or none zero-order allocations, ask for
		   MAX(max-order,min-order) order pages at a time; alloc
		   failures throttles this down to ZeroOrder allocations */
		ui32Order = MAX(g_uiMaxOrder, ui32MinOrder);
#else
		/* Because split_pages() is not available on older kernels
		   we cannot mix-and-match any-order pages in the PMR;
		   only same-order pages must be present in page array.
		   So we unconditionally force it to use ui32MinOrder on
		   these older kernels */
		ui32Order = ui32MinOrder;
#endif
	}

	/* Only if asking for more contiguity than we actually need, let it fail */
	ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
	ui32NumPageReq = (1 << ui32Order);

	/* Fill the remainder of the page array, adapting the order downwards
	   on failure and near the end of the array */
	while (uiArrayIndex < uiPagesToAlloc)
	{
		IMG_UINT32 ui32PageRemain = uiPagesToAlloc - uiArrayIndex;

		while (ui32NumPageReq > ui32PageRemain)
		{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
			/* Pages to request is larger than that remaining
			   so ask for less so never over allocate */
			ui32Order = MAX(ui32Order >> 1,ui32MinOrder);
#else
			/* Pages to request is larger than that remaining so
			   do nothing thus over allocate as we do not support
			   mix/match of any-order pages in PMR page-array in
			   older kernels (simplifies page free logic) */
			PVR_ASSERT(ui32Order == ui32MinOrder);
#endif
			ui32NumPageReq = (1 << ui32Order);
			ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
		}

		if (psPageArrayData->bIsCMA)
		{
			/* As the DMA/CMA framework rounds-up request to the
			   next power-of-two, we request multiple uiMinOrder
			   pages to satisfy allocation request in order to
			   minimise wasting memory */
			eError =  _AllocOSPage_CMA(psPageArrayData,
									   ui32GfpFlags,
									   ui32Order,
									   ui32MinOrder,
									   uiArrayIndex);
		}
		else
		{
			/* Allocate uiOrder pages at uiArrayIndex */
			eError = _AllocOSPage(psPageArrayData,
								  ui32GfpFlags,
								  ui32Order,
								  ui32MinOrder,
								  uiArrayIndex);
		}

		if (eError == PVRSRV_OK)
		{
			/* Successful request. Move onto next. */
			uiArrayIndex += ui32NumPageReq;
		}
		else
		{
			if (ui32Order > ui32MinOrder)
			{
				/* Last request failed. Let's ask for less next time */
				ui32Order = MAX(ui32Order >> 1,ui32MinOrder);
				bIncreaseMaxOrder = IMG_FALSE;
				ui32NumPageReq = (1 << ui32Order);
				ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
				/* Throttle the global max order so future large
				   allocations start from the order that worked */
				g_uiMaxOrder = ui32Order;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
				/* We should not trigger this code path in older kernels,
				   this is enforced by ensuring ui32Order == ui32MinOrder */
				PVR_ASSERT(ui32Order == ui32MinOrder);
#endif
			}
			else
			{
				/* Failed to alloc pages at required contiguity. Failed allocation */
				PVR_DPF((PVR_DBG_ERROR, "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u (%s)",
								__FUNCTION__,
								uiArrayIndex,
								uiPagesToAlloc,
								ui32GfpFlags,
								ui32Order,
								PVRSRVGetErrorStringKM(eError)));
				eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
				goto e_free_pages;
			}
		}
	}

	if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM))
	{	/* All successful allocations on max order. Let's ask for more next time */
		g_uiMaxOrder++;
	}

	/* Construct table of page pointers to apply attributes */
	ppsPageAttributeArray = &ppsPageArray[uiPagesFromPool];
	if (psPageArrayData->bIsCMA)
	{
		IMG_UINT32 uiIdx, uiIdy, uiIdz;

		/* For CMA the page array holds one entry per device page, so
		   expand it into a temporary per-OS-page table for the cache
		   attribute functions */
		ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiPagesToAlloc);
		if (ppsPageAttributeArray == NULL)
		{
			PVR_DPF((PVR_DBG_ERROR, "Failed OSAllocMem() for page attributes table"));
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto e_free_pages;
		}

		for (uiIdx = 0; uiIdx < uiPagesToAlloc; uiIdx += ui32NumPageReq)
		{
			uiIdy = uiIdx >> ui32Order;
			for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++)
			{
				ppsPageAttributeArray[uiIdx+uiIdz] = psPageArrayData->pagearray[uiIdy];
				ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz;
			}
		}
	}

	/* Do the cache management as required; pool pages already carry the
	   right attributes so only the newly allocated tail is processed */
	eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
									ppsPageAttributeArray,
									uiPagesToAlloc - uiPagesFromPool,
									psPageArrayData->bZero,
									psPageArrayData->ui32CPUCacheFlags);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
		goto e_free_pages;
	}
	else
	{
		if (psPageArrayData->bIsCMA)
		{
			OSFreeMem(ppsPageAttributeArray);
		}
	}

	/* Update metadata */
	psPageArrayData->iNumPagesAllocated = psPageArrayData->uiTotalNumPages;
	return PVRSRV_OK;

/* Error path */
e_free_pages:
	{
		IMG_UINT32 ui32PageToFree;

		if (psPageArrayData->bIsCMA)
		{
			/* CMA arrays are indexed per device page, hence the shift */
			IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order;
			IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order;
			PVR_ASSERT(ui32Order == ui32MinOrder);

			if (ppsPageAttributeArray)
			{
				OSFreeMem(ppsPageAttributeArray);
			}

			for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++)
			{
				_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
								uiDevPageSize,
								ui32MinOrder,
								psPageArrayData->dmavirtarray[ui32PageToFree],
								psPageArrayData->dmaphysarray[ui32PageToFree],
								ppsPageArray[ui32PageToFree]);
				psPageArrayData->dmaphysarray[ui32PageToFree]= (dma_addr_t)0;
				psPageArrayData->dmavirtarray[ui32PageToFree] = NULL;
				ppsPageArray[ui32PageToFree] = INVALID_PAGE;
			}
		}
		else
		{
			/* Free the pages we got from the pool */
			for(ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
			{
				_FreeOSPage(ui32MinOrder,
							psPageArrayData->bUnsetMemoryType,
							ppsPageArray[ui32PageToFree]);
				ppsPageArray[ui32PageToFree] = INVALID_PAGE;
			}

			/* Free the freshly allocated tail; NOTE(review): these are
			   freed with IMG_FALSE (no attribute reset) — valid when
			   attributes were never applied, but if we reach here from
			   the _ApplyOSPagesAttribute failure the attributes may be
			   partially set on x86; confirm against _FreeOSPage. */
			for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++)
			{
				_FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]);
				ppsPageArray[ui32PageToFree] = INVALID_PAGE;
			}
		}

		return eError;
	}
}
1871
1872 /* Allocation of OS pages: This function is used for sparse allocations.
1873  *
1874  * Sparse allocations provide only a proportion of sparse physical backing within the total
1875  * virtual range. Currently we only support sparse allocations on device pages that are OS
1876  * page sized.
1877 */
1878 static PVRSRV_ERROR
1879 _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
1880                                          IMG_UINT32 *puiAllocIndices,
1881                                          IMG_UINT32 uiPagesToAlloc)
1882 {
1883         PVRSRV_ERROR eError;
1884         IMG_UINT32 i;
1885         struct page **ppsPageArray = psPageArrayData->pagearray;
1886         IMG_UINT32 uiOrder;
1887         IMG_UINT32 uiPagesFromPool = 0;
1888         unsigned int gfp_flags = _GetGFPFlags(psPageArrayData);
1889
1890          /* We use this page array to receive pages from the pool and then reuse it afterwards to
1891          * store pages that need their cache attribute changed on x86*/
1892         struct page **ppsTempPageArray;
1893         IMG_UINT32 uiTempPageArrayIndex = 0;
1894
1895         /* Allocate the temporary page array that we need here to receive pages
1896          * from the pool and to store pages that need their caching attributes changed */
1897         ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiPagesToAlloc);
1898         if (ppsTempPageArray == NULL)
1899         {
1900                 PVR_DPF((PVR_DBG_ERROR, "%s: Failed metadata allocation", __FUNCTION__));
1901                 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
1902                 goto e_exit;
1903         }
1904
1905         uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
1906
1907         /* Check the requested number of pages if they fit in the page array */
1908         if(psPageArrayData->uiTotalNumPages < \
1909                                 (psPageArrayData->iNumPagesAllocated + uiPagesToAlloc))
1910         {
1911                 PVR_DPF((PVR_DBG_ERROR,
1912                                  "%s: Trying to allocate more pages than this buffer can handle, "
1913                                  "Request + Allocated < Max! Request %u, Allocated %u, Max %u.",
1914                                  __FUNCTION__,
1915                                  uiPagesToAlloc,
1916                                  psPageArrayData->iNumPagesAllocated,
1917                                  psPageArrayData->uiTotalNumPages));
1918                 eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
1919                 goto e_free_temp_array;
1920         }
1921
1922         /* Try to get pages from the pool since it is faster */
1923         _GetPagesFromPoolLocked(psPageArrayData->psDevNode,
1924                                                         psPageArrayData->ui32CPUCacheFlags,
1925                                                         uiPagesToAlloc,
1926                                                         uiOrder,
1927                                                         psPageArrayData->bZero,
1928                                                         ppsTempPageArray,
1929                                                         &uiPagesFromPool);
1930
1931         /* Allocate pages from the OS or move the pages that we got from the pool
1932          * to the page array */
1933         DisableOOMKiller();
1934         for (i = 0; i < uiPagesToAlloc; i++)
1935         {
1936                 /* Check if the indices we are allocating are in range */
1937                 if (puiAllocIndices[i] >= psPageArrayData->uiTotalNumPages)
1938                 {
1939                         PVR_DPF((PVR_DBG_ERROR,
1940                                          "%s: Given alloc index %u at %u is larger than page array %u.",
1941                                          __FUNCTION__,
1942                                          i,
1943                                          puiAllocIndices[i],
1944                                          psPageArrayData->uiTotalNumPages));
1945                         eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
1946                         goto e_free_pages;
1947                 }
1948
1949                 /* Check if there is not already a page allocated at this position */
1950                 if (INVALID_PAGE != ppsPageArray[puiAllocIndices[i]])
1951                 {
1952                         PVR_DPF((PVR_DBG_ERROR,
1953                                          "%s: Mapping number %u at page array index %u already exists",
1954                                          __func__,
1955                                          i,
1956                                          puiAllocIndices[i]));
1957                         eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
1958                         goto e_free_pages;
1959                 }
1960
1961                 /* Finally assign a page to the array.
1962                  * Either from the pool or allocate a new one. */
1963                 if (uiPagesFromPool != 0)
1964                 {
1965                         uiPagesFromPool--;
1966                         ppsPageArray[puiAllocIndices[i]] =  ppsTempPageArray[uiPagesFromPool];
1967                 }
1968                 else
1969                 {
1970                         ppsPageArray[puiAllocIndices[i]] = alloc_pages(gfp_flags, uiOrder);
1971                         if(ppsPageArray[puiAllocIndices[i]] != NULL)
1972                         {
1973                                 /* Reusing the temp page array if it has no pool pages anymore */
1974                                 ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]];
1975                                 uiTempPageArrayIndex++;
1976                         }
1977                         else
1978                         {
1979                                 /* Failed to alloc pages at required contiguity. Failed allocation */
1980                                 PVR_DPF((PVR_DBG_ERROR,
1981                                                  "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u",
1982                                                  __FUNCTION__,
1983                                                  i,
1984                                                  uiPagesToAlloc,
1985                                                  gfp_flags,
1986                                                  uiOrder));
1987                                 eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
1988                                 goto e_free_pages;
1989                         }
1990                 }
1991         }
1992         EnableOOMKiller();
1993
1994         /* Do the cache management as required */
1995         eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
1996                                                                         ppsTempPageArray,
1997                                                                         uiTempPageArrayIndex,
1998                                                                         psPageArrayData->bZero,
1999                                                                         psPageArrayData->ui32CPUCacheFlags);
2000         if (eError != PVRSRV_OK)
2001         {
2002                 PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
2003                 goto e_free_pages;
2004         }
2005
2006         /* Update metadata */
2007         psPageArrayData->iNumPagesAllocated += uiPagesToAlloc;
2008
2009         /* Free temporary page array */
2010         OSFreeMem(ppsTempPageArray);
2011         return PVRSRV_OK;
2012
2013 /* Error path */
2014 e_free_pages:
2015         {
2016                 IMG_UINT32 ui32PageToFree;
2017
2018                 EnableOOMKiller();
2019
2020                 /* Free the pages we got from the pool */
2021                 for(ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
2022                 {
2023                         _FreeOSPage(0,
2024                                                 psPageArrayData->bUnsetMemoryType,
2025                                                 ppsTempPageArray[ui32PageToFree]);
2026                 }
2027
2028                 /* Free the pages we just allocated from the OS */
2029                 for(ui32PageToFree = uiPagesFromPool; ui32PageToFree < i; ui32PageToFree++)
2030                 {
2031                         _FreeOSPage(0,
2032                                                 IMG_FALSE,
2033                                                 ppsPageArray[puiAllocIndices[ui32PageToFree]]);
2034
2035                         ppsPageArray[puiAllocIndices[ui32PageToFree]] = (struct page *) INVALID_PAGE;
2036                 }
2037         }
2038
2039 e_free_temp_array:
2040         OSFreeMem(ppsTempPageArray);
2041
2042 e_exit:
2043         return eError;
2044 }
2045
2046 /* Allocate pages for a given page array.
2047  *
2048  * The executed allocation path depends whether an array with allocation
2049  * indices has been passed or not */
2050 static PVRSRV_ERROR
2051 _AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
2052                           IMG_UINT32 *puiAllocIndices,
2053                           IMG_UINT32 uiPagesToAlloc)
2054 {
2055         PVRSRV_ERROR eError;
2056         IMG_UINT32 i;
2057         struct page **ppsPageArray;
2058
2059         /* Sanity checks */
2060         PVR_ASSERT(NULL != psPageArrayData);
2061         if (psPageArrayData->bIsCMA)
2062         {
2063                 PVR_ASSERT(psPageArrayData->dmaphysarray != NULL);
2064                 PVR_ASSERT(psPageArrayData->dmavirtarray != NULL);
2065         }
2066         PVR_ASSERT(psPageArrayData->pagearray != NULL);
2067         PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
2068
2069         ppsPageArray = psPageArrayData->pagearray;
2070
2071         /* Go the sparse alloc path if we have an array with alloc indices.*/
2072         if (puiAllocIndices != NULL)
2073         {
2074                 eError =  _AllocOSPages_Sparse(psPageArrayData,
2075                                                                            puiAllocIndices,
2076                                                                            uiPagesToAlloc);
2077         }
2078         else
2079         {
2080                 eError =  _AllocOSPages_Fast(psPageArrayData);
2081         }
2082
2083         if (eError != PVRSRV_OK)
2084         {
2085                 goto e_exit;
2086         }
2087
2088         if (psPageArrayData->bPoisonOnAlloc)
2089         {
2090                 for (i = 0; i < uiPagesToAlloc; i++)
2091                 {
2092                         IMG_UINT32 uiIdx = puiAllocIndices ? puiAllocIndices[i] : i;
2093                         _PoisonPages(ppsPageArray[uiIdx],
2094                                                  0,
2095                                                  _AllocPoison,
2096                                                  _AllocPoisonSize);
2097                 }
2098         }
2099
2100         _DumpPageArray(ppsPageArray, psPageArrayData->uiTotalNumPages);
2101
2102 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
2103 #if defined(PVRSRV_ENABLE_MEMORY_STATS)
2104         {
2105                 for (i = 0; i < uiPagesToAlloc; i++)
2106                 {
2107                         IMG_CPU_PHYADDR sCPUPhysAddr;
2108                         IMG_UINT32 uiIdx = puiAllocIndices ? puiAllocIndices[i] : i;
2109
2110                         sCPUPhysAddr.uiAddr = page_to_phys(ppsPageArray[uiIdx]);
2111                         PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
2112                                                                                  NULL,
2113                                                                                  sCPUPhysAddr,
2114                                                                                  1 << psPageArrayData->uiLog2DevPageSize,
2115                                                                                  NULL);
2116                 }
2117         }
2118 #else
2119         PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, uiPagesToAlloc * PAGE_SIZE);
2120 #endif
2121 #endif
2122
2123         PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData));
2124         return PVRSRV_OK;
2125
2126 e_exit:
2127         return eError;
2128 }
2129
2130 /* Same as _FreeOSPage except free memory using DMA framework */
2131 static INLINE void
2132 _FreeOSPage_CMA(struct device *dev,
2133                                 size_t alloc_size,
2134                                 IMG_UINT32 uiOrder,
2135                                 void *virt_addr,
2136                                 dma_addr_t dev_addr,
2137                                 struct page *psPage)
2138 {
2139         if (DMA_IS_ALLOCPG_ADDR(dev_addr))
2140         {
2141 #if defined(CONFIG_X86)
2142                 void *pvPageVAddr = page_address(psPage);
2143                 if (pvPageVAddr)
2144                 {
2145                         int ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
2146                         if (ret)
2147                         {
2148                                 PVR_DPF((PVR_DBG_ERROR, 
2149                                                 "%s: Failed to reset page attribute",
2150                                                 __FUNCTION__));
2151                         }
2152                 }
2153 #endif
2154                 __free_pages(psPage, uiOrder);
2155         }
2156         else
2157         {
2158                 dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr));
2159         }
2160 }
2161
2162 /* Free a single page back to the OS.
2163  * Make sure the cache type is set back to the default value.
2164  *
2165  * Note:
2166  * We must _only_ check bUnsetMemoryType in the case where we need to free
2167  * the page back to the OS since we may have to revert the cache properties
2168  * of the page to the default as given by the OS when it was allocated. */
2169 static void
2170 _FreeOSPage(IMG_UINT32 uiOrder,
2171                         IMG_BOOL bUnsetMemoryType,
2172                         struct page *psPage)
2173 {
2174
2175 #if defined(CONFIG_X86)
2176         void *pvPageVAddr;
2177         pvPageVAddr = page_address(psPage);
2178
2179         if (pvPageVAddr && bUnsetMemoryType == IMG_TRUE)
2180         {
2181                 int ret;
2182
2183                 ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
2184                 if (ret)
2185                 {
2186                         PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute", __FUNCTION__));
2187                 }
2188         }
2189 #else
2190         PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType);
2191 #endif
2192         __free_pages(psPage, uiOrder);
2193 }
2194
2195 /* Free the struct holding the metadata */
2196 static PVRSRV_ERROR
2197 _FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData)
2198 {
2199         PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData));
2200
2201         /* Check if the page array actually still exists.
2202          * It might be the case that has been moved to the page pool */
2203         if (psPageArrayData->pagearray != NULL)
2204         {
2205                 OSFreeMemNoStats(psPageArrayData->pagearray);
2206         }
2207
2208         kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
2209
2210         return PVRSRV_OK;
2211 }
2212
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* _FreeOSPages_MemStats: Depends on the bridge lock already being held.
 *
 * Removes the process-stats accounting for ui32NumPages pages that are
 * about to be freed. With PVRSRV_ENABLE_MEMORY_STATS a per-page record is
 * removed (keyed by physical address); otherwise only the aggregate
 * UMA-pages counter is decremented. pai32FreeIndices may be NULL, meaning
 * pages 0..ui32NumPages-1 of the array are affected. */
static void
_FreeOSPages_MemStats(PMR_OSPAGEARRAY_DATA *psPageArrayData,
					IMG_UINT32 *pai32FreeIndices,
					IMG_UINT32 ui32NumPages)
{
	struct page **ppsPageArray;
	#if defined(PVRSRV_ENABLE_MEMORY_STATS)
	IMG_UINT32 ui32PageIndex;
	#endif

	PVR_DPF((PVR_DBG_MESSAGE, "%s: psPageArrayData %p, ui32NumPages %u", __FUNCTION__, psPageArrayData, ui32NumPages));
	PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);

	ppsPageArray = psPageArrayData->pagearray;

/* NOTE(review): this inner PVRSRV_ENABLE_PROCESS_STATS guard is redundant —
 * the whole function is already compiled under that flag above. Harmless. */
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
		/* Aggregate accounting only: assumes one OS (PAGE_SIZE) page per entry */
		PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, ui32NumPages * PAGE_SIZE);
#else
		/* Per-allocation records: remove one record per page, looked up by
		 * the page's CPU physical address */
		for(ui32PageIndex = 0; ui32PageIndex < ui32NumPages; ui32PageIndex++)
		{
			IMG_CPU_PHYADDR sCPUPhysAddr;
			IMG_UINT32 uiArrayIndex = (pai32FreeIndices) ? pai32FreeIndices[ui32PageIndex] : ui32PageIndex;

			sCPUPhysAddr.uiAddr = page_to_phys(ppsPageArray[uiArrayIndex]);
			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, sCPUPhysAddr.uiAddr);
		}
#endif
#endif
}
#endif /* PVRSRV_ENABLE_PROCESS_STATS */
2246
/* Free all or some pages from a sparse page array.
 *
 * pai32FreeIndices lists the array slots to free (NULL means scan the whole
 * array). Valid pages are first compacted into a temporary contiguous array;
 * that array is then either handed to the page pool (which takes ownership
 * of it on success) or its pages are freed back to the OS. Freed slots in
 * the sparse array are marked INVALID_PAGE. */
static PVRSRV_ERROR
_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
					IMG_UINT32 *pai32FreeIndices,
					IMG_UINT32 ui32FreePageCount)
{
	IMG_BOOL bSuccess;
	IMG_UINT32 uiOrder;
	IMG_UINT32 uiPageIndex, i = 0, uiTempIdx;
	struct page **ppsPageArray;
	IMG_UINT32 uiNumPages;

	struct page **ppsTempPageArray;
	IMG_UINT32 uiTempArraySize;

	/* We really should have something to free before we call this */
	PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);

	if(pai32FreeIndices == NULL)
	{
		/* No index list: scan every slot, but the compacted array only
		 * needs room for the pages actually allocated */
		uiNumPages = psPageArrayData->uiTotalNumPages;
		uiTempArraySize = psPageArrayData->iNumPagesAllocated;
	}
	else
	{
		uiNumPages = ui32FreePageCount;
		uiTempArraySize = ui32FreePageCount;
	}

	/* OSAllocMemNoStats required because this code may be run without the bridge lock held */
	ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize);
	if (ppsTempPageArray == NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __FUNCTION__));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	ppsPageArray = psPageArrayData->pagearray;
	uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;

	/* Poison if necessary */
	if (psPageArrayData->bPoisonOnFree)
	{
		for (i  = 0; i  < uiNumPages; i ++)
		{
			uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i ;
			if(INVALID_PAGE != ppsPageArray[uiPageIndex])
			{
				_PoisonPages(ppsPageArray[uiPageIndex],
							 0,
							 _FreePoison,
							 _FreePoisonSize);
			}
		}
	}

	/* Put pages in a contiguous array so further processing is easier */
	uiTempIdx = 0;
	for (i = 0; i < uiNumPages; i++)
	{
		uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i;
		if(INVALID_PAGE != ppsPageArray[uiPageIndex])
		{
			ppsTempPageArray[uiTempIdx] = ppsPageArray[uiPageIndex];
			uiTempIdx++;
			/* Slot is now empty in the sparse array */
			ppsPageArray[uiPageIndex] = (struct page *) INVALID_PAGE;
		}
	}

	/* Try to move the temp page array to the pool.
	 * On success the pool takes ownership of ppsTempPageArray itself,
	 * so it must not be freed below. */
	bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
									 ppsTempPageArray,
									 psPageArrayData->bUnpinned,
									 uiOrder,
									 uiTempIdx);
	if (bSuccess)
	{
		goto exit_ok;
	}

	/* Free pages and reset page caching attributes on x86 */
#if defined(CONFIG_X86)
	if (uiTempIdx != 0 && psPageArrayData->bUnsetMemoryType == IMG_TRUE)
	{
		int iError;
		iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx);

		if (iError)
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
		}
	}
#endif

	/* Free the pages */
	for (i = 0; i < uiTempIdx; i++)
	{
		__free_pages(ppsTempPageArray[i], uiOrder);
	}

	/* Free the temp page array here if it did not move to the pool */
	OSFreeMemNoStats(ppsTempPageArray);

exit_ok:
	/* Update metadata; uiTempIdx is the number of pages actually removed */
	psPageArrayData->iNumPagesAllocated -= uiTempIdx;
	PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
	return PVRSRV_OK;
}
2356
/* Free all the pages in a page array (non-sparse fast path).
 *
 * First tries to hand the whole array to the page pool; on success the
 * pool takes ownership of the array itself ('pagearray' is NULLed).
 * Otherwise pages are released either through the DMA/CMA path or, for
 * normal allocations, page by page back to the OS. */
static PVRSRV_ERROR
_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
{
	IMG_BOOL bSuccess;
	IMG_UINT32 uiOrder;
	IMG_UINT32 i = 0;
	IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumPages;

	struct page **ppsPageArray = psPageArrayData->pagearray;
	uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;

	/* We really should have something to free before we call this */
	PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);

	/* Poison pages if necessary */
	if (psPageArrayData->bPoisonOnFree)
	{
		for (i = 0; i < uiNumPages; i++)
		{
			_PoisonPages(ppsPageArray[i],
						 0,
						 _FreePoison,
						 _FreePoisonSize);
		}
	}

	/* Try to move the page array to the pool */
	bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
									 ppsPageArray,
									 psPageArrayData->bUnpinned,
									 uiOrder,
									 uiNumPages);
	if (bSuccess)
	{
		/* Pool now owns the array; drop our reference to it */
		psPageArrayData->pagearray = NULL;
		goto exit_ok;
	}

	if (psPageArrayData->bIsCMA)
	{
		/* CMA/DMA allocations are tracked per device page (order > 0
		 * possible), so convert counts/sizes accordingly */
		IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder;
		IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;

		for (i = 0; i < uiDevNumPages; i++)
		{
			_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
							uiDevPageSize,
							uiOrder,
							psPageArrayData->dmavirtarray[i],
							psPageArrayData->dmaphysarray[i],
							ppsPageArray[i]);
			/* Clear all bookkeeping for the freed device page */
			psPageArrayData->dmaphysarray[i] = (dma_addr_t)0;
			psPageArrayData->dmavirtarray[i] = NULL;
			ppsPageArray[i] = INVALID_PAGE;
		}
	}
	else
	{
#if defined(CONFIG_X86)
		/* Restore write-back caching in one batched call before freeing */
		if (psPageArrayData->bUnsetMemoryType == IMG_TRUE)
		{
			int ret;

			ret = set_pages_array_wb(ppsPageArray, uiNumPages);
			if (ret)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
			}
		}
#endif

		/* IMG_FALSE: attributes were already reset above (batched) */
		for (i = 0; i < uiNumPages; i++)
		{
			_FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]);
			ppsPageArray[i] = INVALID_PAGE;
		}
	}

exit_ok:
	/* Update metadata */
	psPageArrayData->iNumPagesAllocated = 0;
	return PVRSRV_OK;
}
2441
2442 /* Free pages from a page array.
2443  * Takes care of mem stats and chooses correct free path depending on parameters. */
2444 static PVRSRV_ERROR
2445 _FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
2446                          IMG_UINT32 *pai32FreeIndices,
2447                          IMG_UINT32 ui32FreePageCount)
2448 {
2449         PVRSRV_ERROR eError;
2450         IMG_UINT32 uiNumPages;
2451
2452         /* Check how many pages do we have to free */
2453         if(pai32FreeIndices == NULL)
2454         {
2455                 uiNumPages = psPageArrayData->iNumPagesAllocated;
2456         }
2457         else
2458         {
2459                 uiNumPages = ui32FreePageCount;
2460         }
2461
2462 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
2463         _FreeOSPages_MemStats(psPageArrayData, pai32FreeIndices, uiNumPages);
2464 #endif
2465
2466         /* Go the sparse or non-sparse path */
2467         if (psPageArrayData->iNumPagesAllocated != psPageArrayData->uiTotalNumPages
2468                 || pai32FreeIndices != NULL)
2469         {
2470                 eError = _FreeOSPages_Sparse(psPageArrayData,
2471                                                                          pai32FreeIndices,
2472                                                                          uiNumPages);
2473         }
2474         else
2475         {
2476                 eError = _FreeOSPages_Fast(psPageArrayData);
2477         }
2478
2479         if(eError != PVRSRV_OK)
2480         {
2481                 PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed"));
2482         }
2483
2484         _DumpPageArray(psPageArrayData->pagearray, psPageArrayData->uiTotalNumPages);
2485
2486         return eError;
2487 }
2488
2489 /*
2490  *
2491  * Implementation of callback functions
2492  *
2493  */
2494
2495 /* destructor func is called after last reference disappears, but
2496    before PMR itself is freed. */
2497 static PVRSRV_ERROR
2498 PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv)
2499 {
2500         PVRSRV_ERROR eError;
2501         PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
2502
2503
2504         /*  We can't free pages until now. */
2505         if (psOSPageArrayData->iNumPagesAllocated != 0)
2506         {
2507                 _PagePoolLock();
2508                 if (psOSPageArrayData->bUnpinned == IMG_TRUE)
2509                 {
2510                         _RemoveUnpinListEntryUnlocked(psOSPageArrayData);
2511                 }
2512                 _PagePoolUnlock();
2513
2514                 eError = _FreeOSPages(psOSPageArrayData,
2515                                                           NULL,
2516                                                           0);
2517                 PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
2518         }
2519
2520         eError = _FreeOSPagesArray(psOSPageArrayData);
2521         PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
2522         return PVRSRV_OK;
2523 }
2524
2525 /* callback function for locking the system physical page addresses.
2526    This function must be called before the lookup address func. */
2527 static PVRSRV_ERROR
2528 PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
2529 {
2530         PVRSRV_ERROR eError;
2531         PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
2532
2533         if (psOSPageArrayData->bOnDemand)
2534         {
2535                 /* Allocate Memory for deferred allocation */
2536                 eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumPages);
2537                 if (eError != PVRSRV_OK)
2538                 {
2539                         return eError;
2540                 }
2541         }
2542
2543         eError = PVRSRV_OK;
2544         return eError;
2545 }
2546
2547 static PVRSRV_ERROR
2548 PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
2549 {
2550         /* Just drops the refcount. */
2551         PVRSRV_ERROR eError = PVRSRV_OK;
2552         PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
2553
2554         if (psOSPageArrayData->bOnDemand)
2555         {
2556                 /* Free Memory for deferred allocation */
2557                 eError = _FreeOSPages(psOSPageArrayData,
2558                                                           NULL,
2559                                                           0);
2560                 if (eError != PVRSRV_OK)
2561                 {
2562                         return eError;
2563                 }
2564         }
2565
2566         PVR_ASSERT (eError == PVRSRV_OK);
2567         return eError;
2568 }
2569
2570 /* N.B.  It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! */
2571 static PVRSRV_ERROR
2572 PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv,
2573                                         IMG_UINT32 ui32Log2PageSize,
2574                                         IMG_UINT32 ui32NumOfPages,
2575                                         IMG_DEVMEM_OFFSET_T *puiOffset,
2576                                         IMG_BOOL *pbValid,
2577                                         IMG_DEV_PHYADDR *psDevPAddr)
2578 {
2579         const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
2580         IMG_UINT32 uiPageSize = 1U << psOSPageArrayData->uiLog2DevPageSize;
2581         IMG_UINT32 uiInPageOffset;
2582         IMG_UINT32 uiPageIndex;
2583         IMG_UINT32 uiIdx;
2584
2585         if (psOSPageArrayData->uiLog2DevPageSize < ui32Log2PageSize)
2586         {
2587                 PVR_DPF((PVR_DBG_ERROR,
2588                          "%s: Requested physical addresses from PMR "
2589                          "for incompatible contiguity %u!",
2590                          __FUNCTION__,
2591                          ui32Log2PageSize));
2592                 return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
2593         }
2594
2595         for (uiIdx=0; uiIdx < ui32NumOfPages; uiIdx++)
2596         {
2597                 if (pbValid[uiIdx])
2598                 {
2599                         uiPageIndex = puiOffset[uiIdx] >> psOSPageArrayData->uiLog2DevPageSize;
2600                         uiInPageOffset = puiOffset[uiIdx] - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << psOSPageArrayData->uiLog2DevPageSize);
2601
2602                         PVR_ASSERT(uiPageIndex < psOSPageArrayData->uiTotalNumPages);
2603                         PVR_ASSERT(uiInPageOffset < uiPageSize);
2604
2605                         psDevPAddr[uiIdx].uiAddr = page_to_phys(psOSPageArrayData->pagearray[uiPageIndex]);
2606                         psDevPAddr[uiIdx].uiAddr += uiInPageOffset;
2607                 }
2608         }
2609
2610         return PVRSRV_OK;
2611 }
2612
/* Private handle produced by PMRAcquireKernelMappingDataOSMem and consumed
 * by PMRReleaseKernelMappingDataOSMem to undo the kernel mapping. */
typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ {
	/* Kernel virtual base address returned by vmap()/vm_map_ram() */
	void *pvBase;
	/* Number of pages covered by the mapping (needed by vm_unmap_ram) */
	IMG_UINT32 ui32PageCount;
} PMR_OSPAGEARRAY_KERNMAP_DATA;
2617
/* Map (part of) the PMR's pages into kernel virtual address space.
 *
 * The cache attributes of the mapping follow the allocation's CPU cache
 * flags (CMA allocations are always mapped uncached). Returns the kernel
 * address at the requested offset plus an opaque handle for release. */
static PVRSRV_ERROR
PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
								 size_t uiOffset,
								 size_t uiSize,
								 void **ppvKernelAddressOut,
								 IMG_HANDLE *phHandleOut,
								 PMR_FLAGS_T ulFlags)
{
	PVRSRV_ERROR eError;
	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
	void *pvAddress;
	pgprot_t prot = PAGE_KERNEL;
	IMG_UINT32 ui32PageOffset;  /* first page index to map */
	size_t uiMapOffset;         /* byte offset within that first page */
	IMG_UINT32 ui32PageCount;   /* number of pages to map */
	IMG_UINT32 uiLog2DevPageSize = psOSPageArrayData->uiLog2DevPageSize;
	PMR_OSPAGEARRAY_KERNMAP_DATA *psData;

	/*
		Zero offset and size as a special meaning which means map in the
		whole of the PMR, this is due to fact that the places that call
		this callback might not have access to be able to determine the
		physical size
	*/
	if ((uiOffset == 0) && (uiSize == 0))
	{
		ui32PageOffset = 0;
		uiMapOffset = 0;
		/* NOTE(review): uses iNumPagesAllocated, which for a sparse array
		 * need not equal uiTotalNumPages nor start at slot 0 — confirm
		 * callers only use this path on fully-populated PMRs */
		ui32PageCount = psOSPageArrayData->iNumPagesAllocated;
	}
	else
	{
		size_t uiEndoffset;

		ui32PageOffset = uiOffset >> uiLog2DevPageSize;
		uiMapOffset = uiOffset - (ui32PageOffset << uiLog2DevPageSize);
		uiEndoffset = uiOffset + uiSize - 1;
		// Add one as we want the count, not the offset
		ui32PageCount = (uiEndoffset >> uiLog2DevPageSize) + 1;
		ui32PageCount -= ui32PageOffset;
	}

	/* Choose the page protection matching the allocation's cache mode */
	if (psOSPageArrayData->bIsCMA)
	{
		prot = pgprot_noncached(prot);
	}
	else
	{
		switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags))
		{
			case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
					prot = pgprot_noncached(prot);
					break;

			case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
					prot = pgprot_writecombine(prot);
					break;

			case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
					/* default PAGE_KERNEL protection */
					break;

			default:
					eError = PVRSRV_ERROR_INVALID_PARAMS;
					goto e0;
		}
	}

	psData = OSAllocMem(sizeof(*psData));
	if (psData == NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto e0;
	}

	/* vm_map_ram is the faster path but is only used on 64-bit builds
	 * (unless explicitly disabled) because it consumes vmalloc space */
#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
	pvAddress = vmap(&psOSPageArrayData->pagearray[ui32PageOffset],
					 ui32PageCount,
					 VM_READ | VM_WRITE,
					 prot);
#else
	pvAddress = vm_map_ram(&psOSPageArrayData->pagearray[ui32PageOffset],
						   ui32PageCount,
						   -1,
						   prot);
#endif
	if (pvAddress == NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto e1;
	}

	/* Hand back the address at the requested byte offset; keep the page-
	 * aligned base and page count in the handle for unmapping later */
	*ppvKernelAddressOut = pvAddress + uiMapOffset;
	psData->pvBase = pvAddress;
	psData->ui32PageCount = ui32PageCount;
	*phHandleOut = psData;

	return PVRSRV_OK;

	/*
	  error exit paths follow
	*/
 e1:
	OSFreeMem(psData);
 e0:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
2725
2726 static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
2727                                                                                          IMG_HANDLE hHandle)
2728 {
2729     PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle;
2730         PVR_UNREFERENCED_PARAMETER(pvPriv);
2731
2732 #if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
2733         vunmap(psData->pvBase);
2734 #else
2735         vm_unmap_ram(psData->pvBase, psData->ui32PageCount);
2736 #endif
2737         OSFreeMem(psData);
2738 }
2739
2740 static
2741 PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv)
2742 {
2743         PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
2744         PVRSRV_ERROR eError = PVRSRV_OK;
2745
2746         /* Lock down the pool and add the array to the unpin list */
2747         _PagePoolLock();
2748
2749         /* Sanity check */
2750         PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_FALSE);
2751         PVR_ASSERT(psOSPageArrayData->bOnDemand == IMG_FALSE);
2752
2753         eError = _AddUnpinListEntryUnlocked(psOSPageArrayData);
2754
2755         if (eError != PVRSRV_OK)
2756         {
2757                 PVR_DPF((PVR_DBG_ERROR,
2758                          "%s: Not able to add allocation to unpinned list (%d).",
2759                          __FUNCTION__,
2760                          eError));
2761
2762                 goto e_exit;
2763         }
2764
2765         psOSPageArrayData->bUnpinned = IMG_TRUE;
2766
2767 e_exit:
2768         _PagePoolUnlock();
2769         return eError;
2770 }
2771
/* Re-pin a previously unpinned allocation.
 *
 * If the pages survived (nothing was reclaimed) this simply removes the
 * allocation from the unpin list and returns PVRSRV_OK. If the pages were
 * reclaimed, fresh pages are allocated (honouring the sparse mapping table
 * where applicable) and PVRSRV_ERROR_PMR_NEW_MEMORY is returned to signal
 * that the old content is gone. */
static
PVRSRV_ERROR PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv,
						PMR_MAPPING_TABLE *psMappingTable)
{
	PVRSRV_ERROR eError;
	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
	IMG_UINT32  *pui32MapTable = NULL;
	IMG_UINT32 i,j=0, ui32Temp=0;

	_PagePoolLock();

	/* Sanity check */
	PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_TRUE);

	psOSPageArrayData->bUnpinned = IMG_FALSE;

	/* If there are still pages in the array remove entries from the pool */
	if (psOSPageArrayData->iNumPagesAllocated != 0)
	{
		/* Pages were not reclaimed: nothing else to do */
		_RemoveUnpinListEntryUnlocked(psOSPageArrayData);
		_PagePoolUnlock();

		eError = PVRSRV_OK;
		goto e_exit_mapalloc_failure;
	}
	_PagePoolUnlock();

	/* If pages were reclaimed we allocate new ones and
	 * return PVRSRV_ERROR_PMR_NEW_MEMORY  */
	if (psMappingTable->ui32NumVirtChunks == 1)
	{
		/* Non-sparse: simply repopulate the whole array */
		eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumPages);
	}
	else
	{
		/* Sparse: rebuild the list of physical slots to allocate from the
		 * valid entries of the virtual-to-physical translation table */
		pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks);
		if(NULL == pui32MapTable)
		{
			eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
			PVR_DPF((PVR_DBG_ERROR,
					 "%s: Not able to Alloc Map Table.",
					 __FUNCTION__));
			goto e_exit_mapalloc_failure;
		}

		for (i = 0,j=0; i < psMappingTable->ui32NumVirtChunks; i++)
		{
			ui32Temp = psMappingTable->aui32Translation[i];
			if (TRANSLATION_INVALID != ui32Temp)
			{
				pui32MapTable[j++] = ui32Temp;
			}
		}
		eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks);
	}

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
				 "%s: Not able to get new pages for unpinned allocation.",
				 __FUNCTION__));

		eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
		goto e_exit;
	}

	PVR_DPF((PVR_DBG_MESSAGE,
			 "%s: Allocating new pages for unpinned allocation. "
			 "Old content is lost!",
			 __FUNCTION__));

	/* Success, but signal to the caller that the content is new */
	eError = PVRSRV_ERROR_PMR_NEW_MEMORY;

e_exit:
	/* NOTE(review): pui32MapTable may be NULL here when the non-sparse
	 * _AllocOSPages path failed — assumes OSFreeMem tolerates NULL; verify */
	OSFreeMem(pui32MapTable);
e_exit_mapalloc_failure:
	return eError;
}
2850
2851 /*************************************************************************/ /*!
2852 @Function       PMRChangeSparseMemOSMem
2853 @Description    This function Changes the sparse mapping by allocating & freeing
2854                                 of pages. It does also change the GPU and CPU maps accordingly
2855 @Return         PVRSRV_ERROR failure code
2856 */ /**************************************************************************/
2857 static PVRSRV_ERROR
2858 PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
2859                                                 const PMR *psPMR,
2860                                                 IMG_UINT32 ui32AllocPageCount,
2861                                                 IMG_UINT32 *pai32AllocIndices,
2862                                                 IMG_UINT32 ui32FreePageCount,
2863                                                 IMG_UINT32 *pai32FreeIndices,
2864                                                 IMG_UINT32 uiFlags)
2865 {
2866         PVRSRV_ERROR eError;
2867
2868         PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
2869         PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
2870         struct page **psPageArray = psPMRPageArrayData->pagearray;
2871         struct page *psPage;
2872
2873         IMG_UINT32 ui32AdtnlAllocPages = 0; /*<! Number of pages to alloc from the OS */
2874         IMG_UINT32 ui32AdtnlFreePages = 0; /*<! Number of pages to free back to the OS */
2875         IMG_UINT32 ui32CommonRequestCount = 0; /*<! Number of pages to move position in the page array */
2876         IMG_UINT32 ui32Loop = 0;
2877         IMG_UINT32 ui32Index = 0;
2878         IMG_UINT32 uiAllocpgidx ;
2879         IMG_UINT32 uiFreepgidx;
2880         IMG_UINT32 ui32Order =  psPMRPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
2881
2882         /* Check SPARSE flags and calculate pages to allocate and free */
2883         if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
2884         {
2885                 ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ?
2886                                 ui32FreePageCount : ui32AllocPageCount;
2887
2888                 PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
2889         }
2890
2891         if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
2892         {
2893                 ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount;
2894         }
2895         else
2896         {
2897                 ui32AllocPageCount = 0;
2898         }
2899
2900         if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
2901         {
2902                 ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount;
2903         }
2904         else
2905         {
2906                 ui32FreePageCount = 0;
2907         }
2908
2909         if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
2910         {
2911                 eError = PVRSRV_ERROR_INVALID_PARAMS;
2912                 return eError;
2913         }
2914
2915         /* The incoming request is classified into two operations independent of
2916          * each other: alloc & free pages.
2917          * These operations can be combined with two mapping operations as well
2918          * which are GPU & CPU space mappings.
2919          *
2920          * From the alloc and free page requests, the net amount of pages to be
2921          * allocated or freed is computed. Pages that were requested to be freed
2922          * will be reused to fulfil alloc requests.
2923          *
2924          * The order of operations is:
2925          * 1. Allocate new pages from the OS
2926          * 2. Move the free pages from free request to alloc positions.
2927          * 3. Free the rest of the pages not used for alloc
2928          *
2929          * Alloc parameters are validated at the time of allocation
2930          * and any error will be handled then. */
2931
2932         /* Validate the free indices */
2933         if (ui32FreePageCount)
2934         {
2935                 if (NULL != pai32FreeIndices){
2936
2937                         for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
2938                         {
2939                                 uiFreepgidx = pai32FreeIndices[ui32Loop];
2940
2941                                 if (uiFreepgidx > psPMRPageArrayData->uiTotalNumPages)
2942                                 {
2943                                         eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
2944                                         goto e0;
2945                                 }
2946
2947                                 if (INVALID_PAGE == psPageArray[uiFreepgidx])
2948                                 {
2949                                         eError = PVRSRV_ERROR_INVALID_PARAMS;
2950                                         goto e0;
2951                                 }
2952                         }
2953                 }
2954                 else
2955                 {
2956                         eError = PVRSRV_ERROR_INVALID_PARAMS;
2957                         return eError;
2958                 }
2959         }
2960
2961         /* Validate the alloc indices */
2962         for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
2963         {
2964                 uiAllocpgidx = pai32AllocIndices[ui32Loop];
2965
2966                 if (uiAllocpgidx > psPMRPageArrayData->uiTotalNumPages)
2967                 {
2968                         eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
2969                         goto e0;
2970                 }
2971
2972                 if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
2973                 {
2974                         if ((INVALID_PAGE !=  psPageArray[uiAllocpgidx]) ||
2975                             (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
2976                         {
2977                                 eError = PVRSRV_ERROR_INVALID_PARAMS;
2978                                 goto e0;
2979                         }
2980                 }
2981                 else
2982                 {
2983                         if ((INVALID_PAGE ==  psPageArray[uiAllocpgidx]) ||
2984                             (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]) )
2985                         {
2986                                 eError = PVRSRV_ERROR_INVALID_PARAMS;
2987                                 goto e0;
2988                         }
2989                 }
2990         }
2991
2992         ui32Loop = 0;
2993
2994         /* Allocate new pages from the OS */
2995         if (0 != ui32AdtnlAllocPages)
2996         {
2997                         eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages);
2998                         if (PVRSRV_OK != eError)
2999                         {
3000                                 PVR_DPF((PVR_DBG_MESSAGE,
3001                                          "%s: New Addtl Allocation of pages failed",
3002                                          __FUNCTION__));
3003                                 goto e0;
3004                         }
3005
3006                         /*Mark the corresponding pages of translation table as valid */
3007                         for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
3008                         {
3009                                 psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
3010                         }
3011         }
3012
3013
3014         ui32Index = ui32Loop;
3015
3016         /* Move the corresponding free pages to alloc request */
3017         for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++)
3018         {
3019                 uiAllocpgidx = pai32AllocIndices[ui32Index];
3020                 uiFreepgidx =  pai32FreeIndices[ui32Loop];
3021                 psPage = psPageArray[uiAllocpgidx];
3022                 psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
3023
3024                 /* Is remap mem used in real world scenario? Should it be turned to a
3025                  *  debug feature? The condition check needs to be out of loop, will be
3026                  *  done at later point though after some analysis */
3027                 if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
3028                 {
3029                         psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
3030                         psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
3031                         psPageArray[uiFreepgidx] = (struct page *)INVALID_PAGE;
3032                 }
3033                 else
3034                 {
3035                         psPageArray[uiFreepgidx] = psPage;
3036                         psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
3037                         psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
3038                 }
3039
3040                 /* Be sure to honour the attributes associated with the allocation
3041                  * such as zeroing, poisoning etc. */
3042                 if (psPMRPageArrayData->bPoisonOnAlloc)
3043                 {
3044                         _PoisonPages(psPageArray[uiAllocpgidx],
3045                                      ui32Order,
3046                                      _AllocPoison,
3047                                      _AllocPoisonSize);
3048                 }
3049                 else
3050                 {
3051                         if (psPMRPageArrayData->bZero)
3052                         {
3053                                 char a = 0;
3054                                 _PoisonPages(psPageArray[uiAllocpgidx],
3055                                              ui32Order,
3056                                              &a,
3057                                              1);
3058                         }
3059                 }
3060         }
3061
3062         /* Free the additional free pages */
3063         if (0 != ui32AdtnlFreePages)
3064         {
3065                 eError = _FreeOSPages(psPMRPageArrayData,
3066                                       &pai32FreeIndices[ui32Loop],
3067                                       ui32AdtnlFreePages);
3068                 if (eError != PVRSRV_OK)
3069                 {
3070                         goto e0;
3071                 }
3072                 while (ui32Loop < ui32FreePageCount)
3073                 {
3074                         psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID;
3075                         ui32Loop++;
3076                 }
3077         }
3078
3079         eError = PVRSRV_OK;
3080
3081 e0:
3082         return eError;
3083 }
3084
3085 /*************************************************************************/ /*!
3086 @Function       PMRChangeSparseMemCPUMapOSMem
3087 @Description    This function Changes CPU maps accordingly
3088 @Return         PVRSRV_ERROR failure code
3089 */ /**************************************************************************/
3090 static
3091 PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv,
3092                                            const PMR *psPMR,
3093                                            IMG_UINT64 sCpuVAddrBase,
3094                                            IMG_UINT32 ui32AllocPageCount,
3095                                            IMG_UINT32 *pai32AllocIndices,
3096                                            IMG_UINT32 ui32FreePageCount,
3097                                            IMG_UINT32 *pai32FreeIndices)
3098 {
3099         struct page **psPageArray;
3100         PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
3101         IMG_CPU_PHYADDR sCPUPAddr;
3102
3103         sCPUPAddr.uiAddr = 0;
3104         psPageArray = psPMRPageArrayData->pagearray;
3105
3106         return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
3107                                            sCpuVAddrBase,
3108                                            sCPUPAddr,
3109                                            ui32AllocPageCount,
3110                                            pai32AllocIndices,
3111                                            ui32FreePageCount,
3112                                            pai32FreeIndices,
3113                                            IMG_FALSE);
3114 }
3115
/* Callback table registered with PMRCreatePMR() for OS (UMA) memory backed
 * PMRs; every entry points at one of the PMR*OSMem implementations above.
 * pfnReadBytes/pfnWriteBytes are left NULL — presumably the PMR layer
 * falls back to a generic path via the kernel-mapping callbacks; confirm
 * against pmr.c before relying on it. */
static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = {
    .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem,
    .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem,
    .pfnDevPhysAddr = &PMRSysPhysAddrOSMem,
    .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem,
    .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem,
    .pfnReadBytes = NULL,
    .pfnWriteBytes = NULL,
    .pfnUnpinMem = &PMRUnpinOSMem,
    .pfnPinMem = &PMRPinOSMem,
    .pfnChangeSparseMem = &PMRChangeSparseMemOSMem,
    .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem,
    .pfnFinalize = &PMRFinalizeOSMem,
};
3130
/*************************************************************************/ /*!
@Function       PhysmemNewOSRamBackedPMR
@Description    Factory entry point: creates a PMR backed by OS (UMA)
                memory. Builds the page-array bookkeeping structure,
                allocates the physical pages up front unless the allocation
                is flagged on-demand, selects the physical heap from the
                allocation flags, and wraps everything in a new PMR using
                _sPMROSPFuncTab as its callback table.
@Input          psDevNode         Device this allocation belongs to
@Input          uiSize            Total (virtual) size of the allocation
@Input          uiChunkSize       Size of one sparse chunk
@Input          ui32NumPhysChunks Number of chunks physically backed now
@Input          ui32NumVirtChunks Number of chunks in the virtual range
@Input          puiAllocIndices   Chunk indices to back (sparse case)
@Input          uiLog2PageSize    Requested log2 page size (rounded up to
                                  at least PAGE_SHIFT)
@Input          uiFlags           PVRSRV_MEMALLOCFLAGS_T allocation flags
@Input          pszAnnotation     Debug annotation for the PMR
@Output         ppsPMRPtr         The newly created PMR on success
@Return         PVRSRV_ERROR failure code
*/ /**************************************************************************/
PVRSRV_ERROR
PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
						 IMG_DEVMEM_SIZE_T uiSize,
						 IMG_DEVMEM_SIZE_T uiChunkSize,
						 IMG_UINT32 ui32NumPhysChunks,
						 IMG_UINT32 ui32NumVirtChunks,
						 IMG_UINT32 *puiAllocIndices,
						 IMG_UINT32 uiLog2PageSize,
						 PVRSRV_MEMALLOCFLAGS_T uiFlags,
						 const IMG_CHAR *pszAnnotation,
						 PMR **ppsPMRPtr)
{
	PVRSRV_ERROR eError;
	PVRSRV_ERROR eError2; /* For cleanup-path results; keeps eError intact */
	PMR *psPMR;
	struct _PMR_OSPAGEARRAY_DATA_ *psPrivData;
	PMR_FLAGS_T uiPMRFlags;
	PHYS_HEAP *psPhysHeap;
	IMG_BOOL bZero;
	IMG_BOOL bIsCMA;
	IMG_BOOL bPoisonOnAlloc;
	IMG_BOOL bPoisonOnFree;
	IMG_BOOL bOnDemand;
	IMG_BOOL bCpuLocal;
	IMG_BOOL bFwLocal;
	IMG_UINT32 ui32CPUCacheFlags = DevmemCPUCacheMode(psDevNode, uiFlags);
	if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags))
	{
		/* Propagate the cache-clean request into the CPU cache mode bits */
		ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN;
	}

#if defined(PVRSRV_GPUVIRT_GUESTDRV)
	/*
	 * The host driver (but not guest) can still use this factory for firmware
	 * allocations
	 */
	PVR_ASSERT(!PVRSRV_CHECK_FW_LOCAL(uiFlags));
#endif

	/*
	 * Silently round up alignment/pagesize if request was less that PAGE_SHIFT
	 * because it would never be harmful for memory to be _more_ contiguous that
	 * was desired.
	 */
	uiLog2PageSize = PAGE_SHIFT > uiLog2PageSize ? PAGE_SHIFT : uiLog2PageSize;

	/* In case we have a non-sparse allocation tolerate bad requests and round up.
	 * For sparse allocations the users have to make sure to meet the right
	 * requirements. */
	if (ui32NumPhysChunks == ui32NumVirtChunks &&
		ui32NumVirtChunks == 1)
	{
		/* Round up allocation size to at least a full PAGE_SIZE */
		uiSize = PVR_ALIGN(uiSize, PAGE_SIZE);
		uiChunkSize = uiSize;
	}

	/* 
	 * Use CMA framework if order is greater than OS page size; please note
	 * that OSMMapPMRGeneric() has the same expectation as well.
	 */
	bIsCMA = uiLog2PageSize > PAGE_SHIFT ? IMG_TRUE : IMG_FALSE;
	bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
	bCpuLocal = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
	bFwLocal = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
	bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
	bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
	bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;

	if (bZero && bPoisonOnAlloc)
	{
		/* Zero on Alloc and Poison on Alloc are mutually exclusive */
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto errorOnParam;
	}

	/* Create Array structure that hold the physical pages */
	eError = _AllocOSPageArray(psDevNode,
							   uiChunkSize,
							   ui32NumPhysChunks,
							   ui32NumVirtChunks,
							   uiLog2PageSize,
							   bZero,
							   bIsCMA,
							   bPoisonOnAlloc,
							   bPoisonOnFree,
							   bOnDemand,
							   ui32CPUCacheFlags,
							   &psPrivData);
	if (eError != PVRSRV_OK)
	{
		goto errorOnAllocPageArray;
	}

	if (!bOnDemand)
	{
		/* Do we fill the whole page array or just parts (sparse)? */
		if (ui32NumPhysChunks == ui32NumVirtChunks)
		{
			/* Allocate the physical pages */
			eError = _AllocOSPages(psPrivData, NULL, psPrivData->uiTotalNumPages);
		}
		else
		{
			/* NOTE: if ui32NumPhysChunks == 0 nothing is allocated and
			 * eError keeps the PVRSRV_OK from _AllocOSPageArray above, so
			 * the check below is safe */
			if (ui32NumPhysChunks != 0)
			{
				/* Calculate the number of pages we want to allocate */
				IMG_UINT32 uiPagesToAlloc =
					(IMG_UINT32) ((((ui32NumPhysChunks * uiChunkSize) - 1) >> uiLog2PageSize) + 1);

				/* Make sure calculation is correct */
				PVR_ASSERT(((PMR_SIZE_T) uiPagesToAlloc << uiLog2PageSize) ==
						   (ui32NumPhysChunks * uiChunkSize) );

				/* Allocate the physical pages */
				eError = _AllocOSPages(psPrivData, puiAllocIndices,
									   uiPagesToAlloc);
			}
		}

		if (eError != PVRSRV_OK)
		{
			goto errorOnAllocPages;
		}
	}

	/*
	 * In this instance, we simply pass flags straight through.
	 *
	 * Generically, uiFlags can include things that control the PMR factory, but
	 * we don't need any such thing (at the time of writing!), and our caller
	 * specifies all PMR flags so we don't need to meddle with what was given to
	 * us.
	 */
	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);

	/*
	 * Check no significant bits were lost in cast due to different bit widths
	 * for flags
	 */
	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));

	if (bOnDemand)
	{
		PDUMPCOMMENT("Deferred Allocation PMR (UMA)");
	}

	/* Pick the physical heap this PMR is accounted against, based on the
	 * locality flags (FW-local takes precedence over CPU-local) */
	if (bFwLocal)
	{
		PDUMPCOMMENT("FW_LOCAL allocation requested");
		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
	}
	else if (bCpuLocal)
	{
		PDUMPCOMMENT("CPU_LOCAL allocation requested");
		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
	}
	else
	{
		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
	}

	/* Wrap the private data in a PMR; on success the PMR owns psPrivData
	 * and frees it via pfnFinalize (PMRFinalizeOSMem) */
	eError = PMRCreatePMR(psDevNode,
						  psPhysHeap,
						  uiSize,
						  uiChunkSize,
						  ui32NumPhysChunks,
						  ui32NumVirtChunks,
						  puiAllocIndices,
						  uiLog2PageSize,
						  uiPMRFlags,
						  pszAnnotation,
						  &_sPMROSPFuncTab,
						  psPrivData,
						  PMR_TYPE_OSMEM,
						  &psPMR,
						  IMG_FALSE);
	if (eError != PVRSRV_OK)
	{
		goto errorOnCreate;
	}

	*ppsPMRPtr = psPMR;

	return PVRSRV_OK;

	/* Cleanup chain: each label undoes one acquisition, falling through to
	 * the earlier ones */
errorOnCreate:
	if (!bOnDemand)
	{
		/* NULL/0 frees every page currently held in the array */
		eError2 = _FreeOSPages(psPrivData, NULL, 0);
		PVR_ASSERT(eError2 == PVRSRV_OK);
	}

errorOnAllocPages:
	eError2 = _FreeOSPagesArray(psPrivData);
	PVR_ASSERT(eError2 == PVRSRV_OK);

errorOnAllocPageArray:
errorOnParam:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}