1 /*************************************************************************/ /*!
3 @Title Device specific initialisation routines
4 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @Description Device specific MMU initialisation
6 @License Dual MIT/GPLv2
8 The contents of this file are subject to the MIT license as set out below.
10 Permission is hereby granted, free of charge, to any person obtaining a copy
11 of this software and associated documentation files (the "Software"), to deal
12 in the Software without restriction, including without limitation the rights
13 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 copies of the Software, and to permit persons to whom the Software is
15 furnished to do so, subject to the following conditions:
17 The above copyright notice and this permission notice shall be included in
18 all copies or substantial portions of the Software.
20 Alternatively, the contents of this file may be used under the terms of
21 the GNU General Public License Version 2 ("GPL") in which case the provisions
22 of GPL are applicable instead of those above.
24 If you wish to allow use of your version of this file only under the terms of
25 GPL, and not to allow others to use your version of this file under the terms
26 of the MIT license, indicate your decision by deleting the provisions above
27 and replace them with the notice and other provisions required by GPL as set
28 out in the file called "GPL-COPYING" included in this distribution. If you do
29 not delete the provisions above, a recipient may use your version of this file
30 under the terms of either the MIT license or GPL.
32 This License is also included in this distribution in the file called
35 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
36 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
37 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
38 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
39 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
40 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
41 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42 */ /**************************************************************************/
43 #include "rgxmmuinit.h"
44 #include "rgxmmudefs_km.h"
47 #include "img_types.h"
48 #include "mmu_common.h"
49 #include "pdump_mmu.h"
51 #include "pvr_debug.h"
52 #include "pvrsrv_error.h"
53 #include "rgx_memallocflags.h"
54 #include "rgx_heaps.h"
/* Number of units (entries) representable in a bitfield, given the field's
 * mask and shift: ((Mask >> Shift) + 1).
 * Arguments are fully parenthesised so that compound expressions
 * (e.g. `A | B`) expand with the intended precedence against `>>`. */
#define UNITS_IN_BITFIELD(Mask, Shift) ((((Mask) >> (Shift))) + 1)
/*
 * Bits of PT, PD and PC not involving addresses
 */

/* Protection/status bits of an 8-byte Page Table entry — every defined bit
 * that is not part of the physical page address field. */
#define RGX_MMUCTRL_PTE_PROTMASK	(RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \
					 RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \
					 RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \
					 RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \
					 RGX_MMUCTRL_PT_DATA_CC_EN | \
					 RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \
					 RGX_MMUCTRL_PT_DATA_VALID_EN)

/* Protection/status bits of an 8-byte Page Directory entry: entry-pending,
 * the page-size field (the inverted CLRMSK yields the field's set bits)
 * and the valid bit. */
#define RGX_MMUCTRL_PDE_PROTMASK	(RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \
					 ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \
					 RGX_MMUCTRL_PD_DATA_VALID_EN)

/* Protection/status bits of a 4-byte Page Catalogue entry: pending | valid. */
#define RGX_MMUCTRL_PCE_PROTMASK	(RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \
					 RGX_MMUCTRL_PC_DATA_VALID_EN)
/* Top-level (Page Catalogue) entry layout and the device-virtual-address
 * split used to index it; populated in RGXMMUInit_Register(). */
static MMU_PxE_CONFIG sRGXMMUPCEConfig;
static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;

/*
 * Configuration for heaps with 4kB Data-Page size
 */
static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;

/*
 * Configuration for heaps with 16kB Data-Page size
 */
static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;

/*
 * Configuration for heaps with 64kB Data-Page size
 */
static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;

/*
 * Configuration for heaps with 256kB Data-Page size
 */
static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;

/*
 * Configuration for heaps with 1MB Data-Page size
 */
static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;

/*
 * Configuration for heaps with 2MB Data-Page size
 */
static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
160 /* Forward declaration of protection bits derivation functions, for
161 the following structure */
162 static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
163 static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
164 static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
165 static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
166 static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
167 static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
169 static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
170 const MMU_PxE_CONFIG **ppsMMUPDEConfig,
171 const MMU_PxE_CONFIG **ppsMMUPTEConfig,
172 const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
175 static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
177 static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
178 static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
180 static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
182 PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
184 /* Setup of Px Entries:
187 * PAGE TABLE (8 Byte):
189 * | 62 | 61...40 | 39...12 (varies) | 11...6 | 5 | 4 | 3 | 2 | 1 | 0 |
190 * | PM/Meta protect | VP Page (39:18) | Physical Page | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid |
193 * PAGE DIRECTORY (8 Byte):
195 * | 40 | 39...5 (varies) | 4 | 3...1 | 0 |
196 * | Entry Pending | Page Table base address | (reserved) | Page Size | Valid |
199 * PAGE CATALOGUE (4 Byte):
201 * | 31...4 | 3...2 | 1 | 0 |
202 * | Page Directory base address | (reserved) | Entry Pending | Valid |
207 /* Example how to get the PD address from a PC entry.
208 * The procedure is the same for PD and PT entries to retrieve PT and Page addresses:
210 * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&':
211 * | 31...4 | 3...2 | 1 | 0 |
212 * | PD Addr | 0 | 0 | 0 |
214 * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>':
218 * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<':
225 sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
226 PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
229 * Setup sRGXMMUPCEConfig
231 sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */
232 sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */
234 sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address */
235 sRGXMMUPCEConfig.uiAddrLog2Align = 12; /* Alignment of PD physical addresses. */
237 sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits (pending | valid)*/
238 sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to get the statis bits */
240 sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */
241 sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */
244 * Setup sRGXMMUTopLevelDevVAddrConfig
246 sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */
247 sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PC index */
248 sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask,
249 sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift));
251 sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */
252 sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PD index */
253 sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask,
254 sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift));
258 * Configuration for heaps with 4kB Data-Page size
263 * Setup sRGXMMUPDEConfig_4KBDP
265 sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8;
267 sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
268 sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12;
269 sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12;
271 sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
272 sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1;
274 sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
275 sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
277 sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
278 sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
281 * Setup sRGXMMUPTEConfig_4KBDP
283 sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8;
285 sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000);
286 sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12;
287 sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */
289 sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
290 sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
292 sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
293 sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
296 * Setup sRGXMMUDevVAddrConfig_4KBDP
298 sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
299 sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
300 sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask,
301 sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift));
303 sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
304 sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
305 sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask,
306 sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift));
308 sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
309 sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT;
310 sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask,
311 sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift));
313 sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
314 sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
315 sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0;
318 * Setup gsPageSizeConfig4KB
320 gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
321 gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
322 gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
323 gsPageSizeConfig4KB.uiRefCount = 0;
324 gsPageSizeConfig4KB.uiMaxRefCount = 0;
329 * Configuration for heaps with 16kB Data-Page size
334 * Setup sRGXMMUPDEConfig_16KBDP
336 sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8;
338 sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
339 sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10;
340 sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10;
342 sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
343 sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1;
345 sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
346 sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
348 sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
349 sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
352 * Setup sRGXMMUPTEConfig_16KBDP
354 sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8;
356 sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000);
357 sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14;
358 sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14;
360 sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
361 sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
363 sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
364 sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
367 * Setup sRGXMMUDevVAddrConfig_16KBDP
369 sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
370 sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
371 sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask,
372 sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift));
375 sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
376 sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
377 sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask,
378 sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift));
381 sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000);
382 sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14;
383 sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask,
384 sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift));
386 sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff);
387 sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
388 sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
391 * Setup gsPageSizeConfig16KB
393 gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
394 gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
395 gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
396 gsPageSizeConfig16KB.uiRefCount = 0;
397 gsPageSizeConfig16KB.uiMaxRefCount = 0;
402 * Configuration for heaps with 64kB Data-Page size
407 * Setup sRGXMMUPDEConfig_64KBDP
409 sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8;
411 sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
412 sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8;
413 sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8;
415 sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
416 sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1;
418 sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
419 sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
421 sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
422 sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
425 * Setup sRGXMMUPTEConfig_64KBDP
427 sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8;
429 sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000);
430 sRGXMMUPTEConfig_64KBDP.uiAddrShift =16;
431 sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16;
433 sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
434 sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
436 sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
437 sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
440 * Setup sRGXMMUDevVAddrConfig_64KBDP
442 sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
443 sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
444 sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask,
445 sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift));
448 sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
449 sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
450 sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask,
451 sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift));
454 sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000);
455 sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16;
456 sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask,
457 sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift));
460 sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
461 sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
462 sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0;
465 * Setup gsPageSizeConfig64KB
467 gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
468 gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
469 gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
470 gsPageSizeConfig64KB.uiRefCount = 0;
471 gsPageSizeConfig64KB.uiMaxRefCount = 0;
476 * Configuration for heaps with 256kB Data-Page size
481 * Setup sRGXMMUPDEConfig_256KBDP
483 sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8;
485 sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
486 sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6;
487 sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6;
489 sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
490 sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1;
492 sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
493 sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
495 sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
496 sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
499 * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
501 sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8;
503 sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000);
504 sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18;
505 sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18;
507 sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
508 sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
510 sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
511 sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
514 * Setup sRGXMMUDevVAddrConfig_256KBDP
516 sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
517 sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
518 sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask,
519 sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift));
522 sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
523 sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
524 sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask,
525 sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift));
528 sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000);
529 sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18;
530 sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask,
531 sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift));
534 sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff);
535 sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
536 sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
539 * Setup gsPageSizeConfig256KB
541 gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
542 gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
543 gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
544 gsPageSizeConfig256KB.uiRefCount = 0;
545 gsPageSizeConfig256KB.uiMaxRefCount = 0;
548 * Setup sRGXMMUPDEConfig_1MBDP
550 sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8;
552 sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
553 sRGXMMUPDEConfig_1MBDP.uiAddrShift = 4;
554 sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 4;
556 sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
557 sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1;
559 sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
560 sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
562 sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
563 sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
566 * Setup sRGXMMUPTEConfig_1MBDP
568 sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
570 sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000);
571 sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20;
572 sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20;
574 sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
575 sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
577 sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
578 sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
581 * Setup sRGXMMUDevVAddrConfig_1MBDP
583 sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
584 sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
585 sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask,
586 sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift));
589 sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
590 sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
591 sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask,
592 sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift));
595 sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000);
596 sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20;
597 sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask,
598 sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift));
601 sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff);
602 sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
603 sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
606 * Setup gsPageSizeConfig1MB
608 gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
609 gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
610 gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
611 gsPageSizeConfig1MB.uiRefCount = 0;
612 gsPageSizeConfig1MB.uiMaxRefCount = 0;
615 * Setup sRGXMMUPDEConfig_2MBDP
617 sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8;
619 sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
620 sRGXMMUPDEConfig_2MBDP.uiAddrShift = 4;
621 sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 4;
623 sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
624 sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1;
626 sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
627 sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
629 sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
630 sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
633 * Setup sRGXMMUPTEConfig_2MBDP
635 sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8;
637 sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000);
638 sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21;
639 sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21;
641 sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
642 sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
644 sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
645 sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
648 * Setup sRGXMMUDevVAddrConfig_2MBDP
650 sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
651 sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
652 sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask,
653 sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift));
656 sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
657 sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
658 sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask,
659 sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift));
662 sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000);
663 sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21;
664 sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask,
665 sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift));
668 sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff);
669 sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
670 sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
673 * Setup gsPageSizeConfig2MB
675 gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
676 gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
677 gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
678 gsPageSizeConfig2MB.uiRefCount = 0;
679 gsPageSizeConfig2MB.uiMaxRefCount = 0;
682 * Setup sRGXMMUDeviceAttributes
684 sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
685 sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3;
686 sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
687 sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig;
688 sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
690 /* Functions for deriving page table/dir/cat protection bits */
691 sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
692 sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
693 sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
694 sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
695 sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
696 sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
698 /* Functions for establishing configurations for PDE/PTE/DEVVADDR
700 sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
701 sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
703 sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
704 sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
706 psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes;
711 PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
718 psDeviceNode->pfnMMUGetContextID = NULL;
721 psDeviceNode->psMMUDevAttrs = NULL;
724 PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
725 PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
726 gsPageSizeConfig4KB.uiMaxRefCount));
727 PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
728 gsPageSizeConfig4KB.uiRefCount));
729 PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
730 gsPageSizeConfig16KB.uiMaxRefCount));
731 PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
732 gsPageSizeConfig16KB.uiRefCount));
733 PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
734 gsPageSizeConfig64KB.uiMaxRefCount));
735 PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
736 gsPageSizeConfig64KB.uiRefCount));
737 PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
738 gsPageSizeConfig256KB.uiMaxRefCount));
739 PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
740 gsPageSizeConfig256KB.uiRefCount));
741 PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
742 gsPageSizeConfig1MB.uiMaxRefCount));
743 PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
744 gsPageSizeConfig1MB.uiRefCount));
745 PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
746 gsPageSizeConfig2MB.uiMaxRefCount));
747 PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
748 gsPageSizeConfig2MB.uiRefCount));
750 if (gsPageSizeConfig4KB.uiRefCount > 0 ||
751 gsPageSizeConfig16KB.uiRefCount > 0 ||
752 gsPageSizeConfig64KB.uiRefCount > 0 ||
753 gsPageSizeConfig256KB.uiRefCount > 0 ||
754 gsPageSizeConfig1MB.uiRefCount > 0 ||
755 gsPageSizeConfig2MB.uiRefCount > 0
758 PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
764 /*************************************************************************/ /*!
765 @Function RGXDerivePCEProt4
766 @Description calculate the PCE protection flags based on a 4 byte entry
768 */ /**************************************************************************/
769 static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
771 return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN;
775 /*************************************************************************/ /*!
776 @Function RGXDerivePCEProt8
777 @Description calculate the PCE protection flags based on an 8 byte entry
779 */ /**************************************************************************/
780 static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
782 PVR_UNREFERENCED_PARAMETER(uiProtFlags);
783 PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
785 PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device"));
790 /*************************************************************************/ /*!
791 @Function RGXDerivePDEProt4
792 @Description derive the PDE protection flags based on a 4 byte entry
794 */ /**************************************************************************/
795 static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
797 PVR_UNREFERENCED_PARAMETER(uiProtFlags);
798 PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
803 /*************************************************************************/ /*!
804 @Function RGXDerivePDEProt8
805 @Description derive the PDE protection flags based on an 8 byte entry
807 @Input uiLog2DataPageSize The log2 of the required page size.
808 E.g, for 4KiB pages, this parameter must be 12.
809 For 2MiB pages, it must be set to 21.
812 */ /**************************************************************************/
813 static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
815 IMG_UINT64 ret_value = 0; // 0 means invalid
817 if (! (uiProtFlags & MMU_PROTFLAGS_INVALID)) // if not invalid
819 switch (uiLog2DataPageSize)
821 case RGX_HEAP_4KB_PAGE_SHIFT:
822 ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB;
824 case RGX_HEAP_16KB_PAGE_SHIFT:
825 ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB;
827 case RGX_HEAP_64KB_PAGE_SHIFT:
828 ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB;
830 case RGX_HEAP_256KB_PAGE_SHIFT:
831 ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB;
833 case RGX_HEAP_1MB_PAGE_SHIFT:
834 ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB;
836 case RGX_HEAP_2MB_PAGE_SHIFT:
837 ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB;
840 PVR_DPF((PVR_DBG_ERROR,
841 "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]",
842 __FILE__, __LINE__, __FUNCTION__, uiLog2DataPageSize));
849 /*************************************************************************/ /*!
850 @Function RGXDerivePTEProt4
851 @Description calculate the PTE protection flags based on a 4 byte entry
853 */ /**************************************************************************/
854 static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
856 PVR_UNREFERENCED_PARAMETER(uiProtFlags);
857 PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device"));
862 /*************************************************************************/ /*!
863 @Function RGXDerivePTEProt8
864 @Description calculate the PTE protection flags based on an 8 byte entry
866 */ /**************************************************************************/
867 static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
869 IMG_UINT64 ui64MMUFlags=0;
871 PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
873 if(((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
877 else if(MMU_PROTFLAGS_READABLE & uiProtFlags)
880 ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
882 else if(MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
885 PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: write-only is not possible on this device"));
887 else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
889 PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified..."));
892 /* cache coherency */
893 if(MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
895 ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN;
899 if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
901 ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN;
904 if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
906 ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN;
909 if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
911 ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN;
918 /*************************************************************************/ /*!
919 @Function RGXGetPageSizeConfig
920 @Description Set up configuration for variable sized data pages.
921 RGXPutPageSizeConfigCB has to be called to ensure correct
924 */ /**************************************************************************/
925 static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
926 const MMU_PxE_CONFIG **ppsMMUPDEConfig,
927 const MMU_PxE_CONFIG **ppsMMUPTEConfig,
928 const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
931 MMU_PAGESIZECONFIG *psPageSizeConfig;
933 switch (uiLog2DataPageSize)
935 case RGX_HEAP_4KB_PAGE_SHIFT:
936 psPageSizeConfig = &gsPageSizeConfig4KB;
938 case RGX_HEAP_16KB_PAGE_SHIFT:
939 psPageSizeConfig = &gsPageSizeConfig16KB;
941 case RGX_HEAP_64KB_PAGE_SHIFT:
942 psPageSizeConfig = &gsPageSizeConfig64KB;
944 case RGX_HEAP_256KB_PAGE_SHIFT:
945 psPageSizeConfig = &gsPageSizeConfig256KB;
947 case RGX_HEAP_1MB_PAGE_SHIFT:
948 psPageSizeConfig = &gsPageSizeConfig1MB;
950 case RGX_HEAP_2MB_PAGE_SHIFT:
951 psPageSizeConfig = &gsPageSizeConfig2MB;
954 PVR_DPF((PVR_DBG_ERROR,
955 "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
956 uiLog2DataPageSize));
957 return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
960 /* Refer caller's pointers to the data */
961 *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
962 *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
963 *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
965 #if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
966 /* Increment ref-count - not that we're allocating anything here
967 (I'm using static structs), but one day we might, so we want
968 the Get/Put code to be balanced properly */
969 psPageSizeConfig->uiRefCount ++;
971 /* This is purely for debug statistics */
972 psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
973 psPageSizeConfig->uiRefCount);
976 *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
977 PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
982 /*************************************************************************/ /*!
983 @Function RGXPutPageSizeConfig
984 @Description Tells this code that the mmu module is done with the
985 configurations set in RGXGetPageSizeConfig. This can
987 Called after RGXGetPageSizeConfigCB.
989 */ /**************************************************************************/
990 static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
992 #if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
993 MMU_PAGESIZECONFIG *psPageSizeConfig;
994 IMG_UINT32 uiLog2DataPageSize;
996 uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
998 switch (uiLog2DataPageSize)
1000 case RGX_HEAP_4KB_PAGE_SHIFT:
1001 psPageSizeConfig = &gsPageSizeConfig4KB;
1003 case RGX_HEAP_16KB_PAGE_SHIFT:
1004 psPageSizeConfig = &gsPageSizeConfig16KB;
1006 case RGX_HEAP_64KB_PAGE_SHIFT:
1007 psPageSizeConfig = &gsPageSizeConfig64KB;
1009 case RGX_HEAP_256KB_PAGE_SHIFT:
1010 psPageSizeConfig = &gsPageSizeConfig256KB;
1012 case RGX_HEAP_1MB_PAGE_SHIFT:
1013 psPageSizeConfig = &gsPageSizeConfig1MB;
1015 case RGX_HEAP_2MB_PAGE_SHIFT:
1016 psPageSizeConfig = &gsPageSizeConfig2MB;
1019 PVR_DPF((PVR_DBG_ERROR,
1020 "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
1021 uiLog2DataPageSize));
1022 return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
1025 /* Ref-count here is not especially useful, but it's an extra
1026 check that the API is being used correctly */
1027 psPageSizeConfig->uiRefCount --;
1029 PVR_UNREFERENCED_PARAMETER(hPriv);
1034 static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
1036 PVR_UNREFERENCED_PARAMETER(ui32PDE);
1037 PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
1038 PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
1039 return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
1042 static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
1044 switch (ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK))
1046 case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB:
1047 *pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT;
1049 case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB:
1050 *pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT;
1052 case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB:
1053 *pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT;
1055 case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB:
1056 *pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT;
1058 case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB:
1059 *pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT;
1061 case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB:
1062 *pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT;
1065 return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;