1 /*************************************************************************/ /*!
3 @Title Linux OS PMR functions
4 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
5 @License Dual MIT/GPLv2
7 The contents of this file are subject to the MIT license as set out below.
9 Permission is hereby granted, free of charge, to any person obtaining a copy
10 of this software and associated documentation files (the "Software"), to deal
11 in the Software without restriction, including without limitation the rights
12 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 copies of the Software, and to permit persons to whom the Software is
14 furnished to do so, subject to the following conditions:
16 The above copyright notice and this permission notice shall be included in
17 all copies or substantial portions of the Software.
19 Alternatively, the contents of this file may be used under the terms of
20 the GNU General Public License Version 2 ("GPL") in which case the provisions
21 of GPL are applicable instead of those above.
23 If you wish to allow use of your version of this file only under the terms of
24 GPL, and not to allow others to use your version of this file under the terms
25 of the MIT license, indicate your decision by deleting the provisions above
26 and replace them with the notice and other provisions required by GPL as set
27 out in the file called "GPL-COPYING" included in this distribution. If you do
28 not delete the provisions above, a recipient may use your version of this file
29 under the terms of either the MIT license or GPL.
31 This License is also included in this distribution in the file called
34 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
35 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
36 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
37 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
38 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
39 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
40 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41 */ /**************************************************************************/
46 #include <linux/dma-mapping.h>
47 #if defined(CONFIG_L4)
48 #include <asm/api-l4env/api.h>
50 #include <linux/version.h>
51 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
52 #include <linux/pfn_t.h>
53 #include <linux/pfn.h>
57 #include "pvr_debug.h"
59 #include "devicemem_server_utils.h"
63 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
64 #include "process_stats.h"
67 #include "kernel_compatibility.h"
71 * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM
72 * pages with default memory attributes; these HIGHMEM pages are skipped in
73 * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range().
74 * Also vm_insert_page is faster.
77 * Use vm_insert_page because it is faster.
80 * Use remap_pfn_range by default because it does not issue a cache flush.
81 * It is known that ARM32 benefits from this. When other platforms become
82 * available it has to be investigated if this assumption holds for them as well.
84 * Since vm_insert_page does more precise memory accounting we have the build
85 * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug
89 #if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT)
90 #define PMR_OS_USE_VM_INSERT_PAGE 1
/* VMA "open" handler installed via gsMMapOps.
 * OSMMapPMRGeneric() sets VM_DONTCOPY on the mapping, so the kernel should
 * never duplicate this VMA (e.g. across fork) and this handler is not
 * expected to run. If it does run anyway, it logs a warning and defensively
 * re-locks the PMR's physical addresses so that the matching MMapPMRClose()
 * call on the duplicated VMA stays balanced.
 * NOTE(review): this source view is truncated (internal line numbers skip);
 * the refcount-increment mentioned in the comment below is presumably in the
 * elided lines — confirm against the full file. */
93 static void MMapPMROpen(struct vm_area_struct *ps_vma)
95 PMR *psPMR = ps_vma->vm_private_data;
97 /* Our VM flags should ensure this function never gets called */
98 PVR_DPF((PVR_DBG_WARNING,
99 "%s: Unexpected mmap open call, this is probably an application bug.",
101 PVR_DPF((PVR_DBG_WARNING,
102 "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p",
106 ps_vma->vm_end - ps_vma->vm_start,
109 /* In case we get called anyway let's do things right by increasing the refcount and
110 * locking down the physical addresses. */
113 if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK)
115 PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__));
/* VMA "close" handler: tears down per-mapping process statistics and then
 * unlocks the PMR's physical addresses, balancing the lock taken when the
 * mapping was created in OSMMapPMRGeneric().
 * With PVRSRV_ENABLE_MEMORY_STATS, one allocation record was added per
 * mapped virtual page, so each is removed individually; otherwise a single
 * aggregate stat covering the whole VMA length is decremented.
 * NOTE(review): truncated view — the loop increment for vAddr, the closing
 * #endif lines, and any PMR refcount drop are in elided lines. */
120 static void MMapPMRClose(struct vm_area_struct *ps_vma)
122 PMR *psPMR = ps_vma->vm_private_data;
124 #if defined(PVRSRV_ENABLE_PROCESS_STATS)
125 #if defined(PVRSRV_ENABLE_MEMORY_STATS)
127 uintptr_t vAddr = ps_vma->vm_start;
129 while (vAddr < ps_vma->vm_end)
132 PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT64)vAddr);
137 PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, ps_vma->vm_end - ps_vma->vm_start);
141 PMRUnlockSysPhysAddresses(psPMR);
146 * This vma operation is used to read data from mmap regions. It is called
147 * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
148 * requests and reads from /proc/<pid>/mem.
/* VMA "access" handler (see comment above): services reads/writes into the
 * mapping on behalf of access_process_vm() — used by PTRACE_PEEKDATA and
 * /proc/<pid>/mem. Translates the faulting user address into a byte offset
 * within the PMR, then delegates to PMR_WriteBytes()/PMR_ReadBytes().
 * Returns the number of bytes copied on success, -EINVAL otherwise.
 * NOTE(review): truncated view — the eError declaration, the trailing
 * buf/len arguments of the PMR_*Bytes calls, the if (write) branch and the
 * return statement are in elided lines. */
150 static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
151 void *buf, int len, int write)
153 PMR *psPMR = ps_vma->vm_private_data;
154 unsigned long ulOffset = addr - ps_vma->vm_start;
155 size_t uiBytesCopied;
157 int iRetVal = -EINVAL;
161 eError = PMR_WriteBytes(psPMR,
162 (IMG_DEVMEM_OFFSET_T) ulOffset,
169 eError = PMR_ReadBytes(psPMR,
170 (IMG_DEVMEM_OFFSET_T) ulOffset,
176 if (eError != PVRSRV_OK)
178 PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)",
180 write ? "PMR_WriteBytes" : "PMR_ReadBytes",
185 iRetVal = uiBytesCopied;
/* vm_operations installed on every PMR-backed user mapping by
 * OSMMapPMRGeneric(): open/close keep the PMR's physical-address lock
 * balanced across VMA lifetime, and .access enables ptrace / /proc/pid/mem
 * reads of the mapping via MMapVAccess(). */
191 static const struct vm_operations_struct gsMMapOps =
193 .open = &MMapPMROpen,
194 .close = &MMapPMRClose,
195 .access = MMapVAccess,
/* Map one PMR-page-sized chunk of physical memory (described by psCpuPAddr)
 * into the user VMA at ps_vma->vm_start + uiOffset.
 *
 * Three insertion strategies, chosen by the caller (OSMMapPMRGeneric):
 *   - bUseVMInsertPage && bUseMixedMap : vm_insert_mixed() (debug path)
 *   - bUseVMInsertPage                 : vm_insert_page()  (order-0 pages only)
 *   - otherwise                        : remap_pfn_range() of
 *                                        1 << uiLog2PageSize contiguous bytes
 *
 * On kernels >= 4.5 the PFN is carried in a pfn_t (sPFN); older kernels use
 * a raw unsigned long PFN (uiPFN). Under CONFIG_L4 the physical address is
 * first translated through l4x_phys_to_virt() to avoid per-arch code.
 *
 * Returns the Linux status from the chosen insertion call (0 on success).
 * NOTE(review): truncated view — declarations of sPFN/uiPFN/iStatus, several
 * #else/#endif lines, the final PFN argument of vm_insert_mixed(), the
 * pre-4.5 pfn_to_page() argument of vm_insert_page(), the PFN argument of
 * remap_pfn_range() and the return statement are all in elided lines. */
198 static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode,
199 struct vm_area_struct *ps_vma,
200 IMG_DEVMEM_OFFSET_T uiOffset,
201 IMG_CPU_PHYADDR *psCpuPAddr,
202 IMG_UINT32 uiLog2PageSize,
203 IMG_BOOL bUseVMInsertPage,
204 IMG_BOOL bUseMixedMap)
207 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
213 #if defined(CONFIG_L4)
214 IMG_CPU_VIRTADDR pvCpuVAddr;
216 /* Use L4LINUX function, removes per-arch code-path */
217 pvCpuVAddr = l4x_phys_to_virt(psCpuPAddr->uiAddr);
218 if (pvCpuVAddr == NULL)
223 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
224 sPFN = phys_to_pfn_t((uintptr_t)pvCpuVAddr, 0);
226 uiPFN = ((uintptr_t) pvCpuVAddr) >> PAGE_SHIFT;
228 #else /* defined(CONFIG_L4) */
229 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
230 sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr, 0);
232 uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT;
/* Sanity: the physical address must be page-aligned or the shift loses bits. */
233 PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr);
238 * vm_insert_page() allows insertion of individual pages into user
239 * VMA space _only_ if page is a order-zero allocated page
241 if (bUseVMInsertPage)
246 * This path is just for debugging. It should be
247 * equivalent to the remap_pfn_range() path.
249 iStatus = vm_insert_mixed(ps_vma,
250 ps_vma->vm_start + uiOffset,
251 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
259 /* Since kernel 3.7 this sets VM_MIXEDMAP internally */
260 iStatus = vm_insert_page(ps_vma,
261 ps_vma->vm_start + uiOffset,
262 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
263 pfn_t_to_page(sPFN));
272 NOTE: Regarding absence of dma_mmap_coherent() in _OSMMapPMR()
274 The current services mmap model maps in a PMR's full-length size
275 into the user VMA & applies any user specified offset to the kernel
276 returned zero-offset based VA in services client; this essentially
277 means services server ignores ps_vma->vm_pgoff (this houses hPMR)
280 Furthermore, during a DMA/CMA memory allocation, multiple order-n
281 pages are used to satisfy an allocation request due to DMA/CMA
282 framework rounding-up allocation size to next power-of-two which
283 can lead to wasted memory (so we don't allocate using single call).
285 The combination of the above two issues mean that we cannot use the
286 dma_mmap_coherent() for a number of reasons outlined below:
288 - Services mmap semantics does not fit with dma_mmap_coherent()
289 which requires proper ps_vma->vm_pgoff; seeing this houses a
290 hPMR handle value, calls into dma_mmap_coherent() fails. This
291 could be avoided by forcing ps_vma->vm_pgoff to zero but the
292 ps_vma->vm_pgoff is applied to DMA bus address PFN and not
293 user VMA which is always mapped at ps_vma->vm_start.
295 - As multiple order-n pages are used for DMA/CMA allocations, a
296 single dma_mmap_coherent() call with a vma->vm_pgoff set to
297 zero cannot (maybe) be used because there is no guarantee that
298 all of the multiple order-n pages in the PMR are physically
299 contiguous from the first entry to the last. Whilst this is
300 highly likely to be the case, there is no guarantee that it
301 will be so we cannot depend on this being the case.
303 The solution is to manually mmap DMA/CMA pages into user VMA
304 using remap_pfn_range() directly. Furthermore, accounting is
305 always compromised for DMA/CMA allocations.
307 size_t uiNumContiguousBytes = 1ULL << uiLog2PageSize;
309 iStatus = remap_pfn_range(ps_vma,
310 ps_vma->vm_start + uiOffset,
311 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
316 uiNumContiguousBytes,
317 ps_vma->vm_page_prot);
/* Generic entry point for mmap()-ing a PMR into a user process VMA.
 *
 * Sequence (as visible in this truncated view):
 *   1. Lock the PMR's physical addresses for the lifetime of the mapping.
 *   2. Reject writable non-shared (MAP_PRIVATE + PROT_WRITE) mappings —
 *      COW semantics are not supported for device memory.
 *   3. Derive page protection from the PMR's CPU cache-mode flags
 *      (uncached / write-combine / cached; LMA heaps are forced to
 *      write-combine except on Plato).
 *   4. Set restrictive VM flags: VM_IO, VM_DONTDUMP, VM_DONTEXPAND
 *      (no mremap), VM_DONTCOPY (no inheritance across fork).
 *   5. Decide between vm_insert_page() and remap_pfn_range()
 *      (bUseVMInsertPage), using stack arrays for the PFN/validity tables
 *      when the mapping fits in PMR_MAX_TRANSLATION_STACK_ALLOC entries,
 *      heap allocations otherwise.
 *   6. Query all CPU physical addresses via PMR_CpuPhysAddr(), scan for
 *      pages without struct page backing (forces VM_MIXEDMAP instead of
 *      VM_PFNMAP), then map each valid PMR page via _OSMMapPMR().
 *   7. Record process stats, free any heap tables, stash the PMR in
 *      vm_private_data and install gsMMapOps so open/close/access work.
 *
 * Returns PVRSRV_OK on success; on failure unwinds via the error paths at
 * the bottom (unlock phys addresses, free tables).
 * NOTE(review): truncated view — declarations (eError, uiLength, sPageProt,
 * pbValid, iStatus), switch braces/breaks, several #else/#endif lines, the
 * goto labels, the PMR ref-take call and the return statements are all in
 * elided lines; confirm against the full file. */
324 OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
326 struct vm_area_struct *ps_vma = pOSMMapData;
327 PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR);
331 IMG_DEVMEM_OFFSET_T uiOffset;
332 IMG_UINT32 ui32CPUCacheFlags;
334 IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
335 IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
336 IMG_UINT32 uiOffsetIdx;
337 IMG_UINT32 uiNumOfPFNs;
338 IMG_UINT32 uiLog2PageSize;
339 IMG_CPU_PHYADDR *psCpuPAddr;
341 IMG_BOOL bUseMixedMap = IMG_FALSE;
342 IMG_BOOL bUseVMInsertPage = IMG_FALSE;
/* Keep the backing pages resident while mapped; unlocked in MMapPMRClose(). */
344 eError = PMRLockSysPhysAddresses(psPMR);
345 if (eError != PVRSRV_OK)
/* Writable private mappings would need copy-on-write — not supported. */
350 if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
351 ((ps_vma->vm_flags & VM_SHARED) == 0))
353 eError = PVRSRV_ERROR_INVALID_PARAMS;
357 sPageProt = vm_get_page_prot(ps_vma->vm_flags);
/* Apply the PMR's CPU cache mode to the page protection bits. */
359 ui32CPUCacheFlags = DevmemCPUCacheMode(psDevNode, PMR_Flags(psPMR));
360 switch (ui32CPUCacheFlags)
362 case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
363 sPageProt = pgprot_noncached(sPageProt);
366 case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
367 sPageProt = pgprot_writecombine(sPageProt);
370 case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
372 /* Do not set to write-combine for plato */
373 #if !defined(PLATO_MEMORY_CONFIG)
374 PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR);
376 if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA)
377 sPageProt = pgprot_writecombine(sPageProt);
383 eError = PVRSRV_ERROR_INVALID_PARAMS;
386 ps_vma->vm_page_prot = sPageProt;
388 ps_vma->vm_flags |= VM_IO;
390 /* Don't include the mapping in core dumps */
391 ps_vma->vm_flags |= VM_DONTDUMP;
394 * Disable mremap because our nopage handler assumes all
395 * page requests have already been validated.
397 ps_vma->vm_flags |= VM_DONTEXPAND;
399 /* Don't allow mapping to be inherited across a process fork */
400 ps_vma->vm_flags |= VM_DONTCOPY;
402 uiLength = ps_vma->vm_end - ps_vma->vm_start;
404 /* Is this mmap targeting non order-zero pages or does it use pfn mappings?
405 * If yes, don't use vm_insert_page */
406 uiLog2PageSize = PMR_GetLog2Contiguity(psPMR);
407 #if defined(PMR_OS_USE_VM_INSERT_PAGE)
408 bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM);
411 /* Can we use stack allocations */
412 uiNumOfPFNs = uiLength >> uiLog2PageSize;
413 if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC)
415 psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr));
416 if (psCpuPAddr == NULL)
418 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
422 /* Should allocation fail, clean-up here before exiting */
423 pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid));
426 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
427 OSFreeMem(psCpuPAddr);
433 psCpuPAddr = asCpuPAddr;
437 /* Obtain map range pfns */
438 eError = PMR_CpuPhysAddr(psPMR,
444 if (eError != PVRSRV_OK)
450 * Scan the map range for pfns without struct page* handling. If
451 * we find one, this is a mixed map, and we can't use vm_insert_page()
452 * NOTE: vm_insert_page() allows insertion of individual pages into user
453 * VMA space _only_ if said page is an order-zero allocated page.
455 if (bUseVMInsertPage)
457 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
463 for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx)
465 if (pbValid[uiOffsetIdx])
467 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
468 sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0);
470 if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
472 uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
473 PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
475 if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
476 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
478 bUseMixedMap = IMG_TRUE;
486 ps_vma->vm_flags |= VM_MIXEDMAP;
491 ps_vma->vm_flags |= VM_PFNMAP;
494 /* For each PMR page-size contiguous bytes, map page(s) into user VMA */
495 for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<uiLog2PageSize)
497 uiOffsetIdx = uiOffset >> uiLog2PageSize;
499 * Only map in pages that are valid, any that aren't will be
500 * picked up by the nopage handler which will return a zeroed
503 if (pbValid[uiOffsetIdx])
505 iStatus = _OSMMapPMR(psDevNode,
508 &psCpuPAddr[uiOffsetIdx],
514 /* Failure error code doesn't get propagated */
515 eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
520 #if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
521 PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
522 (void*)(uintptr_t)(ps_vma->vm_start + uiOffset),
523 psCpuPAddr[uiOffsetIdx],
529 #if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
530 PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, uiNumOfPFNs * PAGE_SIZE);
/* Free the heap-allocated PFN table if the stack array was too small. */
533 if (psCpuPAddr != asCpuPAddr)
535 OSFreeMem(psCpuPAddr);
539 /* let us see the PMR so we can unlock it later */
540 ps_vma->vm_private_data = psPMR;
542 /* Install open and close handlers for ref-counting */
543 ps_vma->vm_ops = &gsMMapOps;
546 * Take a reference on the PMR so that it can't be freed while mapped
547 * into the user process.
553 /* Error exit paths follow */
555 if (psCpuPAddr != asCpuPAddr)
557 OSFreeMem(psCpuPAddr);
561 PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error. Abort!"));
562 PMRUnlockSysPhysAddresses(psPMR);