for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
{
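+ /* NB: the CacheOpExec return is deliberately ignored on this path;
+ failures here are not propagated to the caller */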
- eError = CacheOpExec(ppsPMR[ui32Idx],
- puiOffset[ui32Idx],
- puiSize[ui32Idx],
- puiCacheOp[ui32Idx]);
+ (void)CacheOpExec(ppsPMR[ui32Idx],
+ puiOffset[ui32Idx],
+ puiSize[ui32Idx],
+ puiCacheOp[ui32Idx]);
}
/* No CacheOp fence dependencies */
}
else
{
+ IMG_DEVMEM_SIZE_T uiLogicalSize;
CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
{
/* As PVRSRV_CACHE_OP_INVALIDATE is used to transfer
device memory buffer ownership back to the processor,
we cannot defer it so must action it immediately */
- if (puiCacheOp[ui32Idx] == PVRSRV_CACHE_OP_INVALIDATE)
+ if (puiCacheOp[ui32Idx] & PVRSRV_CACHE_OP_INVALIDATE)
{
eError = CacheOpExec (ppsPMR[ui32Idx],
puiOffset[ui32Idx],
puiSize[ui32Idx],
puiCacheOp[ui32Idx]);
continue;
}
+ /* Ensure request is valid before deferring to CacheOp thread */
+ eError = PMR_LogicalSize(ppsPMR[ui32Idx], &uiLogicalSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((CACHEOP_DPFL,
+ "%s: PMR_LogicalSize failed (%u), cannot defer CacheOp",
+ __FUNCTION__, eError));
+
+ /* Signal the CacheOp thread to ensure queued items get processed */
+ (void) OSEventObjectSignal(psPVRSRVData->hCacheOpThreadEventObject);
+
+ return eError;
+ }
+ else if ((puiOffset[ui32Idx] + puiSize[ui32Idx]) > uiLogicalSize)
+ {
+ PVR_DPF((CACHEOP_DPFL,
+ "%s: Invalid parameters, cannot defer CacheOp",
+ __FUNCTION__));
+
+ /* Signal the CacheOp thread to ensure queued items get processed */
+ (void) OSEventObjectSignal(psPVRSRVData->hCacheOpThreadEventObject);
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
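+ /* NB: assumes (puiOffset[ui32Idx] + puiSize[ui32Idx]) cannot wrap around;
+ a wrapped unsigned sum would otherwise pass this bounds check */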
+
/* For now use dynamic alloc, static CCB _might_ be faster */
psCacheOpWorkItem = OSAllocMem(sizeof(CACHEOP_WORK_ITEM));
if (psCacheOpWorkItem == NULL)
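/* Cannot defer without a work item; fall back to actioning
all CacheOps immediately via CacheOpExec */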
for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
{
- eError = CacheOpExec(ppsPMR[ui32Idx],
- puiOffset[ui32Idx],
- puiSize[ui32Idx],
- puiCacheOp[ui32Idx]);
+ PVRSRV_ERROR eError2 = CacheOpExec(ppsPMR[ui32Idx],
+ puiOffset[ui32Idx],
+ puiSize[ui32Idx],
+ puiCacheOp[ui32Idx]);
+ if (eError2 != PVRSRV_OK)
+ {
+ eError = eError2;
+ PVR_DPF((CACHEOP_DPFL,
+ "%s: CacheOpExec failed (%u)",
+ __FUNCTION__, eError));
+ }
}
/* For immediate RBF (range-based flush), common/completed are identical */
for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
{
uiCacheOp = SetCacheOp(uiCacheOp, puiCacheOp[ui32Idx]);
- if (puiCacheOp[ui32Idx] == PVRSRV_CACHE_OP_INVALIDATE)
+ if (puiCacheOp[ui32Idx] & PVRSRV_CACHE_OP_INVALIDATE)
{
/* Cannot be deferred, action now */
bHasInvalidate = IMG_TRUE;
PVRSRV_CACHE_OP uiCacheOp)
{
PVRSRV_ERROR eError;
+ IMG_DEVMEM_SIZE_T uiLogicalSize;
IMG_BOOL bUsedGlobalFlush = IMG_FALSE;
#if defined(CACHEOP_DEBUG)
/* This interface is always synchronous and not deferred;
OSAtomicIncrement(&ghCommonCacheOpSeqNum) : sCacheOpWorkItem.ui32OpSeqNum;
#endif
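+ /* Validate the request against the PMR's logical size before processing */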
+ eError = PMR_LogicalSize(psPMR, &uiLogicalSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((CACHEOP_DPFL,
+ "%s: PMR_LogicalSize failed (%u)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+ else if ((uiOffset + uiSize) > uiLogicalSize)
+ {
+ PVR_DPF((CACHEOP_DPFL,
+ "%s: Invalid parameters",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
/* Perform range-based cache maintenance operation */
eError = PMRLockSysPhysAddresses(psPMR);
if (eError != PVRSRV_OK)