*****************************************************************************/
-
-
+#include <linux/string.h>
#include "gc_hal_kernel_precomp.h"
#define _GC_OBJ_ZONE gcvZONE_KERNEL
#if (0==gcdPAGE_ALLOC_LIMIT)
// dkm : force gcvSURF_TILE_STATUS use contiguous memory
- if(gcvSURF_TILE_STATUS == Type) pool = gcvPOOL_CONTIGUOUS;
+ //if(gcvSURF_TILE_STATUS == Type) pool = gcvPOOL_CONTIGUOUS;
#endif
do
pool = gcvPOOL_SYSTEM;
}
else
- if (pool == gcvPOOL_SYSTEM)
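+ /* Do not advance tile status surfaces to the contiguous pool. */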
+ if ((pool == gcvPOOL_SYSTEM)
+ && (Type != gcvSURF_TILE_STATUS)
+ )
{
/* Advance to contiguous memory. */
pool = gcvPOOL_CONTIGUOUS;
/* Return pool used for allocation. */
*Pool = pool;
} else {
- printk("_AllocateMemory fail! pool=%d, Bytes=%d, Type=%d\n", pool, (int)Bytes, Type);
+ printk("_AllocateMemory fail! pool=%d->%d, Bytes=%d, Type=%d\n", *Pool, pool, (int)Bytes, Type);
}
/* Return status. */
gcmkONERROR(
gckOS_GetBaseAddress(Kernel->os,
&Interface->u.GetBaseAddress.baseAddress));
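+ /* Report the GPU_FW_VERSION string back through the GetBaseAddress reply. */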
+ strcpy(Interface->u.GetBaseAddress.fwVersion, GPU_FW_VERSION);
+#if BUILD_FOR_1_28
+ strcat(Interface->u.GetBaseAddress.fwVersion, "_for1.28");
+#endif
break;
case gcvHAL_QUERY_VIDEO_MEMORY:
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/atomic.h>
-#ifndef NO_DMA_COHERENT
+#if USE_DMA_COHERENT
#include <linux/dma-mapping.h>
-#endif /* NO_DMA_COHERENT */
+#endif /* USE_DMA_COHERENT */
#include <linux/delay.h>
#include <mach/pmu.h>
#include <mach/cru.h>
-#define IOREMAP_IN_NOPAGE 0
#if !USE_NEW_LINUX_SIGNAL
#define USER_SIGNAL_TABLE_LEN_INIT 64
gcsMapedNonPagedCache * cacheTail;
gctINT pageNum;
- struct page * pageCache[100];
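+ /* Cache of pre-allocated 5-page blocks: coherent/writecombine mappings when USE_DMA_COHERENT is set, raw pages otherwise. */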
+ #if USE_DMA_COHERENT
+ gctSTRING addr[100];
+ dma_addr_t dmaHandle[100];
+ #else
+ struct page * pageCache[100];
+ #endif
#endif
};
#if gcdkUSE_MAPED_NONPAGE_CACHE
for(os->pageNum=0; os->pageNum<50; os->pageNum++) {
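+ /* Pre-fill 50 of the 100 cache slots with 5-page buffers. */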
+ #if USE_DMA_COHERENT
+ os->addr[os->pageNum] =
+ #if (2==gcdENABLE_MEM_CACHE)
+ dma_alloc_writecombine(NULL, 5 * PAGE_SIZE, &os->dmaHandle[os->pageNum], GFP_ATOMIC);
+ #else
+ dma_alloc_coherent(NULL, 5 * PAGE_SIZE, &os->dmaHandle[os->pageNum], GFP_ATOMIC);
+ #endif
+ #else
os->pageCache[os->pageNum] = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(5 * PAGE_SIZE));
+ #endif
}
- //printk("os->pageNum = %d\n", os->pageNum);
#endif
/* Return pointer to the gckOS object. */
_FreeAllMapedNonPagedCache(Os, 0);
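+ /* Free any 5-page blocks still held in the cache. */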
for(i=0; i<Os->pageNum; i++) {
- if(Os->pageCache[i]) {
- free_pages((unsigned long)page_address(Os->pageCache[i]), get_order(5 * PAGE_SIZE));
- }
+ #if USE_DMA_COHERENT
+ if(Os->addr[i]) dma_free_coherent(gcvNULL, 5 * PAGE_SIZE, Os->addr[i], Os->dmaHandle[i]);
+ #else
+ if(Os->pageCache[i]) free_pages((unsigned long)page_address(Os->pageCache[i]), get_order(5 * PAGE_SIZE));
+ #endif
}
#endif
return gcvSTATUS_OUT_OF_RESOURCES;
}
-#ifndef NO_DMA_COHERENT
+#if USE_DMA_COHERENT
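+ /* Map to user space with the same attributes used at allocation: writecombine when gcdENABLE_MEM_CACHE==2, coherent otherwise. */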
+ #if (2==gcdENABLE_MEM_CACHE)
+ if (dma_mmap_writecombine(NULL,
+ #else
if (dma_mmap_coherent(NULL,
+ #endif
mdlMap->vma,
mdl->addr,
mdl->dmaHandle,
gctINT numPages;
PLINUX_MDL mdl;
PLINUX_MDL_MAP mdlMap = 0;
-#if IOREMAP_IN_NOPAGE
gctSTRING addr;
-#endif
-#ifdef NO_DMA_COHERENT
+#if !USE_DMA_COHERENT
struct page * page;
long size, order;
gctPOINTER vaddr, reserved_vaddr;
MEMORY_LOCK(Os);
-#ifndef NO_DMA_COHERENT
- addr = dma_alloc_coherent(NULL,
- mdl->numPages * PAGE_SIZE,
- &mdl->dmaHandle,
- GFP_ATOMIC);
+#if USE_DMA_COHERENT
+ #if gcdkUSE_MAPED_NONPAGE_CACHE
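+ /* Fast path: reuse a cached 5-page block instead of calling the DMA allocator. */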
+ if(5==mdl->numPages && Os->pageNum>0 && Os->addr[Os->pageNum-1]) {
+ Os->pageNum--;
+ addr = Os->addr[Os->pageNum];
+ mdl->dmaHandle = Os->dmaHandle[Os->pageNum];
+ Os->addr[Os->pageNum] = gcvNULL;
+ Os->dmaHandle[Os->pageNum] = 0;
+ } else {
+ #if (2==gcdENABLE_MEM_CACHE)
+ addr = dma_alloc_writecombine(NULL,
+ #else
+ addr = dma_alloc_coherent(NULL,
+ #endif
+ mdl->numPages * PAGE_SIZE,
+ &mdl->dmaHandle,
+ GFP_ATOMIC);
+ }
+ #else
+ #if (2==gcdENABLE_MEM_CACHE)
+ addr = dma_alloc_writecombine(NULL,
+ #else
+ addr = dma_alloc_coherent(NULL,
+ #endif
+ mdl->numPages * PAGE_SIZE,
+ &mdl->dmaHandle,
+ GFP_ATOMIC);
+ #endif
#else
size = mdl->numPages * PAGE_SIZE;
order = get_order(size);
reserved_size -= PAGE_SIZE;
}
-#if IOREMAP_IN_NOPAGE
- // dkm: gcdENABLE_MEM_CACHE
- #if (1==gcdENABLE_MEM_CACHE)
- addr = ioremap_cached(virt_to_phys(vaddr), size);
- #else
- addr = ioremap_nocache(virt_to_phys(vaddr), size);
- #endif
+// dkm: gcdENABLE_MEM_CACHE selects a cached or uncached kernel mapping of the allocated pages
+#if (1==gcdENABLE_MEM_CACHE)
+ addr = ioremap_cached(virt_to_phys(vaddr), size);
+#else
+ addr = ioremap_nocache(virt_to_phys(vaddr), size);
#endif
+
mdl->dmaHandle = virt_to_phys(vaddr);
mdl->kaddr = vaddr;
#endif
-#if IOREMAP_IN_NOPAGE
if (addr == gcvNULL)
{
gcmkTRACE_ZONE(gcvLEVEL_INFO,
return gcvSTATUS_OUT_OF_MEMORY;
}
-#endif
if ((Os->baseAddress & 0x80000000) != (mdl->dmaHandle & 0x80000000))
{
| (Os->baseAddress & 0x80000000);
}
-#if IOREMAP_IN_NOPAGE
mdl->addr = addr;
-#else
- mdl->addr = vaddr;
-#endif
/*
* We will not do any mapping from here.
return gcvSTATUS_OUT_OF_RESOURCES;
}
-#ifndef NO_DMA_COHERENT
+#if USE_DMA_COHERENT
+ #if (2==gcdENABLE_MEM_CACHE)
+ if (dma_mmap_writecombine(NULL,
+ #else
if (dma_mmap_coherent(NULL,
+ #endif
mdlMap->vma,
mdl->addr,
mdl->dmaHandle,
// dkm: add
struct mm_struct * mm;
-#ifdef NO_DMA_COHERENT
+#if !USE_DMA_COHERENT
unsigned size;
gctPOINTER vaddr;
-#endif /* NO_DMA_COHERENT */
+#endif /* !USE_DMA_COHERENT */
if(MemLock) MEMORY_LOCK(Os);
-#ifndef NO_DMA_COHERENT
- dma_free_coherent(gcvNULL,
- mdl->numPages * PAGE_SIZE,
- mdl->addr,
- mdl->dmaHandle);
+#if USE_DMA_COHERENT
+ #if gcdkUSE_MAPED_NONPAGE_CACHE
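+ /* Put freed 5-page blocks back into the cache when a slot is free; otherwise release them. */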
+ if(5==mdl->numPages && Os->pageNum<100 && !Os->addr[Os->pageNum]) {
+ Os->addr[Os->pageNum] = mdl->addr;
+ Os->dmaHandle[Os->pageNum] = mdl->dmaHandle;
+ Os->pageNum ++;
+ } else {
+ dma_free_coherent(gcvNULL,
+ mdl->numPages * PAGE_SIZE,
+ mdl->addr,
+ mdl->dmaHandle);
+ }
+ #else
+ dma_free_coherent(gcvNULL,
+ mdl->numPages * PAGE_SIZE,
+ mdl->addr,
+ mdl->dmaHandle);
+ #endif
#else
size = mdl->numPages * PAGE_SIZE;
vaddr = mdl->kaddr;
free_pages((unsigned long)mdl->kaddr, get_order(mdl->numPages * PAGE_SIZE));
#endif
-#if IOREMAP_IN_NOPAGE
iounmap(mdl->addr);
-#endif
-#endif /* NO_DMA_COHERENT */
+#endif /* USE_DMA_COHERENT */
mdlMap = mdl->maps;