\r
static inline void rga_queue_power_off_work(void)\r
{\r
- queue_delayed_work(system_nrt_wq, &drvdata->power_off_work, RGA_POWER_OFF_DELAY);\r
+ queue_delayed_work(system_wq, &drvdata->power_off_work, RGA_POWER_OFF_DELAY);\r
}\r
\r
/* Caller must hold rga_service.lock */\r
unsigned long *mmu_buf_virtual;\r
uint32_t i;\r
uint32_t *buf_p;\r
-\r
+ uint32_t *buf;\r
+\r
/* malloc pre scale mid buf mmu table */\r
mmu_buf = kzalloc(1024*8, GFP_KERNEL);\r
mmu_buf_virtual = kzalloc(1024*2*sizeof(unsigned long), GFP_KERNEL);\r
\r
buf_p = kmalloc(1024*256, GFP_KERNEL);\r
rga_mmu_buf.buf_virtual = buf_p;\r
- rga_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((unsigned long)buf_p));\r
+#if (defined(CONFIG_ARM) && defined(CONFIG_ARM_LPAE))\r
+ buf = (uint32_t *)(uint32_t)virt_to_phys((void *)((unsigned long)buf_p));\r
+#else\r
+ buf = (uint32_t *)virt_to_phys((void *)((unsigned long)buf_p));\r
+#endif\r
+ rga_mmu_buf.buf = buf;\r
rga_mmu_buf.front = 0;\r
rga_mmu_buf.back = 64*1024;\r
rga_mmu_buf.size = 64*1024;\r