-/* \r
+/*\r
* Copyright (C) 2012 ROCKCHIP, Inc.\r
*\r
* This software is licensed under the terms of the GNU General Public\r
* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
* GNU General Public License for more details.\r
- * \r
+ *\r
*/\r
\r
+#define pr_fmt(fmt) "rga: " fmt\r
#include <linux/kernel.h>\r
#include <linux/init.h>\r
#include <linux/module.h>\r
#include <mach/irqs.h>\r
#include <linux/fs.h>\r
#include <asm/uaccess.h>\r
-#include <linux/firmware.h>\r
#include <linux/miscdevice.h>\r
#include <linux/poll.h>\r
#include <linux/delay.h>\r
#include <linux/timer.h>\r
#include <linux/time.h>\r
#include <asm/cacheflush.h>\r
-#include <linux/compiler.h>\r
#include <linux/slab.h>\r
#include <linux/fb.h>\r
-\r
+#include <linux/wakelock.h>\r
\r
#include "rga.h"\r
#include "rga_reg_info.h"\r
struct miscdevice miscdev;\r
struct device dev;\r
void *rga_base;\r
- int irq0;\r
- \r
+ int irq;\r
+\r
struct delayed_work power_off_work;\r
void (*rga_irq_callback)(int rga_retval); //callback function used by aync call\r
-};\r
+ struct wake_lock wake_lock;\r
\r
+ struct clk *aclk_rga;\r
+ struct clk *hclk_rga;\r
+ struct clk *pd_rga;\r
+};\r
\r
-static struct rga_drvdata *drvdata = NULL;\r
+static struct rga_drvdata *drvdata;\r
rga_service_info rga_service;\r
\r
-static struct clk *aclk_rga;\r
-static struct clk *hclk_rga;\r
-static struct clk *pd_rga;\r
-\r
-\r
static int rga_blit_async(rga_session *session, struct rga_req *req);\r
static void rga_del_running_list(void);\r
static void rga_del_running_list_timeout(void);\r
-static void rga_try_set_reg(uint32_t num);\r
+static void rga_try_set_reg(void);\r
\r
\r
/* Logging */\r
\r
rga_write(1, RGA_SYS_CTRL); //RGA_SYS_CTRL\r
\r
- for(i = 0; i < RGA_RESET_TIMEOUT; i++) \r
- {\r
+ for(i = 0; i < RGA_RESET_TIMEOUT; i++)\r
+ {\r
reg = rga_read(RGA_SYS_CTRL) & 1; //RGA_SYS_CTRL\r
\r
if(reg == 0)\r
int running;\r
int num_done;\r
struct rga_reg *reg, *reg_tmp;\r
- rga_session *session, *session_tmp; \r
- struct list_head *next; \r
+ rga_session *session, *session_tmp;\r
+ struct list_head *next;\r
\r
running = atomic_read(&rga_service.total_running);\r
printk("rga total_running %d\n", running);\r
\r
/* Dump waiting list info */\r
if (!list_empty(&rga_service.waiting))\r
- { \r
+ {\r
next = &rga_service.waiting;\r
\r
printk("rga_service dump waiting list\n");\r
reg = list_entry(next->next, struct rga_reg, status_link);\r
running = atomic_read(®->session->task_running);\r
num_done = atomic_read(®->session->num_done);\r
- printk("rga session pid %d, done %d, running %d\n", reg->session->pid, num_done, running); \r
+ printk("rga session pid %d, done %d, running %d\n", reg->session->pid, num_done, running);\r
next = next->next;\r
}\r
- while(!list_empty(next)); \r
+ while(!list_empty(next));\r
}\r
\r
/* Dump running list info */\r
if (!list_empty(&rga_service.running))\r
{\r
printk("rga_service dump running list\n");\r
- \r
+\r
next = &rga_service.running;\r
do\r
{\r
reg = list_entry(next->next, struct rga_reg, status_link);\r
running = atomic_read(®->session->task_running);\r
num_done = atomic_read(®->session->num_done);\r
- printk("rga session pid %d, done %d, running %d:\n", reg->session->pid, num_done, running); \r
+ printk("rga session pid %d, done %d, running %d:\n", reg->session->pid, num_done, running);\r
next = next->next;\r
}\r
- while(!list_empty(next)); \r
+ while(!list_empty(next));\r
}\r
- \r
- list_for_each_entry_safe(session, session_tmp, &rga_service.session, list_session) \r
+\r
+ list_for_each_entry_safe(session, session_tmp, &rga_service.session, list_session)\r
{\r
printk("session pid %d:\n", session->pid);\r
running = atomic_read(&session->task_running);\r
printk("task_running %d\n", running);\r
- list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) \r
+ list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link)\r
{\r
printk("waiting register set 0x%.8x\n", (unsigned int)reg);\r
}\r
- list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) \r
+ list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link)\r
{\r
printk("running register set 0x%.8x\n", (unsigned int)reg);\r
}\r
}\r
}\r
\r
-\r
+/* Caller must hold rga_service.lock */\r
static void rga_power_on(void)\r
{\r
- //printk("rga_power_on\n");\r
- //cancel_delayed_work_sync(&drvdata->power_off_work);\r
- spin_lock_bh(&rga_service.lock_power);\r
+ cancel_delayed_work_sync(&drvdata->power_off_work);\r
+ queue_delayed_work(system_nrt_wq, &drvdata->power_off_work, RGA_POWER_OFF_DELAY);\r
if (rga_service.enable)\r
- goto out;\r
+ return;\r
\r
- clk_enable(aclk_rga);\r
- clk_enable(hclk_rga);\r
- clk_enable(pd_rga);\r
+ clk_enable(drvdata->aclk_rga);\r
+ clk_enable(drvdata->hclk_rga);\r
+ clk_enable(drvdata->pd_rga);\r
+ wake_lock(&drvdata->wake_lock);\r
rga_service.enable = true;\r
-out:\r
- spin_unlock_bh(&rga_service.lock_power);\r
}\r
\r
-\r
-static void rga_power_off(struct work_struct *work)\r
+/* Caller must hold rga_service.lock */\r
+static void rga_power_off(void)\r
{\r
- int total_running;\r
+ int total_running;\r
\r
- spin_lock_bh(&rga_service.lock_power); \r
- if(!rga_service.enable)\r
- {\r
- spin_unlock_bh(&rga_service.lock_power);\r
+ if (!rga_service.enable) {\r
return;\r
}\r
\r
- rga_service.enable = false;\r
-\r
- printk("rga_power_off\n");\r
-\r
- total_running = atomic_read(&rga_service.total_running);\r
+ total_running = atomic_read(&rga_service.total_running);\r
if (total_running) {\r
- pr_err("power off when %d task running!!\n", total_running); \r
+ pr_err("power off when %d task running!!\n", total_running);\r
mdelay(50);\r
- pr_err("delay 50 ms for running task\n"); \r
- rga_dump();\r
+ pr_err("delay 50 ms for running task\n");\r
+ rga_dump();\r
}\r
- \r
- clk_disable(pd_rga);\r
- clk_disable(aclk_rga);\r
- clk_disable(hclk_rga);\r
- spin_unlock_bh(&rga_service.lock_power);\r
- \r
+\r
+ clk_disable(drvdata->pd_rga);\r
+ clk_disable(drvdata->aclk_rga);\r
+ clk_disable(drvdata->hclk_rga);\r
+ wake_unlock(&drvdata->wake_lock);\r
+ rga_service.enable = false;\r
+}\r
+\r
+static void rga_power_off_work(struct work_struct *work)\r
+{\r
+ mutex_lock(&rga_service.lock);\r
+ rga_power_off();\r
+ mutex_unlock(&rga_service.lock);\r
}\r
\r
static int rga_flush(rga_session *session, unsigned long arg)\r
-{ \r
+{\r
int ret = 0;\r
int ret_timeout;\r
- unsigned long flag;\r
- \r
+\r
#if RGA_TEST_FLUSH_TIME\r
ktime_t start;\r
ktime_t end;\r
start = ktime_get();\r
#endif\r
\r
- ret_timeout = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY); \r
- \r
+ ret_timeout = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);\r
+\r
if (unlikely(ret_timeout < 0)) {\r
- pr_err("flush pid %d wait task ret %d\n", session->pid, ret); \r
- spin_lock_irqsave(&rga_service.lock, flag);\r
+ pr_err("flush pid %d wait task ret %d\n", session->pid, ret);\r
+ mutex_lock(&rga_service.lock);\r
rga_del_running_list();\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
+ mutex_unlock(&rga_service.lock);\r
ret = -ETIMEDOUT;\r
} else if (0 == ret_timeout) {\r
pr_err("flush pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));\r
printk("bus = %.8x\n", rga_read(RGA_INT));\r
- spin_lock_irqsave(&rga_service.lock, flag);\r
+ mutex_lock(&rga_service.lock);\r
rga_del_running_list_timeout();\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
- rga_try_set_reg(1);\r
+ rga_try_set_reg();\r
+ mutex_unlock(&rga_service.lock);\r
ret = -ETIMEDOUT;\r
}\r
\r
end = ktime_sub(end, start);\r
printk("one flush wait time %d\n", (int)ktime_to_us(end));\r
#endif\r
- \r
+\r
return ret;\r
}\r
\r
static int rga_get_result(rga_session *session, unsigned long arg)\r
{\r
//printk("rga_get_result %d\n",drvdata->rga_result);\r
- \r
+\r
int ret = 0;\r
\r
int num_done;\r
\r
num_done = atomic_read(&session->num_done);\r
- \r
+\r
if (unlikely(copy_to_user((void __user *)arg, &num_done, sizeof(int)))) {\r
printk("copy_to_user failed\n");\r
ERR("copy_to_user failed\n");\r
- ret = -EFAULT; \r
+ ret = -EFAULT;\r
}\r
return ret;\r
}\r
\r
if(!((req->render_mode == color_fill_mode) || (req->render_mode == line_point_drawing_mode)))\r
{\r
- if (unlikely((req->src.act_w <= 0) || (req->src.act_w > 8191) || (req->src.act_h <= 0) || (req->src.act_h > 8191))) \r
+ if (unlikely((req->src.act_w <= 0) || (req->src.act_w > 8191) || (req->src.act_h <= 0) || (req->src.act_h > 8191)))\r
{\r
ERR("invalid source resolution act_w = %d, act_h = %d\n", req->src.act_w, req->src.act_h);\r
return -EINVAL;\r
\r
if(!((req->render_mode == color_fill_mode) || (req->render_mode == line_point_drawing_mode)))\r
{\r
- if (unlikely((req->src.vir_w <= 0) || (req->src.vir_w > 8191) || (req->src.vir_h <= 0) || (req->src.vir_h > 8191))) \r
+ if (unlikely((req->src.vir_w <= 0) || (req->src.vir_w > 8191) || (req->src.vir_h <= 0) || (req->src.vir_h > 8191)))\r
{\r
ERR("invalid source resolution vir_w = %d, vir_h = %d\n", req->src.vir_w, req->src.vir_h);\r
return -EINVAL;\r
}\r
\r
//check dst width and height\r
- if (unlikely((req->dst.act_w <= 0) || (req->dst.act_w > 2048) || (req->dst.act_h <= 0) || (req->dst.act_h > 2048))) \r
+ if (unlikely((req->dst.act_w <= 0) || (req->dst.act_w > 2048) || (req->dst.act_h <= 0) || (req->dst.act_h > 2048)))\r
{\r
ERR("invalid destination resolution act_w = %d, act_h = %d\n", req->dst.act_w, req->dst.act_h);\r
return -EINVAL;\r
}\r
\r
- if (unlikely((req->dst.vir_w <= 0) || (req->dst.vir_w > 2048) || (req->dst.vir_h <= 0) || (req->dst.vir_h > 2048))) \r
+ if (unlikely((req->dst.vir_w <= 0) || (req->dst.vir_w > 2048) || (req->dst.vir_h <= 0) || (req->dst.vir_h > 2048)))\r
{\r
ERR("invalid destination resolution vir_w = %d, vir_h = %d\n", req->dst.vir_w, req->dst.vir_h);\r
return -EINVAL;\r
ERR("invalid dst_vir_w act_h = %d, vir_h = %d\n", req->dst.act_w, req->dst.vir_w);\r
return -EINVAL;\r
}\r
- \r
+\r
return 0;\r
}\r
\r
static void rga_copy_reg(struct rga_reg *reg, uint32_t offset)\r
-{ \r
+{\r
uint32_t i;\r
uint32_t *cmd_buf;\r
uint32_t *reg_p;\r
{\r
printk(KERN_ERR "task_running is no zero\n");\r
}\r
- \r
+\r
atomic_add(1, &rga_service.cmd_num);\r
- atomic_add(1, ®->session->task_running); \r
- \r
+ atomic_add(1, ®->session->task_running);\r
+\r
cmd_buf = (uint32_t *)rga_service.cmd_buff + offset*28;\r
reg_p = (uint32_t *)reg->cmd_reg;\r
\r
{\r
cmd_buf[i] = reg_p[i];\r
}\r
- \r
+\r
dsb();\r
}\r
\r
\r
static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req)\r
{\r
- unsigned long flag;\r
uint32_t ret;\r
struct rga_reg *reg = kzalloc(sizeof(struct rga_reg), GFP_KERNEL);\r
if (NULL == reg) {\r
//memcpy(®->req, req, sizeof(struct rga_req));\r
\r
reg->MMU_base = NULL;\r
- \r
+\r
if (req->mmu_info.mmu_en)\r
{\r
ret = rga_set_mmu_info(reg, req);\r
- if(ret < 0) \r
+ if(ret < 0)\r
{\r
printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);\r
- if(reg != NULL) \r
- { \r
+ if(reg != NULL)\r
+ {\r
kfree(reg);\r
}\r
- return NULL; \r
+ return NULL;\r
}\r
}\r
\r
printk("gen reg info error\n");\r
if(reg != NULL)\r
{\r
- kfree(reg); \r
+ kfree(reg);\r
}\r
return NULL;\r
}\r
\r
- spin_lock_irqsave(&rga_service.lock, flag);\r
+ mutex_lock(&rga_service.lock);\r
list_add_tail(®->status_link, &rga_service.waiting);\r
list_add_tail(®->session_link, &session->waiting);\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
+ mutex_unlock(&rga_service.lock);\r
\r
return reg;\r
}\r
\r
static struct rga_reg * rga_reg_init_2(rga_session *session, struct rga_req *req0, struct rga_req *req1)\r
{\r
- unsigned long flag;\r
uint32_t ret;\r
\r
struct rga_reg *reg0, *reg1;\r
reg1 = NULL;\r
\r
do\r
- { \r
+ {\r
reg0 = kzalloc(sizeof(struct rga_reg), GFP_KERNEL);\r
if (NULL == reg0) {\r
pr_err("%s [%d] kmalloc fail in rga_reg_init\n", __FUNCTION__, __LINE__);\r
break;\r
}\r
\r
- reg0->session = session; \r
+ reg0->session = session;\r
INIT_LIST_HEAD(®0->session_link);\r
INIT_LIST_HEAD(®0->status_link);\r
\r
ret = rga_set_mmu_info(reg0, req0);\r
if(ret < 0) {\r
printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);\r
- break; \r
+ break;\r
}\r
}\r
- \r
+\r
RGA_gen_reg_info(req0, (uint8_t *)reg0->cmd_reg);\r
- \r
+\r
if(req1->mmu_info.mmu_en)\r
{\r
ret = rga_set_mmu_info(reg1, req1);\r
if(ret < 0) {\r
printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);\r
- break; \r
+ break;\r
}\r
}\r
- \r
+\r
RGA_gen_reg_info(req1, (uint8_t *)reg1->cmd_reg);\r
- \r
- spin_lock_irqsave(&rga_service.lock, flag);\r
+\r
+ mutex_lock(&rga_service.lock);\r
list_add_tail(®0->status_link, &rga_service.waiting);\r
list_add_tail(®0->session_link, &session->waiting);\r
- list_add_tail(®1->status_link, &rga_service.waiting); \r
+ list_add_tail(®1->status_link, &rga_service.waiting);\r
list_add_tail(®1->session_link, &session->waiting);\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
+ mutex_unlock(&rga_service.lock);\r
\r
return reg1;\r
}\r
return NULL;\r
}\r
\r
-\r
+/* Caller must hold rga_service.lock */\r
static void rga_reg_deinit(struct rga_reg *reg)\r
{\r
list_del_init(®->session_link);\r
list_del_init(®->status_link);\r
- kfree(reg); \r
+ kfree(reg);\r
}\r
\r
+/* Caller must hold rga_service.lock */\r
static void rga_reg_from_wait_to_run(struct rga_reg *reg)\r
{\r
list_del_init(®->status_link);\r
list_add_tail(®->session_link, ®->session->running);\r
}\r
\r
+/* Caller must hold rga_service.lock */\r
static void rga_service_session_clear(rga_session *session)\r
{\r
struct rga_reg *reg, *n;\r
\r
- list_for_each_entry_safe(reg, n, &session->waiting, session_link) \r
+ list_for_each_entry_safe(reg, n, &session->waiting, session_link)\r
{\r
rga_reg_deinit(reg);\r
}\r
\r
- list_for_each_entry_safe(reg, n, &session->running, session_link) \r
+ list_for_each_entry_safe(reg, n, &session->running, session_link)\r
{\r
rga_reg_deinit(reg);\r
}\r
}\r
\r
-static void rga_try_set_reg(uint32_t num)\r
+/* Caller must hold rga_service.lock */\r
+static void rga_try_set_reg(void)\r
{\r
- unsigned long flag;\r
struct rga_reg *reg ;\r
- \r
- if (!num)\r
- {\r
- #if RGA_TEST \r
- printk("rga try set reg cmd num is 0\n");\r
- #endif\r
- \r
- return;\r
- }\r
\r
- udelay(3);\r
- \r
- spin_lock_irqsave(&rga_service.lock, flag);\r
- if (!list_empty(&rga_service.waiting)) \r
+ if (list_empty(&rga_service.running))\r
{\r
- do\r
- { \r
- if(!list_empty(&rga_service.running)) \r
- { \r
- break;\r
+ if (!list_empty(&rga_service.waiting))\r
+ {\r
+ /* RGA is idle */\r
+ reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);\r
+\r
+ rga_power_on();\r
+ udelay(3);\r
+\r
+ rga_copy_reg(reg, 0);\r
+ rga_reg_from_wait_to_run(reg);\r
+\r
+ dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);\r
+ outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]),virt_to_phys(&rga_service.cmd_buff[28]));\r
+\r
+ rga_soft_reset();\r
+ rga_write(0, RGA_MMU_CTRL);\r
+\r
+ /* CMD buff */\r
+ rga_write(virt_to_phys(rga_service.cmd_buff), RGA_CMD_ADDR);\r
+\r
+#if RGA_TEST\r
+ {\r
+ //printk(KERN_DEBUG "cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));\r
+ uint32_t i;\r
+ uint32_t *p;\r
+ p = rga_service.cmd_buff;\r
+ printk(KERN_DEBUG "CMD_REG\n");\r
+ for (i=0; i<7; i++)\r
+ printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);\r
}\r
- else \r
- { \r
- /* RGA is idle */\r
- reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link); \r
- rga_copy_reg(reg, 0); \r
- rga_reg_from_wait_to_run(reg);\r
- \r
- dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);\r
- outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]),virt_to_phys(&rga_service.cmd_buff[28]));\r
-\r
- rga_soft_reset();\r
- rga_write(0, RGA_MMU_CTRL); \r
- \r
- /* CMD buff */\r
- rga_write(virt_to_phys(rga_service.cmd_buff), RGA_CMD_ADDR);\r
- \r
- #if RGA_TEST\r
- {\r
- //printk(KERN_DEBUG "cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));\r
- uint32_t i;\r
- uint32_t *p;\r
- p = rga_service.cmd_buff; \r
- printk(KERN_DEBUG "CMD_REG\n");\r
- for (i=0; i<7; i++) \r
- printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]); \r
- }\r
- #endif\r
-\r
- /* master mode */\r
- rga_write((0x1<<2)|(0x1<<3), RGA_SYS_CTRL);\r
- \r
- /* All CMD finish int */\r
- rga_write(rga_read(RGA_INT)|(0x1<<10)|(0x1<<8), RGA_INT);\r
- \r
- /* Start proc */\r
- atomic_set(®->session->done, 0); \r
- rga_write(0x1, RGA_CMD_CTRL);\r
- \r
- #if RGA_TEST\r
- {\r
- uint32_t i;\r
- printk(KERN_DEBUG "CMD_READ_BACK_REG\n");\r
- for (i=0; i<7; i++) \r
- printk(KERN_DEBUG "%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i*16 + 0), \r
- rga_read(0x100 + i*16 + 4), rga_read(0x100 + i*16 + 8), rga_read(0x100 + i*16 + 12)); \r
- }\r
- #endif\r
+#endif\r
+\r
+ /* master mode */\r
+ rga_write((0x1<<2)|(0x1<<3), RGA_SYS_CTRL);\r
+\r
+ /* All CMD finish int */\r
+ rga_write(rga_read(RGA_INT)|(0x1<<10)|(0x1<<8), RGA_INT);\r
+\r
+ /* Start proc */\r
+ atomic_set(®->session->done, 0);\r
+ rga_write(0x1, RGA_CMD_CTRL);\r
+\r
+#if RGA_TEST\r
+ {\r
+ uint32_t i;\r
+ printk(KERN_DEBUG "CMD_READ_BACK_REG\n");\r
+ for (i=0; i<7; i++)\r
+ printk(KERN_DEBUG "%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i*16 + 0),\r
+ rga_read(0x100 + i*16 + 4), rga_read(0x100 + i*16 + 8), rga_read(0x100 + i*16 + 12));\r
}\r
- num--;\r
+#endif\r
}\r
- while(0);\r
- }\r
- spin_unlock_irqrestore(&rga_service.lock, flag); \r
+ else\r
+ {\r
+// rga_power_off();\r
+ }\r
+ }\r
}\r
\r
\r
-#if RGA_TEST \r
+#if RGA_TEST\r
static void print_info(struct rga_req *req)\r
-{ \r
- printk("src.yrgb_addr = %.8x, src.uv_addr = %.8x, src.v_addr = %.8x\n", \r
+{\r
+ printk("src.yrgb_addr = %.8x, src.uv_addr = %.8x, src.v_addr = %.8x\n",\r
req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr);\r
- printk("src : act_w = %d, act_h = %d, vir_w = %d, vir_h = %d\n", \r
+ printk("src : act_w = %d, act_h = %d, vir_w = %d, vir_h = %d\n",\r
req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h);\r
printk("src : x_offset = %.8x y_offset = %.8x\n", req->src.x_offset, req->src.y_offset);\r
- \r
- printk("dst.yrgb_addr = %.8x, dst.uv_addr = %.8x, dst.v_addr = %.8x\n", \r
- req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr); \r
+\r
+ printk("dst.yrgb_addr = %.8x, dst.uv_addr = %.8x, dst.v_addr = %.8x\n",\r
+ req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr);\r
printk("dst : x_offset = %.8x y_offset = %.8x\n", req->dst.x_offset, req->dst.y_offset);\r
- printk("dst : act_w = %d, act_h = %d, vir_w = %d, vir_h = %d\n", \r
+ printk("dst : act_w = %d, act_h = %d, vir_w = %d, vir_h = %d\n",\r
req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h);\r
\r
- printk("clip.xmin = %d, clip.xmax = %d. clip.ymin = %d, clip.ymax = %d\n", \r
- req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax); \r
+ printk("clip.xmin = %d, clip.xmax = %d. clip.ymin = %d, clip.ymax = %d\n",\r
+ req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax);\r
}\r
#endif\r
\r
-\r
+/* Caller must hold rga_service.lock */\r
static void rga_del_running_list(void)\r
{\r
struct rga_reg *reg;\r
- \r
+\r
while(!list_empty(&rga_service.running))\r
{\r
reg = list_entry(rga_service.running.next, struct rga_reg, status_link);\r
reg->MMU_base = NULL;\r
}\r
atomic_sub(1, ®->session->task_running);\r
- atomic_sub(1, &rga_service.total_running);\r
- \r
+ atomic_sub(1, &rga_service.total_running);\r
+\r
if(list_empty(®->session->waiting))\r
{\r
- atomic_set(®->session->done, 1); \r
+ atomic_set(®->session->done, 1);\r
wake_up_interruptible_sync(®->session->wait);\r
}\r
- \r
- rga_reg_deinit(reg); \r
- } \r
+\r
+ rga_reg_deinit(reg);\r
+ }\r
}\r
\r
+/* Caller must hold rga_service.lock */\r
static void rga_del_running_list_timeout(void)\r
{\r
struct rga_reg *reg;\r
- \r
+\r
while(!list_empty(&rga_service.running))\r
{\r
reg = list_entry(rga_service.running.next, struct rga_reg, status_link);\r
{\r
kfree(reg->MMU_base);\r
}\r
- \r
+\r
atomic_sub(1, ®->session->task_running);\r
- atomic_sub(1, &rga_service.total_running);\r
+ atomic_sub(1, &rga_service.total_running);\r
+\r
\r
- \r
#if 0\r
printk("RGA_INT is %.8x\n", rga_read(RGA_INT));\r
printk("reg->session->task_running = %d\n", atomic_read(®->session->task_running));\r
\r
{\r
uint32_t *p, i;\r
- p = reg->cmd_reg; \r
- for (i=0; i<7; i++) \r
+ p = reg->cmd_reg;\r
+ for (i=0; i<7; i++)\r
printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);\r
- \r
+\r
}\r
#endif\r
- \r
+\r
if(list_empty(®->session->waiting))\r
{\r
atomic_set(®->session->done, 1);\r
wake_up_interruptible_sync(®->session->wait);\r
}\r
- \r
- rga_reg_deinit(reg); \r
- } \r
+\r
+ rga_reg_deinit(reg);\r
+ }\r
}\r
\r
\r
break;\r
case RK_FORMAT_YCrCb_420_SP :\r
break;\r
- case RK_FORMAT_YCrCb_420_P : \r
+ case RK_FORMAT_YCrCb_420_P :\r
break;\r
default :\r
break;\r
}\r
- \r
+\r
}\r
\r
\r
static int rga_blit(rga_session *session, struct rga_req *req)\r
{\r
int ret = -1;\r
- int num = 0; \r
+ int num = 0;\r
struct rga_reg *reg;\r
struct rga_req *req2;\r
- unsigned long flag;\r
\r
uint32_t saw, sah, daw, dah;\r
\r
- req2 = NULL; \r
- \r
+ req2 = NULL;\r
+\r
saw = req->src.act_w;\r
sah = req->src.act_h;\r
daw = req->dst.act_w;\r
dah = req->dst.act_h;\r
\r
do\r
- { \r
- if((req->render_mode == bitblt_mode) && (((saw>>1) >= daw) || ((sah>>1) >= dah))) \r
- { \r
- /* generate 2 cmd for pre scale */ \r
+ {\r
+ if((req->render_mode == bitblt_mode) && (((saw>>1) >= daw) || ((sah>>1) >= dah)))\r
+ {\r
+ /* generate 2 cmd for pre scale */\r
req2 = kzalloc(sizeof(struct rga_req), GFP_KERNEL);\r
if(NULL == req2) {\r
- return -EFAULT; \r
+ return -EFAULT;\r
}\r
\r
ret = rga_check_param(req);\r
printk("req 0 argument is inval\n");\r
break;\r
}\r
- \r
- ret = RGA_gen_two_pro(req, req2); \r
+\r
+ ret = RGA_gen_two_pro(req, req2);\r
if(ret == -EINVAL) {\r
break;\r
}\r
printk("req 1 argument is inval\n");\r
break;\r
}\r
- \r
+\r
ret = rga_check_param(req2);\r
if(ret == -EINVAL) {\r
printk("req 2 argument is inval\n");\r
reg = rga_reg_init_2(session, req, req2);\r
if(reg == NULL) {\r
break;\r
- } \r
+ }\r
num = 2;\r
}\r
- else \r
+ else\r
{\r
/* check value if legal */\r
ret = rga_check_param(req);\r
\r
if(req->render_mode == bitblt_mode)\r
{\r
- rga_mem_addr_sel(req); \r
+ rga_mem_addr_sel(req);\r
}\r
- \r
+\r
reg = rga_reg_init(session, req);\r
if(reg == NULL) {\r
break;\r
- } \r
- num = 1; \r
- } \r
+ }\r
+ num = 1;\r
+ }\r
\r
- //atomic_set(®->int_enable, 1);\r
- \r
- spin_lock_irqsave(&rga_service.lock, flag);\r
+ mutex_lock(&rga_service.lock);\r
atomic_add(num, &rga_service.total_running);\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
- \r
- rga_try_set_reg(1);\r
- \r
- return 0; \r
+ rga_try_set_reg();\r
+ mutex_unlock(&rga_service.lock);\r
+\r
+ return 0;\r
}\r
while(0);\r
- \r
+\r
if(NULL != req2)\r
{\r
kfree(req2);\r
static int rga_blit_async(rga_session *session, struct rga_req *req)\r
{\r
int ret = -1;\r
- \r
+\r
#if RGA_TEST\r
printk("*** rga_blit_async proc ***\n");\r
print_info(req);\r
#endif\r
- \r
+\r
ret = rga_blit(session, req);\r
- \r
+\r
return ret;\r
}\r
\r
{\r
int ret = -1;\r
int ret_timeout = 0;\r
- unsigned long flag;\r
- \r
+\r
#if RGA_TEST\r
printk("*** rga_blit_sync proc ***\n");\r
print_info(req);\r
\r
#if RGA_TEST_TIME\r
rga_start = ktime_get();\r
- #endif \r
- \r
+ #endif\r
+\r
ret = rga_blit(session, req);\r
- \r
+\r
ret_timeout = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);\r
- \r
- if (unlikely(ret_timeout< 0)) \r
+\r
+ if (unlikely(ret_timeout< 0))\r
{\r
- pr_err("sync pid %d wait task ret %d\n", session->pid, ret_timeout); \r
- spin_lock_irqsave(&rga_service.lock, flag);\r
+ pr_err("sync pid %d wait task ret %d\n", session->pid, ret_timeout);\r
+ mutex_lock(&rga_service.lock);\r
rga_del_running_list();\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
+ mutex_unlock(&rga_service.lock);\r
ret = -ETIMEDOUT;\r
- } \r
+ }\r
else if (0 == ret_timeout)\r
{\r
pr_err("sync pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));\r
- spin_lock_irqsave(&rga_service.lock, flag);\r
+ mutex_lock(&rga_service.lock);\r
rga_del_running_list_timeout();\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
- rga_try_set_reg(1);\r
+ rga_try_set_reg();\r
+ mutex_unlock(&rga_service.lock);\r
ret = -ETIMEDOUT;\r
}\r
\r
rga_end = ktime_sub(rga_end, rga_start);\r
printk("sync one cmd end time %d\n", (int)ktime_to_us(rga_end));\r
#endif\r
- \r
- return ret; \r
+\r
+ return ret;\r
}\r
\r
\r
struct rga_req *req;\r
int ret = 0;\r
rga_session *session = (rga_session *)file->private_data;\r
- \r
- if (NULL == session) \r
+\r
+ if (NULL == session)\r
{\r
printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);\r
return -EINVAL;\r
}\r
\r
- mutex_lock(&rga_service.mutex);\r
- \r
req = kzalloc(sizeof(struct rga_req), GFP_KERNEL);\r
- if(req == NULL) \r
+ if(req == NULL)\r
{\r
printk("%s [%d] get rga_req mem failed\n",__FUNCTION__,__LINE__);\r
- mutex_unlock(&rga_service.mutex);\r
return -EINVAL;\r
}\r
- \r
+\r
+ mutex_lock(&rga_service.mutex);\r
+\r
switch (cmd)\r
{\r
case RGA_BLIT_SYNC:\r
- if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req)))) \r
+ if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req))))\r
{\r
ERR("copy_from_user failed\n");\r
ret = -EFAULT;\r
break;\r
}\r
- rga_power_on();\r
ret = rga_blit_sync(session, req);\r
break;\r
case RGA_BLIT_ASYNC:\r
- if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req)))) \r
+ if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req))))\r
{\r
ERR("copy_from_user failed\n");\r
ret = -EFAULT;\r
break;\r
}\r
- rga_power_on();\r
\r
if((atomic_read(&rga_service.total_running) > 16))\r
{\r
- ret = rga_blit_sync(session, req); \r
+ ret = rga_blit_sync(session, req);\r
}\r
else\r
{\r
- ret = rga_blit_async(session, req); \r
+ ret = rga_blit_async(session, req);\r
}\r
break;\r
case RGA_FLUSH:\r
break;\r
}\r
\r
- if(req != NULL) {\r
+ mutex_unlock(&rga_service.mutex);\r
+\r
kfree(req);\r
- }\r
- \r
- mutex_unlock(&rga_service.mutex);\r
- \r
+\r
return ret;\r
}\r
\r
static int rga_open(struct inode *inode, struct file *file)\r
{\r
- unsigned long flag;\r
rga_session *session = kzalloc(sizeof(rga_session), GFP_KERNEL);\r
if (NULL == session) {\r
pr_err("unable to allocate memory for rga_session.");\r
\r
session->pid = current->pid;\r
//printk(KERN_DEBUG "+");\r
- \r
+\r
INIT_LIST_HEAD(&session->waiting);\r
INIT_LIST_HEAD(&session->running);\r
INIT_LIST_HEAD(&session->list_session);\r
init_waitqueue_head(&session->wait);\r
- spin_lock_irqsave(&rga_service.lock, flag);\r
- list_add_tail(&session->list_session, &rga_service.session); \r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
+ mutex_lock(&rga_service.lock);\r
+ list_add_tail(&session->list_session, &rga_service.session);\r
+ mutex_unlock(&rga_service.lock);\r
atomic_set(&session->task_running, 0);\r
atomic_set(&session->num_done, 0);\r
- \r
+\r
file->private_data = (void *)session;\r
\r
- //DBG("*** rga dev opened by pid %d *** \n", session->pid); \r
+ //DBG("*** rga dev opened by pid %d *** \n", session->pid);\r
return nonseekable_open(inode, file);\r
- \r
+\r
}\r
\r
static int rga_release(struct inode *inode, struct file *file)\r
{\r
int task_running;\r
- unsigned long flag;\r
rga_session *session = (rga_session *)file->private_data;\r
if (NULL == session)\r
return -EINVAL;\r
//printk(KERN_DEBUG "-");\r
task_running = atomic_read(&session->task_running);\r
\r
- if (task_running) \r
+ if (task_running)\r
{\r
- pr_err("rga_service session %d still has %d task running when closing\n", session->pid, task_running); \r
+ pr_err("rga_service session %d still has %d task running when closing\n", session->pid, task_running);\r
msleep(100);\r
- /*ͬ²½*/ \r
+ /*ͬ²½*/\r
}\r
- \r
+\r
wake_up_interruptible_sync(&session->wait);\r
- spin_lock_irqsave(&rga_service.lock, flag);\r
+ mutex_lock(&rga_service.lock);\r
list_del(&session->list_session);\r
rga_service_session_clear(session);\r
kfree(session);\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
+ mutex_unlock(&rga_service.lock);\r
\r
//DBG("*** rga dev close ***\n");\r
return 0;\r
}\r
\r
-static irqreturn_t rga_irq(int irq, void *dev_id)\r
+static irqreturn_t rga_irq_thread(int irq, void *dev_id)\r
{\r
- unsigned long flag;\r
- \r
- /*clear INT */\r
- rga_write(rga_read(RGA_INT) | (0x1<<6) | (0x1<<7) | (0x1<<4), RGA_INT);\r
+ mutex_lock(&rga_service.lock);\r
+ if (rga_service.enable) {\r
+ rga_del_running_list();\r
+ rga_try_set_reg();\r
+ }\r
+ mutex_unlock(&rga_service.lock);\r
\r
- spin_lock_irqsave(&rga_service.lock, flag);\r
- rga_del_running_list();\r
- \r
- if(!list_empty(&rga_service.waiting))\r
- {\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
- rga_try_set_reg(1);\r
- }\r
- else\r
- {\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
- }\r
- \r
return IRQ_HANDLED;\r
}\r
\r
-static int rga_suspend(struct platform_device *pdev, pm_message_t state)\r
+static irqreturn_t rga_irq(int irq, void *dev_id)\r
{\r
- uint32_t enable;\r
- \r
- enable = rga_service.enable; \r
- rga_power_off(NULL);\r
- rga_service.enable = enable;\r
-\r
- return 0;\r
-}\r
-\r
-static int rga_resume(struct platform_device *pdev)\r
-{ \r
- if(rga_service.enable)\r
- {\r
- rga_service.enable = false;\r
- rga_power_on();\r
- rga_try_set_reg(1);\r
- }\r
- \r
- return 0;\r
-}\r
+ /*clear INT */\r
+ rga_write(rga_read(RGA_INT) | (0x1<<6) | (0x1<<7) | (0x1<<4), RGA_INT);\r
\r
-static void rga_shutdown(struct platform_device *pdev)\r
-{\r
- pr_cont("shutdown..."); \r
- rga_power_off(NULL); \r
- pr_cont("done\n");\r
+ return IRQ_WAKE_THREAD;\r
}\r
\r
struct file_operations rga_fops = {\r
struct rga_drvdata *data;\r
int ret = 0;\r
\r
- data = kzalloc(sizeof(struct rga_drvdata), GFP_KERNEL);\r
-\r
- INIT_LIST_HEAD(&rga_service.waiting);\r
+ INIT_LIST_HEAD(&rga_service.waiting);\r
INIT_LIST_HEAD(&rga_service.running);\r
INIT_LIST_HEAD(&rga_service.done);\r
INIT_LIST_HEAD(&rga_service.session);\r
- spin_lock_init(&rga_service.lock);\r
- spin_lock_init(&rga_service.lock_power);\r
- atomic_set(&rga_service.total_running, 0);\r
- atomic_set(&rga_service.src_format_swt, 0);\r
- rga_service.last_prc_src_format = 1; /* default is yuv first*/\r
- rga_service.enable = false;\r
- \r
+ mutex_init(&rga_service.lock);\r
+ mutex_init(&rga_service.mutex);\r
+ atomic_set(&rga_service.total_running, 0);\r
+ atomic_set(&rga_service.src_format_swt, 0);\r
+ rga_service.last_prc_src_format = 1; /* default is yuv first*/\r
+ rga_service.enable = false;\r
+\r
+ data = kzalloc(sizeof(struct rga_drvdata), GFP_KERNEL);\r
if(NULL == data)\r
{\r
ERR("failed to allocate driver data.\n");\r
- return -ENOMEM;\r
- }\r
- \r
- pd_rga = clk_get(NULL, "pd_rga");\r
- aclk_rga = clk_get(NULL, "aclk_rga"); \r
- if (IS_ERR(aclk_rga))\r
- {\r
- ERR("failed to find rga axi clock source.\n");\r
- ret = -ENOENT;\r
- goto err_clock;\r
+ return -ENOMEM;\r
}\r
\r
- hclk_rga = clk_get(NULL, "hclk_rga");\r
- if (IS_ERR(hclk_rga))\r
- {\r
- ERR("failed to find rga ahb clock source.\n");\r
- ret = -ENOENT;\r
- goto err_clock;\r
- }\r
+ INIT_DELAYED_WORK(&data->power_off_work, rga_power_off_work);\r
+ wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "rga");\r
+\r
+ data->pd_rga = clk_get(NULL, "pd_rga");\r
+ data->aclk_rga = clk_get(NULL, "aclk_rga");\r
+ data->hclk_rga = clk_get(NULL, "hclk_rga");\r
\r
- rga_power_on();\r
- \r
/* map the memory */\r
- if (!request_mem_region(RK30_RGA_PHYS, RK30_RGA_SIZE, "rga_io")) \r
- {\r
+ if (!request_mem_region(RK30_RGA_PHYS, RK30_RGA_SIZE, "rga_io"))\r
+ {\r
pr_info("failed to reserve rga HW regs\n");\r
return -EBUSY;\r
}\r
}\r
\r
/* get the IRQ */\r
- data->irq0 = pdev->resource[1].start;\r
- printk("rga irq %d\n", data->irq0);\r
- if (data->irq0 <= 0)\r
+ data->irq = platform_get_irq(pdev, 0);\r
+ if (data->irq <= 0)\r
{\r
- ERR("failed to get rga irq resource (%d).\n", data->irq0);\r
- ret = data->irq0;\r
+ ERR("failed to get rga irq resource (%d).\n", data->irq);\r
+ ret = data->irq;\r
goto err_irq;\r
}\r
\r
/* request the IRQ */\r
- ret = request_irq(data->irq0, rga_irq, 0/*IRQF_DISABLED*/, "rga", pdev);\r
+ ret = request_threaded_irq(data->irq, rga_irq, rga_irq_thread, 0, "rga", pdev);\r
if (ret)\r
{\r
ERR("rga request_irq failed (%d).\n", ret);\r
goto err_irq;\r
}\r
\r
- mutex_init(&rga_service.mutex);\r
- INIT_DELAYED_WORK(&data->power_off_work, rga_power_off);\r
- data->rga_irq_callback = NULL;\r
- \r
platform_set_drvdata(pdev, data);\r
drvdata = data;\r
- \r
+\r
ret = misc_register(&rga_dev);\r
if(ret)\r
{\r
goto err_misc_register;\r
}\r
\r
- rga_power_off(NULL);\r
- \r
- DBG("RGA Driver loaded succesfully\n");\r
+ pr_info("Driver loaded succesfully\n");\r
\r
- return 0; \r
+ return 0;\r
\r
err_misc_register:\r
- free_irq(data->irq0, pdev);\r
+ free_irq(data->irq, pdev);\r
err_irq:\r
iounmap(data->rga_base);\r
err_ioremap:\r
-err_clock:\r
+ wake_lock_destroy(&data->wake_lock);\r
kfree(data);\r
\r
return ret;\r
static int rga_drv_remove(struct platform_device *pdev)\r
{\r
	struct rga_drvdata *data = platform_get_drvdata(pdev);\r
-	DBG("%s [%d]\n",__FUNCTION__,__LINE__);\r
+	DBG("%s [%d]\n",__FUNCTION__,__LINE__);\r
\r
-	misc_deregister(&(data->miscdev));\r
-	free_irq(data->irq0, &data->miscdev);\r
-	iounmap((void __iomem *)(data->rga_base));	\r
+	/* make sure no deferred power-off can fire after data is freed */\r
+	cancel_delayed_work_sync(&data->power_off_work);\r
+\r
+	misc_deregister(&(data->miscdev));\r
+	/* probe requested the irq with dev_id == pdev; free with the same token\r
+	 * or free_irq() will not find the action and the irq leaks */\r
+	free_irq(data->irq, pdev);\r
+	/* the wake lock can be taken from the threaded irq path, so destroy it\r
+	 * only once the irq can no longer fire */\r
+	wake_lock_destroy(&data->wake_lock);\r
+	iounmap((void __iomem *)(data->rga_base));\r
+	/* pairs with request_mem_region() in probe */\r
+	release_mem_region(RK30_RGA_PHYS, RK30_RGA_SIZE);\r
\r
-	if(aclk_rga) {\r
-		clk_put(aclk_rga);\r
-	}\r
-	\r
-	if(hclk_rga) {\r
-		clk_put(hclk_rga);\r
-	}\r
+	clk_put(data->pd_rga);\r
+	clk_put(data->aclk_rga);\r
+	clk_put(data->hclk_rga);\r
\r
-	kfree(data);\r
-	return 0;\r
+	kfree(data);\r
+	return 0;\r
}\r
\r
static struct platform_driver rga_driver = {\r
.probe = rga_drv_probe,\r
.remove = __devexit_p(rga_drv_remove),\r
- .suspend = rga_suspend,\r
- .resume = rga_resume,\r
- .shutdown = rga_shutdown,\r
.driver = {\r
.owner = THIS_MODULE,\r
.name = "rga",\r
\r
/* malloc pre scale mid buf mmu table */\r
mmu_buf = kzalloc(1024*8, GFP_KERNEL);\r
- if(mmu_buf == NULL) \r
+ if(mmu_buf == NULL)\r
{\r
printk(KERN_ERR "RGA get Pre Scale buff failed. \n");\r
return -1;\r
\r
/* malloc 8 M buf */\r
for(i=0; i<2048; i++)\r
- { \r
+ {\r
buf_p = (uint32_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);\r
if(buf_p == NULL)\r
{\r
printk(KERN_ERR "RGA init pre scale buf falied\n");\r
return -ENOMEM;\r
}\r
- \r
- mmu_buf[i] = virt_to_phys((void *)((uint32_t)buf_p)); \r
+\r
+ mmu_buf[i] = virt_to_phys((void *)((uint32_t)buf_p));\r
}\r
- \r
- rga_service.pre_scale_buf = (uint32_t *)mmu_buf; \r
+\r
+ rga_service.pre_scale_buf = (uint32_t *)mmu_buf;\r
\r
if ((ret = platform_driver_register(&rga_driver)) != 0)\r
{\r
}\r
\r
//rga_test_0();\r
- \r
- INFO("Module initialized.\n"); \r
- \r
+\r
+ INFO("Module initialized.\n");\r
+\r
return 0;\r
}\r
\r
{\r
uint32_t i;\r
\r
- rga_power_off(NULL);\r
+ rga_power_off();\r
\r
for(i=0; i<2048; i++)\r
{\r
if((uint32_t *)rga_service.pre_scale_buf[i] != NULL)\r
{\r
__free_page((void *)rga_service.pre_scale_buf[i]);\r
- } \r
+ }\r
}\r
- \r
+\r
if(rga_service.pre_scale_buf != NULL) {\r
kfree((uint8_t *)rga_service.pre_scale_buf);\r
}\r
- platform_driver_unregister(&rga_driver); \r
+ platform_driver_unregister(&rga_driver);\r
}\r
\r
\r
\r
//dmac_flush_range(&src_buf[0], &src_buf[1920*1080]);\r
//outer_flush_range(virt_to_phys(&src_buf[0]),virt_to_phys(&src_buf[1024*1024]));\r
- \r
+\r
#if 0\r
memset(src_buf, 0x80, 800*480*4);\r
memset(dst_buf, 0xcc, 800*480*4);\r
- \r
+\r
dmac_flush_range(&dst_buf[0], &dst_buf[800*480]);\r
outer_flush_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480]));\r
#endif\r
- \r
+\r
req.src.act_w = 320;\r
req.src.act_h = 240;\r
\r
\r
//req.render_mode = color_fill_mode;\r
//req.fg_color = 0x80ffffff;\r
- \r
+\r
req.rotate_mode = 1;\r
req.scale_mode = 2;\r
\r
\r
fb->var.xres = 1280;\r
fb->var.yres = 800;\r
- \r
+\r
fb->var.red.length = 8;\r
fb->var.red.offset = 0;\r
fb->var.red.msb_right = 0;\r
- \r
+\r
fb->var.green.length = 8;\r
fb->var.green.offset = 8;\r
fb->var.green.msb_right = 0;\r
- \r
+\r
fb->var.blue.length = 8;\r
- \r
+\r
fb->var.blue.offset = 16;\r
fb->var.blue.msb_right = 0;\r
- \r
+\r
fb->var.transp.length = 8;\r
fb->var.transp.offset = 24;\r
fb->var.transp.msb_right = 0;\r
\r
fb->fix.smem_start = virt_to_phys(dst);\r
\r
- rk_direct_fb_show(fb); \r
- \r
+ rk_direct_fb_show(fb);\r
+\r
}\r
\r
#endif\r