* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
* GNU General Public License for more details.\r
- *\r
+ * \r
*/\r
\r
#include <linux/kernel.h>\r
#include <asm/cacheflush.h>\r
#include <linux/compiler.h>\r
#include <linux/slab.h>\r
+#include <linux/fb.h>\r
+\r
\r
\r
#include "rga.h"\r
#include "RGA_API.h"\r
\r
\r
+#define RGA_TEST 1\r
+\r
#define PRE_SCALE_BUF_SIZE 2048*1024*4\r
\r
#define RGA_POWER_OFF_DELAY 4*HZ /* 4s */\r
#define RGA_TIMEOUT_DELAY 2*HZ /* 2s */\r
\r
\r
-\r
-\r
static struct rga_drvdata *drvdata = NULL;\r
rga_service_info rga_service;\r
\r
+#ifdef RGA_TEST
+//uint32_t dst_buf[800*480*4];\r
+#endif\r
+\r
\r
static int rga_blit_async(rga_session *session, struct rga_req *req);\r
\r
\r
\r
/* Logging */\r
-#define RGA_DEBUG 0\r
+#define RGA_DEBUG 1\r
#if RGA_DEBUG\r
#define DBG(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)\r
-#define ERR(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)\r
-#define WARNING(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)\r
-#define INFO(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)\r
+#define ERR(format, args...) printk(KERN_ERR "%s: " format, DRIVER_NAME, ## args)\r
+#define WARNING(format, args...) printk(KERN_WARNING "%s: " format, DRIVER_NAME, ## args)
+#define INFO(format, args...) printk(KERN_INFO "%s: " format, DRIVER_NAME, ## args)\r
#else\r
#define DBG(format, args...)\r
#define ERR(format, args...)\r
static void rga_dump(void)\r
{\r
int running;\r
+ int num_done;\r
struct rga_reg *reg, *reg_tmp;\r
- rga_session *session, *session_tmp;\r
+ rga_session *session, *session_tmp; \r
+ struct list_head *next; \r
\r
running = atomic_read(&rga_service.total_running);\r
- printk("total_running %d\n", running);\r
+ printk("rga total_running %d\n", running);\r
+\r
+ /* Dump waiting list info */\r
+ if (!list_empty(&rga_service.waiting))\r
+ { \r
+ next = &rga_service.waiting;\r
+\r
+ printk("rga_service dump waiting list\n");\r
+\r
+ do\r
+ {\r
+ reg = list_entry(next->next, struct rga_reg, status_link);\r
+            running = atomic_read(&reg->session->task_running);
+            num_done = atomic_read(&reg->session->num_done);
+ printk("rga session pid %d, done %d, running %d\n", reg->session->pid, num_done, running); \r
+ next = next->next;\r
+ }\r
+ while(!list_empty(next)); \r
+ }\r
+\r
+ /* Dump running list info */\r
+ if (!list_empty(&rga_service.running))\r
+ {\r
+ printk("rga_service dump running list\n");\r
+ \r
+ next = &rga_service.running;\r
+ do\r
+ {\r
+ reg = list_entry(next->next, struct rga_reg, status_link);\r
+            running = atomic_read(&reg->session->task_running);
+            num_done = atomic_read(&reg->session->num_done);
+ printk("rga session pid %d, done %d, running %d:\n", reg->session->pid, num_done, running); \r
+ next = next->next;\r
+ }\r
+ while(!list_empty(next)); \r
+ }\r
+ \r
\r
list_for_each_entry_safe(session, session_tmp, &rga_service.session, list_session) \r
{\r
\r
int ret;\r
\r
- ret = wait_event_interruptible_timeout(session->wait, session->done, RGA_TIMEOUT_DELAY);\r
+ ret = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);\r
\r
if (unlikely(ret < 0)) {\r
pr_err("pid %d wait task ret %d\n", session->pid, ret);\r
}\r
\r
\r
+static int rga_get_result(rga_session *session, unsigned long arg)\r
+{\r
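+    /* copy this session's completed-command count (num_done) back to user space */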
+ //printk("rga_get_result %d\n",drvdata->rga_result);\r
+ \r
+ int ret = 0;\r
+\r
+ int num_done;\r
+\r
+ num_done = atomic_read(&session->num_done);\r
+ \r
+ if (unlikely(copy_to_user((void __user *)arg, &num_done, sizeof(int)))) {\r
+ printk("copy_to_user failed\n");\r
+ ERR("copy_to_user failed\n");\r
+ ret = -EFAULT; \r
+ }\r
+ //idle_condition = 1;\r
+ //dmac_clean_range((const void*)&idle_condition,(const void*)&idle_condition+4);\r
+ //wake_up_interruptible_sync(&blit_wait_queue);\r
+ return ret;\r
+}\r
+\r
+\r
+\r
static int rga_check_param(const struct rga_req *req)\r
{\r
#if 1\r
uint32_t *cmd_buf;\r
uint32_t *reg_p;\r
\r
- atomic_add(1, &rga_service.total_running);\r
+ atomic_add(1, &rga_service.cmd_num);\r
    atomic_add(1, &reg->session->task_running);
\r
cmd_buf = (uint32_t *)rga_service.cmd_buff + offset*28;\r
reg_p = (uint32_t *)reg->cmd_reg;\r
- \r
- for(i=0; i<28; i++) \r
+\r
+ for(i=0; i<28; i++)\r
{\r
cmd_buf[i] = reg_p[i];\r
- } \r
+ }\r
+ \r
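+    /* drain the write buffer so the 28 command words just copied are visible to the RGA engine before it is started (this appears to be why dsb() is needed here) */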
+ dsb();\r
}\r
\r
\r
static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req)\r
{\r
unsigned long flag;\r
+ uint32_t ret;\r
struct rga_reg *reg = kmalloc(sizeof(struct rga_reg), GFP_KERNEL);\r
if (NULL == reg) {\r
pr_err("kmalloc fail in rga_reg_init\n");\r
    INIT_LIST_HEAD(&reg->session_link);
    INIT_LIST_HEAD(&reg->status_link);
\r
- rga_set_mmu_info(reg, req);\r
- RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg); \r
+ if (req->mmu_info.mmu_en)\r
+ {\r
+ ret = rga_set_mmu_info(reg, req);\r
+ if(ret < 0) \r
+ {\r
+ printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);\r
+ if(reg != NULL) \r
+ { \r
+ kfree(reg);\r
+ }\r
+ return NULL; \r
+ }\r
+ }\r
+ \r
+ RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg);\r
\r
spin_lock_irqsave(&rga_service.lock, flag);\r
    list_add_tail(&reg->status_link, &rga_service.waiting);
return reg;\r
}\r
\r
+static struct rga_reg * rga_reg_init_2(rga_session *session, struct rga_req *req0, struct rga_req *req1)\r
+{\r
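+    /* queue two register sets in one call; apparently used when RGA_gen_two_pro() splits a request into a pre-scale pass plus the final blit */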
+ unsigned long flag;\r
+ uint32_t ret;\r
+\r
+ struct rga_reg *reg0, *reg1;\r
+\r
+ reg0 = NULL;\r
+ reg1 = NULL;\r
+\r
+ do\r
+ { \r
+ reg0 = kmalloc(sizeof(struct rga_reg), GFP_KERNEL);\r
+ if (NULL == reg0) {\r
+ pr_err("%s [%d] kmalloc fail in rga_reg_init\n", __FUNCTION__, __LINE__);\r
+ break;\r
+ }\r
+\r
+ reg1 = kmalloc(sizeof(struct rga_reg), GFP_KERNEL);\r
+ if (NULL == reg1) {\r
+ pr_err("%s [%d] kmalloc fail in rga_reg_init\n", __FUNCTION__, __LINE__);\r
+ break;\r
+ }\r
+\r
+ reg0->session = session;\r
+        INIT_LIST_HEAD(&reg0->session_link);
+        INIT_LIST_HEAD(&reg0->status_link);
+\r
+ if(req0->mmu_info.mmu_en)\r
+ {\r
+ ret = rga_set_mmu_info(reg0, req0);\r
+ if(ret < 0) {\r
+ printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);\r
+ break; \r
+ }\r
+ }\r
+ \r
+ RGA_gen_reg_info(req0, (uint8_t *)reg0->cmd_reg);\r
+\r
+        reg1->session = session;
+        INIT_LIST_HEAD(&reg1->session_link);
+        INIT_LIST_HEAD(&reg1->status_link);
+
+        if(req1->mmu_info.mmu_en)
+        {
+            ret = rga_set_mmu_info(reg1, req1);
+            if(ret < 0) {
+                printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);
+                break;
+            }
+        }
+
+        RGA_gen_reg_info(req1, (uint8_t *)reg1->cmd_reg);
+\r
+ spin_lock_irqsave(&rga_service.lock, flag);\r
+        list_add_tail(&reg0->status_link, &rga_service.waiting);
+        list_add_tail(&reg1->status_link, &rga_service.waiting);
+        list_add_tail(&reg0->session_link, &session->waiting);
+        list_add_tail(&reg1->session_link, &session->waiting);
+ spin_unlock_irqrestore(&rga_service.lock, flag);\r
+\r
+ return reg1;\r
+ }\r
+ while(0);\r
+\r
+ if(reg0 != NULL) {\r
+ kfree(reg0);\r
+ }\r
+\r
+ if(reg1 != NULL) {\r
+ kfree(reg1);\r
+ }\r
+\r
+ return NULL;\r
+}\r
+\r
+\r
static void rga_reg_deinit(struct rga_reg *reg)\r
{\r
    list_del_init(&reg->session_link);
    list_add_tail(&reg->session_link, &reg->session->running);
}\r
\r
-#if 0\r
-static void rga_reg_from_run_to_done(struct rga_reg *reg)\r
-{\r
- spin_lock(&rga_service.lock);\r
-    list_del_init(&reg->status_link);
-    list_add_tail(&reg->status_link, &rga_service.done);
-
-    list_del_init(&reg->session_link);
-    list_add_tail(&reg->session_link, &reg->session->done);
-    
-    atomic_sub(1, &reg->session->task_running);
-    atomic_sub(1, &rga_service.total_running);
-    wake_up_interruptible_sync(&reg->session->wait);
- spin_unlock(&rga_service.lock);\r
-}\r
-#endif\r
-\r
-\r
static void rga_service_session_clear(rga_session *session)\r
{\r
struct rga_reg *reg, *n;\r
}\r
\r
\r
-static void rga_try_set_reg(void)\r
+static void rga_try_set_reg(uint32_t num)\r
{\r
unsigned long flag;\r
\r
// first get reg from reg list\r
+\r
+ if (!num)\r
+ {\r
+ printk("rga try set reg cmd num is 0\n");\r
+ return;\r
+ }\r
+ \r
spin_lock_irqsave(&rga_service.lock, flag);\r
if (!list_empty(&rga_service.waiting)) \r
{\r
- struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);\r
-\r
- if(!(rga_read(RGA_STATUS) & 0x1)) \r
- { \r
- /* RGA is busy */\r
- if((atomic_read(&rga_service.total_running) <= 0xf) && (atomic_read(&rga_service.int_disable) == 0)) \r
- {\r
- rga_copy_reg(reg, atomic_read(&rga_service.total_running));\r
- rga_reg_from_wait_to_run(reg);\r
- rga_write(RGA_INT, 0x1<<10);\r
- reg->session->done = 0;\r
- rga_write(RGA_CMD_CTRL, (0x1<<3)|(0x1<<1));\r
                if(atomic_read(&reg->int_enable))
- atomic_set(&rga_service.int_disable, 1);\r
+ do\r
+ {\r
+ struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link); \r
+ if((rga_read(RGA_STATUS) & 0x1)) \r
+ { \r
+ /* RGA is busy */\r
+ if((atomic_read(&rga_service.cmd_num) <= 0xf) && (atomic_read(&rga_service.int_disable) == 0)) \r
+ {\r
+ rga_copy_reg(reg, atomic_read(&rga_service.cmd_num)); \r
+ rga_reg_from_wait_to_run(reg);\r
+ \r
+ rga_write(0x1<<10, RGA_INT);\r
+ \r
+                    atomic_set(&reg->session->done, 0);
+ \r
+ rga_write((0x1<<3)|(0x1<<1), RGA_CMD_CTRL);\r
+ \r
+                    if(atomic_read(&reg->int_enable))
+ atomic_set(&rga_service.int_disable, 1);\r
+ }\r
}\r
- }\r
- else \r
- { \r
- /* RGA is idle */\r
- rga_copy_reg(reg, 0); \r
- rga_reg_from_wait_to_run(reg);\r
+ else \r
+ { \r
+ /* RGA is idle */\r
+ rga_copy_reg(reg, 0); \r
+ rga_reg_from_wait_to_run(reg);\r
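+                /* the command list is fetched by the RGA bus master, so flush it out of the CPU data cache first */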
+ dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);\r
\r
- /* MMU */\r
- rga_write(RGA_CMD_ADDR, 0);\r
\r
- /* All CMD finish int */\r
- rga_write(RGA_INT, 0x1<<10);\r
+ /* \r
+             * if the cmd buf itself is accessed through the MMU,
+             * the MMU registers must be written before the command is started
+ */\r
+ rga_write((2<<4)|0x1, RGA_MMU_CTRL);\r
+ rga_write(virt_to_phys(reg->MMU_base)>>2, RGA_MMU_TBL);\r
+ \r
+ /* CMD buff */\r
+ rga_write(virt_to_phys(rga_service.cmd_buff) & (~PAGE_MASK), RGA_CMD_ADDR);\r
\r
- /* Start proc */\r
- reg->session->done = 0;\r
- rga_write(RGA_CMD_CTRL, (0x1<<3)|0x1); \r
- } \r
+ /* master mode */\r
+ rga_write(0x1<<2, RGA_SYS_CTRL);\r
+\r
+ /* All CMD finish int */\r
+ rga_write(0x1<<10, RGA_INT);\r
+\r
+ /* Start proc */\r
+                atomic_set(&reg->session->done, 0);
+ rga_write(0x1, RGA_CMD_CTRL);\r
+\r
+ }\r
+ num--;\r
+ }\r
+ while(num);\r
}\r
spin_unlock_irqrestore(&rga_service.lock, flag);\r
}\r
static int rga_blit_async(rga_session *session, struct rga_req *req)\r
{\r
int ret = -1;\r
- struct rga_reg *reg0, *reg1;\r
+ struct rga_reg *reg;\r
struct rga_req *req2;\r
\r
uint32_t saw, sah, daw, dah;\r
\r
RGA_gen_two_pro(req, req2);\r
\r
- reg0 = rga_reg_init(session, req2);\r
- if(reg0 == NULL) {\r
- return -EFAULT;\r
- }\r
-\r
- reg1 = rga_reg_init(session, req);\r
- if(reg1 == NULL) {\r
+ reg = rga_reg_init_2(session, req2, req);\r
+ if(reg == NULL) {\r
return -EFAULT;\r
}\r
+ \r
+    atomic_set(&reg->int_enable, 1);
\r
- rga_try_set_reg();\r
- rga_try_set_reg();\r
+ rga_try_set_reg(2);\r
\r
if(req2 != NULL)\r
{\r
return -EINVAL;\r
}\r
\r
- reg0 = rga_reg_init(session, req);\r
- if(reg0 == NULL) {\r
+ reg = rga_reg_init(session, req);\r
+ if(reg == NULL) {\r
return -EFAULT;\r
}\r
\r
- rga_try_set_reg(); \r
+ rga_try_set_reg(1); \r
}\r
\r
//printk("rga_blit_async done******************\n");\r
static int rga_blit_sync(rga_session *session, struct rga_req *req)\r
{\r
int ret = 0;\r
- struct rga_reg *reg0, *reg1;\r
+ struct rga_reg *reg;\r
struct rga_req *req2;\r
\r
uint32_t saw, sah, daw, dah;\r
\r
RGA_gen_two_pro(req, req2);\r
\r
- reg0 = rga_reg_init(session, req2);\r
- if(reg0 == NULL) {\r
+ reg = rga_reg_init_2(session, req2, req);\r
+ if(reg == NULL) {\r
return -EFAULT;\r
}\r
+ \r
+        atomic_set(&reg->int_enable, 1);
\r
- reg1 = rga_reg_init(session, req);\r
- if(reg1 == NULL) {\r
- return -EFAULT;\r
- }\r
-    atomic_set(&reg1->int_enable, 1);
-\r
- rga_try_set_reg();\r
- rga_try_set_reg(); \r
+ rga_try_set_reg(2); \r
\r
}\r
else {\r
- /* check value if legal */\r
+        /* check whether the request parameters are legal */
ret = rga_check_param(req);\r
if(ret == -EINVAL) {\r
return -EFAULT;\r
}\r
\r
- reg0 = rga_reg_init(session, req);\r
- if(reg0 == NULL) {\r
+ reg = rga_reg_init(session, req);\r
+ if(reg == NULL) {\r
return -EFAULT;\r
}\r
-        atomic_set(&reg0->int_enable, 1);
+\r
+        atomic_set(&reg->int_enable, 1);
\r
- rga_try_set_reg();\r
+ rga_try_set_reg(1);\r
} \r
\r
- ret = wait_event_interruptible_timeout(session->wait, session->done, RGA_TIMEOUT_DELAY);\r
-\r
+ ret = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);\r
if (unlikely(ret < 0)) \r
{\r
pr_err("pid %d wait task ret %d\n", session->pid, ret);\r
case RGA_FLUSH:\r
ret = rga_flush(session, arg);\r
break;\r
+        case RGA_GET_RESULT:
+            ret = rga_get_result(session, arg);
+            break;
default:\r
ERR("unknown ioctl cmd!\n");\r
ret = -EINVAL;\r
break;\r
}\r
\r
- if(req != NULL)\r
- {\r
+ if(req != NULL) {\r
kfree(req);\r
}\r
\r
/* no need to protect */\r
list_add_tail(&session->list_session, &rga_service.session);\r
atomic_set(&session->task_running, 0);\r
+ atomic_set(&session->num_done, 0);\r
file->private_data = (void *)session;\r
\r
DBG("*** rga dev opened *** \n");\r
rga_session *session = (rga_session *)file->private_data;\r
if (NULL == session)\r
return -EINVAL;\r
-\r
+ \r
task_running = atomic_read(&session->task_running);\r
- if (task_running) {\r
+\r
+ while (task_running) \r
+ {\r
pr_err("rga_service session %d still has %d task running when closing\n", session->pid, task_running);\r
msleep(50);\r
+        task_running = atomic_read(&session->task_running); /* sync: re-check until the session's tasks drain */
}\r
+ \r
wake_up_interruptible_sync(&session->wait);\r
spin_lock_irqsave(&rga_service.lock, flag);\r
list_del(&session->list_session);\r
static irqreturn_t rga_irq(int irq, void *dev_id)\r
{\r
struct rga_reg *reg;\r
+ uint32_t num = 0;\r
+ struct list_head *next;\r
+ int int_enable = 0;\r
\r
DBG("rga_irq %d \n", irq);\r
\r
+ printk("rga_irq is valid\n");\r
+\r
/*clear INT */\r
rga_write(rga_read(RGA_INT) | (0x1<<6), RGA_INT);\r
- if(((rga_read(RGA_STATUS) & 0x1) != 0))// idle\r
+\r
+    if(((rga_read(RGA_STATUS) & 0x1) != 0)) /* engine still busy although the IRQ fired */
{ \r
printk("RGA is not idle!\n");\r
rga_soft_reset();\r
}\r
\r
+\r
spin_lock(&rga_service.lock);\r
do\r
{\r
reg = list_entry(rga_service.running.next, struct rga_reg, status_link);\r
+\r
+ #if 0\r
if(reg->MMU_base != NULL)\r
{\r
kfree(reg->MMU_base);\r
}\r
- \r
+ #endif\r
+ \r
        atomic_sub(1, &reg->session->task_running);
atomic_sub(1, &rga_service.total_running);\r
\r
if(list_empty(®->session->waiting))\r
{\r
- reg->session->done = 1;\r
+            atomic_set(&reg->session->done, 1);
            wake_up_interruptible_sync(&reg->session->wait);
}\r
+ \r
rga_reg_deinit(reg);\r
\r
}\r
while(!list_empty(&rga_service.running));\r
\r
+ atomic_set(&rga_service.cmd_num, 0);\r
+\r
+ spin_unlock(&rga_service.lock);\r
\r
+ next = &rga_service.waiting;\r
+ \r
/* add cmd to cmd buf */\r
- while(((!list_empty(&rga_service.waiting)) && (atomic_read(&rga_service.int_disable) == 0)))\r
- {\r
- rga_try_set_reg();\r
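+    /* count how many queued register sets can be batched into one submission: stop after the first one that wants its own completion interrupt, or at the apparent 16-command limit (num <= 0xf) */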
+ while((!list_empty(next)) && ((int_enable) == 0) && (num <= 0xf))\r
+ { \r
+ num += 1;\r
+ reg = list_entry(next->next, struct rga_reg, status_link);\r
+        int_enable = atomic_read(&reg->int_enable);
+ next = next->next;\r
}\r
\r
- spin_lock(&rga_service.lock);\r
- \r
+ rga_try_set_reg(num);\r
+ \r
return IRQ_HANDLED;\r
}\r
\r
{\r
uint32_t enable;\r
\r
- enable = drvdata->enable;\r
- rga_power_off(NULL);\r
+ enable = drvdata->enable; \r
+ //rga_power_off(NULL); \r
drvdata->enable = enable;\r
\r
return 0;\r
}\r
\r
static int rga_resume(struct platform_device *pdev)\r
-{\r
- rga_power_on();\r
+{ \r
+ //rga_power_on(); \r
return 0;\r
}\r
\r
static void rga_shutdown(struct platform_device *pdev)\r
{\r
- pr_cont("shutdown..."); \r
- rga_power_off(NULL);\r
- pr_cont("done\n");\r
+ pr_cont("shutdown..."); \r
+ //rga_power_off(NULL); \r
+ pr_cont("done\n");\r
}\r
\r
\r
ERR("failed to allocate driver data.\n");\r
return -ENOMEM;\r
}\r
- \r
+\r
+ #if 0\r
/* get the clock */\r
data->pd_display = clk_get(&pdev->dev, "pd_display");\r
if (IS_ERR(data->pd_display))\r
ret = -ENOENT;\r
goto err_clock;\r
}\r
+ #endif\r
\r
/* map the memory */\r
if (!request_mem_region(RK30_RGA_PHYS, RK30_RGA_SIZE, "rga_io")) \r
free_irq(data->irq0, &data->miscdev);\r
iounmap((void __iomem *)(data->rga_base));\r
\r
+ \r
+\r
+ #if 0 \r
if(data->axi_clk) {\r
clk_put(data->axi_clk);\r
}\r
if(data->pd_display){\r
clk_put(data->pd_display);\r
}\r
+ #endif\r
\r
kfree(data);\r
return 0;\r
static int __init rga_init(void)\r
{\r
int ret;\r
- uint8_t *buf;\r
+ uint32_t *mmu_buf;\r
+ uint32_t i;\r
+ uint32_t *buf_p;\r
\r
- /* malloc pre scale mid buf */\r
- buf = kmalloc(PRE_SCALE_BUF_SIZE, GFP_KERNEL);\r
- if(buf == NULL) {\r
+ /* malloc pre scale mid buf mmu table */\r
+ mmu_buf = (uint32_t *)kmalloc(1024*8, GFP_KERNEL); \r
+ if(mmu_buf == NULL) \r
+ {\r
ERR("RGA get Pre Scale buff failed. \n");\r
return -1;\r
}\r
- rga_service.pre_scale_buf = (uint32_t *)buf;\r
+\r
+ /* malloc 8 M buf */\r
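+    /* 2048 individually allocated pages x 4 KB = 8 MB; only their physical addresses are kept, forming a page table for the pre-scale buffer */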
+ for(i=0; i<2048; i++)\r
+ { \r
+ buf_p = (uint32_t *)__get_free_page(GFP_KERNEL); \r
+ if(buf_p == NULL)\r
+ {\r
+            printk("RGA init pre scale buf failed\n");
+ return -ENOMEM;\r
+ }\r
+ \r
+ mmu_buf[i] = virt_to_phys((void *)((uint32_t)buf_p)); \r
+ }\r
+ \r
+ rga_service.pre_scale_buf = (uint32_t *)mmu_buf; \r
\r
if ((ret = platform_driver_register(&rga_driver)) != 0)\r
{\r
return ret;\r
}\r
\r
- INFO("Module initialized.\n");\r
+ INFO("Module initialized.\n"); \r
\r
return 0;\r
}\r
\r
static void __exit rga_exit(void)\r
{\r
+ uint32_t i;\r
+\r
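+    /* give back the 2048 pre-scale pages whose physical addresses rga_init() stored in pre_scale_buf */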
+ for(i=0; i<2048; i++)\r
+ {\r
+        if(rga_service.pre_scale_buf[i] != 0)
+        {
+            /* the table stores physical addresses; convert back before freeing */
+            free_page((unsigned long)phys_to_virt(rga_service.pre_scale_buf[i]));
+        }
+ }\r
+ \r
if(rga_service.pre_scale_buf != NULL) {\r
kfree((uint8_t *)rga_service.pre_scale_buf);\r
}\r
}\r
\r
\r
-\r
module_init(rga_init);\r
module_exit(rga_exit);\r
\r
MODULE_AUTHOR("zsq@rock-chips.com");\r
MODULE_DESCRIPTION("Driver for rga device");\r
MODULE_LICENSE("GPL");\r
-\r
-\r
\r
#include "rga_mmu_info.h"\r
\r
-\r
extern rga_service_info rga_service;\r
\r
#define KERNEL_SPACE_VALID 0xc0000000\r
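+/* addresses below this are treated as user-space mappings that need pinning; at or above it they are kernel-virtual and translated directly (assumes the usual 3G/1G ARM split) */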
{\r
int32_t result;\r
uint32_t i;\r
- \r
-    down_read(&current->mm->mmap_sem);
- result = get_user_pages(current,\r
- current->mm,\r
- Memory << PAGE_SHIFT,\r
- pageCount,\r
- 1,\r
- 0,\r
- pages,\r
- NULL\r
- );\r
-    up_read(&current->mm->mmap_sem);
- \r
- if(result <= 0 || result < pageCount) \r
- {\r
- return -EINVAL; \r
- }\r
+    uint32_t status = 0;
+\r
+ do\r
+ { \r
+        down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current,\r
+ current->mm,\r
+ Memory << PAGE_SHIFT,\r
+ pageCount,\r
+ 1,\r
+ 0,\r
+ pages,\r
+ NULL\r
+ );\r
+        up_read(&current->mm->mmap_sem);
+ \r
+ if(result <= 0 || result < pageCount) \r
+ {\r
+ struct vm_area_struct *vma;\r
+\r
+ vma = find_vma(current->mm, Memory);\r
+\r
+ if (vma && (vma->vm_flags & VM_PFNMAP) )\r
+ {\r
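+                /* get_user_pages() cannot pin VM_PFNMAP areas (no struct page behind them, e.g. memory remapped with remap_pfn_range), so walk the page tables by hand */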
+ do\r
+ {\r
+ pte_t * pte;\r
+ spinlock_t * ptl;\r
+ unsigned long pfn;\r
+\r
+ pgd_t * pgd = pgd_offset(current->mm, Memory);\r
+ pud_t * pud = pud_offset(pgd, Memory);\r
+ if (pud)\r
+ {\r
+ pmd_t * pmd = pmd_offset(pud, Memory);\r
+ if (pmd)\r
+ {\r
+ pte = pte_offset_map_lock(current->mm, pmd, Memory, &ptl);\r
+ if (!pte)\r
+ {\r
+ break;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ break;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ break;\r
+ }\r
+\r
+ pfn = pte_pfn(*pte);\r
+ \r
+ pte_unmap_unlock(pte, ptl);\r
+\r
+                    /* Release the page references taken by get_user_pages(). */
+ if (pages != NULL)\r
+ {\r
+ /* Release the pages if any. */\r
+ if (result > 0)\r
+ {\r
+ for (i = 0; i < result; i++)\r
+ {\r
+ if (pages[i] == NULL)\r
+ {\r
+ break;\r
+ }\r
+\r
+ page_cache_release(pages[i]);\r
+ }\r
+ }\r
+ }\r
+\r
+ return 0;\r
+ }\r
+ while (0);\r
+\r
+ status = RGA_OUT_OF_RESOURCES;\r
+ break;\r
+ }\r
+ else\r
+ {\r
+ status = RGA_OUT_OF_RESOURCES;\r
+ break;\r
+ } \r
+ }\r
\r
- for (i = 0; i < pageCount; i++)\r
- {\r
- /* Flush the data cache. */\r
+ for (i = 0; i < pageCount; i++)\r
+ {\r
+ /* Flush the data cache. */\r
#ifdef ANDROID\r
- dma_sync_single_for_device(\r
- gcvNULL,\r
- page_to_phys(pages[i]),\r
- PAGE_SIZE,\r
- DMA_TO_DEVICE);\r
+ dma_sync_single_for_device(\r
+ NULL,\r
+ page_to_phys(pages[i]),\r
+ PAGE_SIZE,\r
+ DMA_TO_DEVICE);\r
#else\r
- flush_dcache_page(pages[i]);\r
+ flush_dcache_page(pages[i]);\r
#endif\r
- }\r
+ }\r
\r
- /* Fill the page table. */\r
- for(i=0; i<pageCount; i++) {\r
+ /* Fill the page table. */\r
+ for(i=0; i<pageCount; i++) {\r
\r
- /* Get the physical address from page struct. */\r
- pageTable[i * (PAGE_SIZE/4096)] = page_to_phys(pages[i]);\r
- } \r
+ /* Get the physical address from page struct. */\r
+ pageTable[i * (PAGE_SIZE/4096)] = page_to_phys(pages[i]);\r
+ } \r
+ }\r
+ while(0);\r
\r
- return 0;\r
+ if (rgaIS_ERROR(status))\r
+ {\r
+ /* Release page array. */\r
+ if (result > 0 && pages != NULL)\r
+ {\r
+ for (i = 0; i < result; i++)\r
+ {\r
+ if (pages[i] == NULL)\r
+ {\r
+ break;\r
+ }\r
+#ifdef ANDROID\r
+ dma_sync_single_for_device(\r
+ NULL,\r
+ page_to_phys(pages[i]),\r
+ PAGE_SIZE,\r
+ DMA_FROM_DEVICE);\r
+#endif\r
+ page_cache_release(pages[i]);\r
+ }\r
+ }\r
+ }\r
+\r
+ return status;\r
}\r
\r
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_p;\r
int ret;\r
+ int status;\r
\r
struct page **pages = NULL;\r
\r
- /* cal src buf mmu info */ \r
- SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
- req->src.format, req->src.vir_w, req->src.vir_h,\r
- &SrcStart);\r
- if(SrcMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ MMU_Base = NULL;\r
\r
- /* cal dst buf mmu info */ \r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL; \r
- }\r
-\r
- /* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ do\r
+ {\r
+ /* cal src buf mmu info */ \r
+ SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
+ req->src.format, req->src.vir_w, req->src.vir_h,\r
+ &SrcStart);\r
+ if(SrcMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
- \r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
- return -EINVAL; \r
- }\r
- \r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed");\r
- return -EINVAL; \r
- }\r
+ /* cal dst buf mmu info */ \r
+ DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+ &DstStart);\r
+ if(DstMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i)<< PAGE_SHIFT));\r
- }\r
+ /* cal cmd buf mmu info */\r
+ CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ if(CMDMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- if(req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map src memory failed");\r
- return -EINVAL;\r
+ AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
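+        /* the page table is laid out as [cmd buffer pages][src pages][dst pages] */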
+ \r
+ pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ if(pages == NULL) {\r
+ pr_err("RGA MMU malloc pages mem failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break; \r
}\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base + CMDMemSize;\r
\r
- for(i=0; i<SrcMemSize; i++)\r
+ MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+ if(MMU_Base == NULL) {\r
+ pr_err("RGA MMU malloc MMU_Base point failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break; \r
+ }\r
+\r
+ for(i=0; i<CMDMemSize; i++) {\r
+ MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT)); \r
+ }\r
+\r
+ if(req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map src memory failed");\r
+ status = ret;\r
+ break;\r
+ }\r
}\r
- }\r
- \r
- if(req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed");\r
- return -EINVAL;\r
+ else\r
+ {\r
+ MMU_p = MMU_Base + CMDMemSize;\r
+ \r
+ for(i=0; i<SrcMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
+ }\r
}\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
\r
- for(i=0; i<DstMemSize; i++)\r
+ if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- } \r
- }\r
+ ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map dst memory failed");\r
+ status = ret;\r
+ break;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
+ \r
+ for(i=0; i<DstMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
+ } \r
+ }\r
\r
- /* zsq \r
- * change the buf address in req struct\r
- * for the reason of lie to MMU \r
- */\r
- req->mmu_info.base_addr = virt_to_phys(MMU_Base);\r
- \r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ /* zsq \r
+         * rewrite the buffer addresses in the req struct so they become
+         * indices into the MMU page table ("lie to the MMU")
+ */\r
+ \r
+ req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2); \r
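+        /* the MMU base register apparently takes the table address in 4-byte units, hence the >>2 */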
\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
- \r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
+ \r
+ /*record the malloc buf for the cmd end to release*/\r
+ reg->MMU_base = MMU_Base;\r
+\r
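+        /* flush the freshly written page table so the RGA MMU fetches valid entries from memory */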
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+\r
+ status = 0;\r
+\r
+ /* Free the page table */ \r
+ if (pages != NULL) {\r
+ kfree(pages);\r
+ }\r
+\r
+ return status;\r
+ }\r
+ while(0);\r
\r
+ \r
+ /* Free the page table */ \r
if (pages != NULL) {\r
- /* Free the page table */\r
kfree(pages);\r
- } \r
+ }\r
\r
- return 0;\r
+ /* Free MMU table */\r
+ if(MMU_Base != NULL) {\r
+ kfree(MMU_Base);\r
+ }\r
+\r
+ return status;\r
}\r
\r
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base;\r
- int ret;\r
+ uint32_t *MMU_Base = NULL;\r
+ uint32_t *MMU_p;\r
+    int ret, status = 0;
uint32_t stride;\r
\r
uint8_t shift;\r
sw = req->src.vir_w;\r
byte_num = sw >> shift;\r
stride = (byte_num + 3) & (~3);\r
+\r
+ do\r
+ {\r
\r
- SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);\r
- if(SrcMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);\r
+ if(SrcMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+ &DstStart);\r
+ if(DstMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ if(CMDMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
- \r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
- return -EINVAL; \r
- }\r
- \r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed");\r
- return -EINVAL; \r
- }\r
+ AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ \r
+ pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ if(pages == NULL) {\r
+ pr_err("RGA MMU malloc pages mem failed");\r
+ return -EINVAL; \r
+ }\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
- }\r
- \r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map src memory failed");\r
- return -EINVAL;\r
- }\r
+ MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+ if(MMU_Base == NULL) {\r
+ pr_err("RGA MMU malloc MMU_Base point failed");\r
+ break; \r
+ }\r
\r
- ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed");\r
- return -EINVAL;\r
- }\r
+ /* map CMD addr */\r
+ for(i=0; i<CMDMemSize; i++) \r
+ {\r
+ MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
+ }\r
\r
- /* zsq \r
- * change the buf address in req struct\r
- * for the reason of lie to MMU \r
- */\r
- req->mmu_info.base_addr = virt_to_phys(MMU_Base); \r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
+ /* map src addr */\r
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID) \r
+ { \r
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+ if (ret < 0) \r
+ {\r
+ pr_err("rga map src memory failed");\r
+ status = ret;\r
+ break; \r
+ }\r
+ }\r
+ else\r
+ {\r
+ MMU_p = MMU_Base + CMDMemSize;\r
+ \r
+ for(i=0; i<SrcMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
+ }\r
+ }\r
\r
- \r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
+ /* map dst addr */\r
+        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
+ {\r
+ ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
+ if (ret < 0) \r
+ {\r
+ pr_err("rga map dst memory failed");\r
+ status = ret;\r
+ break;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
+ \r
+ for(i=0; i<DstMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
+ }\r
+ }\r
+ \r
+\r
+ /* zsq \r
+         * rewrite the buffer addresses in the req struct so they become
+         * indices into the MMU page table ("lie to the MMU")
+ */\r
+ req->mmu_info.base_addr = virt_to_phys(MMU_Base); \r
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
+\r
+\r
+ /*record the malloc buf for the cmd end to release*/\r
+ reg->MMU_base = MMU_Base;\r
\r
- if (pages != NULL) {\r
/* Free the page table */\r
+ if (pages != NULL) { \r
+ kfree(pages);\r
+ }\r
+\r
+ return status;\r
+\r
+ }\r
+ while(0);\r
+\r
+ /* Free the page table */\r
+ if (pages != NULL) { \r
kfree(pages);\r
- } \r
+ }\r
+\r
+ /* Free mmu table */\r
+ if (MMU_Base != NULL) {\r
+ kfree(MMU_Base);\r
+ }\r
\r
return 0;\r
}\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base;\r
+ uint32_t *MMU_Base, *MMU_p;\r
int ret;\r
- \r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ int status;\r
\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ MMU_Base = NULL;\r
\r
- AllSize = DstMemSize + CMDMemSize;\r
- \r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
- return -EINVAL; \r
- }\r
- \r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed");\r
- return -EINVAL; \r
- }\r
+ do\r
+ { \r
+ DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+ &DstStart);\r
+ if(DstMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart+i)<<PAGE_SHIFT));\r
- }\r
- \r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed");\r
- return -EINVAL;\r
- }\r
+ CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ if(CMDMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- /* zsq \r
- * change the buf address in req struct\r
- * for the reason of lie to MMU \r
- */\r
- req->mmu_info.base_addr = virt_to_phys(MMU_Base); \r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);\r
- \r
- \r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
+ AllSize = DstMemSize + CMDMemSize;\r
+ \r
+ pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ if(pages == NULL) {\r
+ pr_err("RGA MMU malloc pages mem failed");\r
+ status = RGA_MALLOC_ERROR; \r
+ break;\r
+ }\r
+ \r
+ MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+        if(MMU_Base == NULL) {
+ pr_err("RGA MMU malloc MMU_Base point failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break; \r
+ }\r
+\r
+ for(i=0; i<CMDMemSize; i++) {\r
+ MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart+i)<<PAGE_SHIFT));\r
+ }\r
+\r
+ if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) \r
+ {\r
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map dst memory failed");\r
+ status = ret;\r
+ break;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ MMU_p = MMU_Base + CMDMemSize;\r
+ \r
+ for(i=0; i<DstMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
+ }\r
+ }\r
+ \r
+ \r
+ /* zsq \r
+         * rewrite the buffer addresses in the req struct so they become
+         * indices into the MMU page table ("lie to the MMU")
+ */\r
+ req->mmu_info.base_addr = virt_to_phys(MMU_Base); \r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);\r
+ \r
+ \r
+ /*record the malloc buf for the cmd end to release*/\r
+ reg->MMU_base = MMU_Base;\r
\r
- if (pages != NULL) {\r
/* Free the page table */\r
+ if (pages != NULL) \r
+ kfree(pages);\r
+\r
+ return 0;\r
+ }\r
+ while(0);\r
+\r
+ if (pages != NULL) \r
kfree(pages);\r
- } \r
\r
- return 0;\r
+ if (MMU_Base != NULL)\r
+ kfree(MMU_Base);\r
+ \r
+ return status;\r
}\r
\r
\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base;\r
- int ret;\r
+ uint32_t *MMU_Base, *MMU_p;\r
+ int ret, status;\r
+\r
+ MMU_Base = NULL;\r
+\r
+ do\r
+ { \r
+ /* cal dst buf mmu info */ \r
+ DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+ &DstStart);\r
+ if(DstMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- /* cal dst buf mmu info */ \r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ if(CMDMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ AllSize = DstMemSize + CMDMemSize;\r
+ \r
+ pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ if(pages == NULL) {\r
+ pr_err("RGA MMU malloc pages mem failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break;\r
+ }\r
+ \r
+ MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+        if(MMU_Base == NULL) {
+ pr_err("RGA MMU malloc MMU_Base point failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break;\r
+ }\r
\r
- AllSize = DstMemSize + CMDMemSize;\r
- \r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
- return -EINVAL; \r
- }\r
- \r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed");\r
- return -EINVAL; \r
- }\r
+ for(i=0; i<CMDMemSize; i++) {\r
+ MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart+i)<<PAGE_SHIFT));\r
+ }\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart+i)<<PAGE_SHIFT));\r
- }\r
- \r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed");\r
- return -EINVAL;\r
- }\r
+ if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
+ {\r
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map dst memory failed");\r
+ status = ret;\r
+ break;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ MMU_p = MMU_Base + CMDMemSize;\r
+ \r
+ for(i=0; i<DstMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
+ }\r
+ }\r
\r
- /* zsq \r
- * change the buf address in req struct\r
- * for the reason of lie to MMU \r
- */\r
- req->mmu_info.base_addr = virt_to_phys(MMU_Base); \r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);\r
- \r
- \r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
+ /* zsq \r
+         * rewrite the buffer addresses in the req struct so they become
+         * indices into the MMU page table ("lie to the MMU")
+ */\r
+ req->mmu_info.base_addr = virt_to_phys(MMU_Base); \r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);\r
+ \r
+ \r
+ /*record the malloc buf for the cmd end to release*/\r
+ reg->MMU_base = MMU_Base;\r
\r
- if (pages != NULL) {\r
/* Free the page table */\r
+ if (pages != NULL) { \r
+ kfree(pages);\r
+ } \r
+\r
+ return 0;\r
+\r
+ }\r
+ while(0);\r
+\r
+ if (pages != NULL)\r
kfree(pages);\r
- } \r
\r
- return 0;\r
+ if (MMU_Base != NULL)\r
+ kfree(MMU_Base);\r
+\r
+ return status;\r
}\r
\r
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base;\r
- int ret;\r
+ uint32_t *MMU_Base, *MMU_p;\r
+ int ret, status;\r
\r
- /* cal src buf mmu info */ \r
- SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
- req->src.format, req->src.vir_w, req->src.vir_h,\r
- &SrcStart);\r
- if(SrcMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ MMU_Base = NULL;\r
+ \r
+ do\r
+ {\r
+ /* cal src buf mmu info */ \r
+ SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
+ req->src.format, req->src.vir_w, req->src.vir_h,\r
+ &SrcStart);\r
+ if(SrcMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- /* cal dst buf mmu info */ \r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ /* cal dst buf mmu info */ \r
+ DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+ &DstStart);\r
+ if(DstMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- /* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ /* cal cmd buf mmu info */\r
+ CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ if(CMDMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
- \r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
- return -EINVAL; \r
- }\r
- \r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed");\r
- return -EINVAL; \r
- }\r
+ AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ \r
+ pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ if(pages == NULL) {\r
+ pr_err("RGA MMU malloc pages mem failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break; \r
+ }\r
+ \r
+ MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+        if(MMU_Base == NULL) {
+ pr_err("RGA MMU malloc MMU_Base point failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break; \r
+ }\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<< PAGE_SHIFT));\r
- }\r
- \r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map src memory failed");\r
- return -EINVAL;\r
- }\r
+ for(i=0; i<CMDMemSize; i++) {\r
+ MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<< PAGE_SHIFT));\r
+ }\r
\r
- ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed");\r
- return -EINVAL;\r
- }\r
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
+ {\r
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+ if (ret < 0) \r
+ {\r
+ pr_err("rga map src memory failed");\r
+ status = ret;\r
+ break;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ MMU_p = MMU_Base + CMDMemSize;\r
+ \r
+ for(i=0; i<SrcMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
+ } \r
+ }\r
\r
- /* zsq \r
- * change the buf address in req struct\r
- * for the reason of lie to MMU \r
- */\r
- req->mmu_info.base_addr = virt_to_phys(MMU_Base);\r
- \r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ \r
+ if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
+ {\r
+ ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
+ if (ret < 0) \r
+ {\r
+ pr_err("rga map dst memory failed");\r
+ status = ret;\r
+ break;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
+ \r
+ for(i=0; i<DstMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
+ }\r
+ }\r
\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
- \r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
+ /* zsq \r
+         * rewrite the buffer addresses in the req struct so they become
+         * indices into the MMU page table ("lie to the MMU")
+ */\r
+ req->mmu_info.base_addr = virt_to_phys(MMU_Base);\r
+ \r
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
+ \r
+ /*record the malloc buf for the cmd end to release*/\r
+ reg->MMU_base = MMU_Base;\r
\r
- if (pages != NULL) {\r
/* Free the page table */\r
+ if (pages != NULL) { \r
+ kfree(pages);\r
+ } \r
+\r
+ return 0;\r
+ }\r
+ while(0);\r
+\r
+ if (pages != NULL)\r
kfree(pages);\r
- } \r
\r
- return 0;\r
+ if (MMU_Base != NULL)\r
+ kfree(MMU_Base);\r
+\r
+ return status;\r
}\r
\r
\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_p;\r
int ret;\r
+ int status;\r
\r
- /* cal src buf mmu info */ \r
- SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
- req->src.format, req->src.vir_w, req->src.vir_h,\r
- &SrcStart);\r
- if(SrcMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ MMU_Base = NULL;\r
\r
- /* cal dst buf mmu info */ \r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ do\r
+ {\r
+ /* cal src buf mmu info */ \r
+ SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
+ req->src.format, req->src.vir_w, req->src.vir_h,\r
+ &SrcStart);\r
+ if(SrcMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- /* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ /* cal dst buf mmu info */ \r
+ DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+ &DstStart);\r
+ if(DstMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
- \r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
- return -EINVAL; \r
- }\r
+ /* cal cmd buf mmu info */\r
+ CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ if(CMDMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- /* \r
- * Allocate MMU Index mem\r
- * This mem release in run_to_done fun \r
- */\r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed");\r
- return -EINVAL; \r
- }\r
+ AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ \r
+ pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ if(pages == NULL) \r
+ {\r
+ pr_err("RGA MMU malloc pages mem failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break; \r
+ }\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
- }\r
+ /* \r
+ * Allocate MMU Index mem\r
+ * This mem release in run_to_done fun \r
+ */\r
+ MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+        if(MMU_Base == NULL) {
+ pr_err("RGA MMU malloc MMU_Base point failed");\r
+ status = RGA_MALLOC_ERROR; \r
+ break; \r
+ }\r
\r
+ for(i=0; i<CMDMemSize; i++) {\r
+ MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
+ }\r
\r
- /* map src pages */\r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map src memory failed");\r
- return -EINVAL;\r
- }\r
\r
- \r
- if(req->dst.yrgb_addr >= 0xc0000000) \r
- { \r
- /* kernel space */\r
- MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
- for(i=0; i<DstMemSize; i++) \r
+ /* map src pages */\r
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
- MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i)<< PAGE_SHIFT)); \r
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map src memory failed");\r
+ status = ret;\r
+ break;\r
+ }\r
}\r
- }\r
- else \r
- {\r
- /* user space */\r
- ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0) \r
+ else\r
{\r
- pr_err("rga map dst memory failed");\r
- return -EINVAL;\r
- } \r
- }\r
+ MMU_p = MMU_Base + CMDMemSize;\r
+ \r
+ for(i=0; i<SrcMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
+ } \r
+ }\r
\r
- /* zsq \r
- * change the buf address in req struct\r
- * for the reason of lie to MMU \r
- */\r
- req->mmu_info.base_addr = virt_to_phys(MMU_Base);\r
- \r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ \r
+ if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID) \r
+ { \r
+ /* kernel space */\r
+ MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
+ for(i=0; i<DstMemSize; i++) \r
+ {\r
+ MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i)<< PAGE_SHIFT)); \r
+ }\r
+ }\r
+ else \r
+ {\r
+ /* user space */\r
+ ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
+ if (ret < 0) \r
+ {\r
+ pr_err("rga map dst memory failed");\r
+ status = ret;\r
+ break;\r
+ } \r
+ }\r
\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
- \r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
+ /* zsq \r
+         * rewrite the buffer addresses in the req struct so they become
+         * indices into the MMU page table ("lie to the MMU")
+ */\r
+ req->mmu_info.base_addr = virt_to_phys(MMU_Base)>>2;\r
+\r
+ #if 0\r
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
+ #else\r
+\r
+    req->src.yrgb_addr &= 0xfffffff;
+ req->src.uv_addr &= 0xfffffff;\r
+ req->src.v_addr &= 0xfffffff;\r
+\r
+ req->dst.yrgb_addr &= 0xfffffff;\r
+ \r
+ #endif\r
+ \r
+ /*record the malloc buf for the cmd end to release*/\r
+ reg->MMU_base = MMU_Base;\r
\r
- if (pages != NULL) {\r
/* Free the page table */\r
+ if (pages != NULL) \r
+ { \r
+ kfree(pages);\r
+ } \r
+\r
+ return 0;\r
+ }\r
+ while(0);\r
+\r
+ if (pages != NULL)\r
kfree(pages);\r
- } \r
\r
- return 0;\r
+ if (MMU_Base != NULL)\r
+ kfree(MMU_Base);\r
+\r
+ return status;\r
}\r
\r
\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base;\r
- int ret;\r
+ uint32_t *MMU_Base, *MMU_p;\r
+ int ret, status;\r
\r
- /* cal src buf mmu info */ \r
- SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);\r
- if(SrcMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ MMU_Base = NULL;\r
\r
- /* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ do\r
+ { \r
+ /* cal src buf mmu info */ \r
+ SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);\r
+ if(SrcMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
- \r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
- return -EINVAL; \r
- }\r
- \r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed");\r
- return -EINVAL; \r
- }\r
+ /* cal cmd buf mmu info */\r
+ CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ if(CMDMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
- }\r
+ AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ \r
+ pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ if(pages == NULL) {\r
+ pr_err("RGA MMU malloc pages mem failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break; \r
+ }\r
+ \r
+ MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+        if(MMU_Base == NULL) {
+ pr_err("RGA MMU malloc MMU_Base point failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break; \r
+ }\r
+\r
+ for(i=0; i<CMDMemSize; i++) {\r
+ MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
+ }\r
+\r
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
+ {\r
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map src memory failed");\r
+                status = ret;
+                break;
+ }\r
+ }\r
+ else\r
+ {\r
+ MMU_p = MMU_Base + CMDMemSize;\r
\r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map src memory failed");\r
- return -EINVAL;\r
- }\r
+ for(i=0; i<SrcMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
+ } \r
+ }\r
\r
- /* zsq \r
- * change the buf address in req struct\r
- * for the reason of lie to MMU \r
- */\r
- req->mmu_info.base_addr = virt_to_phys(MMU_Base);\r
- \r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); \r
- \r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
+ /* zsq \r
+         * rewrite the buffer addresses in the req struct so they become
+         * indices into the MMU page table ("lie to the MMU")
+ */\r
+ req->mmu_info.base_addr = virt_to_phys(MMU_Base);\r
+ \r
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); \r
+ \r
+ /*record the malloc buf for the cmd end to release*/\r
+ reg->MMU_base = MMU_Base;\r
\r
- if (pages != NULL) {\r
- /* Free the page table */\r
+ if (pages != NULL) {\r
+ /* Free the page table */\r
+ kfree(pages);\r
+ } \r
+\r
+ return 0;\r
+ }\r
+ while(0);\r
+\r
+ if (pages != NULL)\r
kfree(pages);\r
- } \r
\r
- return 0;\r
+ if (MMU_Base != NULL)\r
+ kfree(MMU_Base);\r
+\r
+ return status;\r
}\r
\r
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base;\r
- int ret;\r
+ uint32_t *MMU_Base, *MMU_p;\r
+ int ret, status;\r
\r
- /* cal src buf mmu info */ \r
- SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);\r
- if(SrcMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ do\r
+ {\r
\r
- /* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
+ /* cal src buf mmu info */ \r
+ SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);\r
+ if(SrcMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
- \r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
- return -EINVAL; \r
- }\r
- \r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed");\r
- return -EINVAL; \r
- }\r
+ /* cal cmd buf mmu info */\r
+ CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ if(CMDMemSize == 0) {\r
+ return -EINVAL; \r
+ }\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
- }\r
+ AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ \r
+ pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ if(pages == NULL) {\r
+ pr_err("RGA MMU malloc pages mem failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break; \r
+ }\r
+ \r
+ MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+        if(MMU_Base == NULL) {
+ pr_err("RGA MMU malloc MMU_Base point failed");\r
+ status = RGA_MALLOC_ERROR;\r
+ break; \r
+ }\r
+\r
+ for(i=0; i<CMDMemSize; i++) {\r
+ MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
+ }\r
+\r
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
+ {\r
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map src memory failed");\r
+ status = ret;\r
+ break;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ MMU_p = MMU_Base + CMDMemSize;\r
\r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map src memory failed");\r
- return -EINVAL;\r
- }\r
+ for(i=0; i<SrcMemSize; i++)\r
+ {\r
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
+ } \r
+ }\r
\r
- /* zsq \r
- * change the buf address in req struct\r
- * for the reason of lie to MMU \r
- */\r
- req->mmu_info.base_addr = virt_to_phys(MMU_Base);\r
- \r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); \r
- \r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
+ /* zsq \r
+         * rewrite the buffer addresses in the req struct so they become
+         * indices into the MMU page table ("lie to the MMU")
+ */\r
+ req->mmu_info.base_addr = virt_to_phys(MMU_Base);\r
+ \r
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); \r
+ \r
+ /*record the malloc buf for the cmd end to release*/\r
+ reg->MMU_base = MMU_Base;\r
+\r
+ if (pages != NULL) {\r
+ /* Free the page table */\r
+ kfree(pages);\r
+ }\r
+\r
+ return 0;\r
\r
- if (pages != NULL) {\r
- /* Free the page table */\r
- kfree(pages);\r
}\r
+ while(0);\r
\r
- return 0;\r
+ if (pages != NULL)\r
+ kfree(pages);\r
+\r
+ if (MMU_Base != NULL)\r
+ kfree(MMU_Base);\r
+\r
+ return status;\r
}\r
\r
int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)\r