CONFIG_BACKLIGHT_PWM=y
CONFIG_FB_ROCKCHIP=y
CONFIG_LCDC_RK3368=y
+CONFIG_ROCKCHIP_RGA=y
+CONFIG_ROCKCHIP_RGA2=y
CONFIG_RK_TRSM=y
CONFIG_RK31XX_LVDS=y
CONFIG_DP_ANX6345=y
\r
#define IS_YUV_420(format) \\r
((format == RK_FORMAT_YCbCr_420_P) | (format == RK_FORMAT_YCbCr_420_SP) | \\r
- (format == RK_FORMAT_YCrCb_420_P) | (format == RK_FORMAT_YCrCb_420_SP)) \r
+ (format == RK_FORMAT_YCrCb_420_P) | (format == RK_FORMAT_YCrCb_420_SP))\r
\r
#define IS_YUV_422(format) \\r
((format == RK_FORMAT_YCbCr_422_P) | (format == RK_FORMAT_YCbCr_422_SP) | \\r
- (format == RK_FORMAT_YCrCb_422_P) | (format == RK_FORMAT_YCrCb_422_SP)) \r
+ (format == RK_FORMAT_YCrCb_422_P) | (format == RK_FORMAT_YCrCb_422_SP))\r
\r
#define IS_YUV(format) \\r
((format == RK_FORMAT_YCbCr_420_P) | (format == RK_FORMAT_YCbCr_420_SP) | \\r
(format == RK_FORMAT_YCrCb_420_P) | (format == RK_FORMAT_YCrCb_420_SP) | \\r
(format == RK_FORMAT_YCbCr_422_P) | (format == RK_FORMAT_YCbCr_422_SP) | \\r
(format == RK_FORMAT_YCrCb_422_P) | (format == RK_FORMAT_YCrCb_422_SP))\r
- \r
+\r
\r
extern rga_service_info rga_service;\r
\r
\r
x_time = ((s_act_w - 1)<<16) / (d_act_w - 1);\r
y_time = ((s_act_h - 1)<<16) / (d_act_h - 1);\r
- \r
+\r
sina = msg->sina;\r
cosa = msg->cosa;\r
\r
/* 16.16 x 16.16 */\r
/* matrix[] is 64 bit wide */\r
case 1 :\r
- tile->matrix[0] = cosa*x_time; \r
- tile->matrix[1] = -sina*y_time; \r
- tile->matrix[2] = sina*x_time; \r
+ tile->matrix[0] = cosa*x_time;\r
+ tile->matrix[1] = -sina*y_time;\r
+ tile->matrix[2] = sina*x_time;\r
tile->matrix[3] = cosa*y_time;\r
break;\r
case 2 :\r
- tile->matrix[0] = -(x_time<<16); \r
- tile->matrix[1] = 0; \r
- tile->matrix[2] = 0; \r
+ tile->matrix[0] = -(x_time<<16);\r
+ tile->matrix[1] = 0;\r
+ tile->matrix[2] = 0;\r
tile->matrix[3] = (y_time<<16);\r
break;\r
case 3 :\r
- tile->matrix[0] = (x_time<<16); \r
- tile->matrix[1] = 0; \r
- tile->matrix[2] = 0; \r
+ tile->matrix[0] = (x_time<<16);\r
+ tile->matrix[1] = 0;\r
+ tile->matrix[2] = 0;\r
tile->matrix[3] = -(y_time<<16);\r
break;\r
default :\r
- tile->matrix[0] = (uint64_t)1<<32; \r
- tile->matrix[1] = 0; \r
- tile->matrix[2] = 0; \r
+ tile->matrix[0] = (uint64_t)1<<32;\r
+ tile->matrix[1] = 0;\r
+ tile->matrix[2] = 0;\r
tile->matrix[3] = (uint64_t)1<<32;\r
- break; \r
- } \r
+ break;\r
+ }\r
}\r
\r
\r
int32_t RGA_gen_two_pro(struct rga_req *msg, struct rga_req *msg1)\r
{\r
- \r
+\r
struct rga_req *mp;\r
uint32_t w_ratio, h_ratio;\r
uint32_t stride;\r
uint32_t pl;\r
\r
daw = dah = 0;\r
- \r
+\r
mp = msg1;\r
\r
- if(msg->dst.act_w == 0) \r
+ if(msg->dst.act_w == 0)\r
{\r
printk("%s, [%d] rga dst act_w is zero\n", __FUNCTION__, __LINE__);\r
return -EINVAL;\r
}\r
w_ratio = (msg->src.act_w << 16) / msg->dst.act_w;\r
h_ratio = (msg->src.act_h << 16) / msg->dst.act_h;\r
- \r
+\r
memcpy(msg1, msg, sizeof(struct rga_req));\r
\r
msg->dst.format = msg->src.format;\r
\r
/*pre_scale_w cal*/\r
- if ((w_ratio >= (2<<16)) && (w_ratio < (4<<16))) { \r
+ if ((w_ratio >= (2<<16)) && (w_ratio < (4<<16))) {\r
daw = (msg->src.act_w + 1) >> 1;\r
if((IS_YUV_420(msg->dst.format)) && (daw & 1)) {\r
daw -= 1;\r
msg->src.act_w = daw << 1;\r
- } \r
+ }\r
}\r
else if ((w_ratio >= (4<<16)) && (w_ratio < (8<<16))) {\r
- daw = (msg->src.act_w + 3) >> 2; \r
+ daw = (msg->src.act_w + 3) >> 2;\r
if((IS_YUV_420(msg->dst.format)) && (daw & 1)) {\r
daw -= 1;\r
- msg->src.act_w = daw << 2; \r
+ msg->src.act_w = daw << 2;\r
}\r
}\r
else if ((w_ratio >= (8<<16)) && (w_ratio < (16<<16))) {\r
daw = (msg->src.act_w + 7) >> 3;\r
if((IS_YUV_420(msg->dst.format)) && (daw & 1)) {\r
daw -= 1;\r
- msg->src.act_w = daw << 3; \r
+ msg->src.act_w = daw << 3;\r
}\r
}\r
else\r
{\r
daw = msg->src.act_w;\r
}\r
- \r
+\r
pl = (RGA_pixel_width_init(msg->src.format));\r
stride = (pl * daw + 3) & (~3);\r
msg->dst.act_w = daw;\r
msg->dst.vir_w = stride / pl;\r
\r
- /*pre_scale_h cal*/ \r
- if ((h_ratio >= (2<<16)) && (h_ratio < (4<<16))) { \r
+ /*pre_scale_h cal*/\r
+ if ((h_ratio >= (2<<16)) && (h_ratio < (4<<16))) {\r
dah = (msg->src.act_h + 1) >> 1;\r
if((IS_YUV(msg->dst.format)) && (dah & 1)) {\r
dah -= 1;\r
msg->src.act_h = dah << 1;\r
- } \r
+ }\r
}\r
else if ((h_ratio >= (4<<16)) && (h_ratio < (8<<16))) {\r
dah = (msg->src.act_h + 3) >> 2;\r
if((IS_YUV(msg->dst.format)) && (dah & 1)) {\r
dah -= 1;\r
msg->src.act_h = dah << 2;\r
- \r
+\r
}\r
}\r
else if ((h_ratio >= (8<<16)) && (h_ratio < (16<<16))) {\r
{\r
dah = msg->src.act_h;\r
}\r
- \r
+\r
msg->dst.act_h = dah;\r
msg->dst.vir_h = dah;\r
\r
msg->dst.x_offset = 0;\r
msg->dst.y_offset = 0;\r
- \r
- msg->dst.yrgb_addr = (u32)rga_service.pre_scale_buf;\r
+\r
+ msg->dst.yrgb_addr = (unsigned long)rga_service.pre_scale_buf;\r
msg->dst.uv_addr = msg->dst.yrgb_addr + stride * dah;\r
msg->dst.v_addr = msg->dst.uv_addr + ((stride * dah) >> 1);\r
\r
\r
msg1->src.x_offset = 0;\r
msg1->src.y_offset = 0;\r
- \r
+\r
return 0;\r
}\r
\r
\r
typedef struct rga_img_info_t\r
{\r
- unsigned int yrgb_addr; /* yrgb mem addr */\r
- unsigned int uv_addr; /* cb/cr mem addr */\r
- unsigned int v_addr; /* cr mem addr */\r
- unsigned int format; //definition by RK_FORMAT\r
+ unsigned long yrgb_addr; /* yrgb mem addr */\r
+ unsigned long uv_addr; /* cb/cr mem addr */\r
+ unsigned long v_addr; /* cr mem addr */\r
+ unsigned long format; //definition by RK_FORMAT\r
\r
unsigned short act_w;\r
unsigned short act_h;\r
typedef struct MMU\r
{\r
unsigned char mmu_en;\r
- uint32_t base_addr;\r
+ unsigned long base_addr;\r
uint32_t mmu_flag; /* [0] mmu enable [1] src_flush [2] dst_flush [3] CMD_flush [4~5] page size*/\r
} MMU;\r
\r
rga_img_info_t dst; /* dst image info */\r
rga_img_info_t pat; /* patten image info */\r
\r
- uint32_t rop_mask_addr; /* rop4 mask addr */\r
- uint32_t LUT_addr; /* LUT addr */\r
+ unsigned long rop_mask_addr; /* rop4 mask addr */\r
+ unsigned long LUT_addr; /* LUT addr */\r
\r
RECT clip; /* dst clip window default value is dst_vir */\r
/* value from [0, w-1] / [0, h-1]*/\r
\r
uint32_t cmd_buff[28*8];/* cmd_buff for rga */\r
uint32_t *pre_scale_buf;\r
+ unsigned long *pre_scale_buf_virtual;\r
atomic_t int_disable; /* 0 int enable 1 int disable */\r
atomic_t cmd_num;\r
atomic_t rga_working;\r
-/*
- * Copyright (C) 2012 ROCKCHIP, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "rga: " fmt
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/mutex.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <asm/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-//#include <mach/io.h>
-//#include <mach/irqs.h>
-#include <linux/fs.h>
-#include <asm/uaccess.h>
-#include <linux/miscdevice.h>
-#include <linux/poll.h>
-#include <linux/delay.h>
-#include <linux/wait.h>
-#include <linux/syscalls.h>
-#include <linux/timer.h>
-#include <linux/time.h>
-#include <asm/cacheflush.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include <linux/wakelock.h>
-
-#if defined(CONFIG_ION_ROCKCHIP)
-#include <linux/rockchip_ion.h>
-#endif
-
-
-#include "rga.h"
-#include "rga_reg_info.h"
-#include "rga_mmu_info.h"
-#include "RGA_API.h"
-
-#define RGA_TEST_CASE 0
-
-#define RGA_TEST 0
-#define RGA_TEST_TIME 0
-#define RGA_TEST_FLUSH_TIME 0
-#define RGA_INFO_BUS_ERROR 1
-
-#define PRE_SCALE_BUF_SIZE 2048*1024*4
-
-#define RGA_POWER_OFF_DELAY 4*HZ /* 4s */
-#define RGA_TIMEOUT_DELAY 2*HZ /* 2s */
-
-#define RGA_MAJOR 255
-
-#if defined(CONFIG_ARCH_RK2928) || defined(CONFIG_ARCH_RK3026)
-#define RK30_RGA_PHYS RK2928_RGA_PHYS
-#define RK30_RGA_SIZE RK2928_RGA_SIZE
-#endif
-#define RGA_RESET_TIMEOUT 1000
-
-/* Driver information */
-#define DRIVER_DESC "RGA Device Driver"
-#define DRIVER_NAME "rga"
-
-#define RGA_VERSION "1.003"
-
-ktime_t rga_start;
-ktime_t rga_end;
-
-rga_session rga_session_global;
-
-long (*rga_ioctl_kernel_p)(struct rga_req *);
-
-
-struct rga_drvdata {
- struct miscdevice miscdev;
- struct device dev;
- void *rga_base;
- int irq;
-
- struct delayed_work power_off_work;
- void (*rga_irq_callback)(int rga_retval); //callback function used by aync call
- struct wake_lock wake_lock;
-
- struct clk *pd_rga;
- struct clk *aclk_rga;
- struct clk *hclk_rga;
-
- //#if defined(CONFIG_ION_ROCKCHIP)
- struct ion_client * ion_client;
- //#endif
-};
-
-static struct rga_drvdata *drvdata;
-rga_service_info rga_service;
-struct rga_mmu_buf_t rga_mmu_buf;
-
-
-#if defined(CONFIG_ION_ROCKCHIP)
-extern struct ion_client *rockchip_ion_client_create(const char * name);
-#endif
-
-static int rga_blit_async(rga_session *session, struct rga_req *req);
-static void rga_del_running_list(void);
-static void rga_del_running_list_timeout(void);
-static void rga_try_set_reg(void);
-
-
-/* Logging */
-#define RGA_DEBUG 1
-#if RGA_DEBUG
-#define DBG(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)
-#define ERR(format, args...) printk(KERN_ERR "%s: " format, DRIVER_NAME, ## args)
-#define WARNING(format, args...) printk(KERN_WARN "%s: " format, DRIVER_NAME, ## args)
-#define INFO(format, args...) printk(KERN_INFO "%s: " format, DRIVER_NAME, ## args)
-#else
-#define DBG(format, args...)
-#define ERR(format, args...)
-#define WARNING(format, args...)
-#define INFO(format, args...)
-#endif
-
-#if RGA_TEST
-static void print_info(struct rga_req *req)
-{
- printk("src : yrgb_addr = %.8x, src.uv_addr = %.8x, src.v_addr = %.8x, format = %d\n",
- req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, req->src.format);
- printk("src : act_w = %d, act_h = %d, vir_w = %d, vir_h = %d\n",
- req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h);
- printk("src : x_off = %.8x y_off = %.8x\n", req->src.x_offset, req->src.y_offset);
-
- printk("dst : yrgb_addr = %.8x, dst.uv_addr = %.8x, dst.v_addr = %.8x, format = %d\n",
- req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, req->dst.format);
- printk("dst : x_off = %.8x y_off = %.8x\n", req->dst.x_offset, req->dst.y_offset);
- printk("dst : act_w = %d, act_h = %d, vir_w = %d, vir_h = %d\n",
- req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h);
-
- printk("clip.xmin = %d, clip.xmax = %d. clip.ymin = %d, clip.ymax = %d\n",
- req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax);
-
- printk("mmu_flag = %.8x\n", req->mmu_info.mmu_flag);
-
- //printk("alpha_rop_flag = %.8x\n", req->alpha_rop_flag);
- //printk("alpha_rop_mode = %.8x\n", req->alpha_rop_mode);
- //printk("PD_mode = %.8x\n", req->PD_mode);
-}
-#endif
-
-
-static inline void rga_write(u32 b, u32 r)
-{
- __raw_writel(b, drvdata->rga_base + r);
-}
-
-static inline u32 rga_read(u32 r)
-{
- return __raw_readl(drvdata->rga_base + r);
-}
-
-static void rga_soft_reset(void)
-{
- u32 i;
- u32 reg;
-
- rga_write(1, RGA_SYS_CTRL); //RGA_SYS_CTRL
-
- for(i = 0; i < RGA_RESET_TIMEOUT; i++)
- {
- reg = rga_read(RGA_SYS_CTRL) & 1; //RGA_SYS_CTRL
-
- if(reg == 0)
- break;
-
- udelay(1);
- }
-
- if(i == RGA_RESET_TIMEOUT)
- ERR("soft reset timeout.\n");
-}
-
-static void rga_dump(void)
-{
- int running;
- struct rga_reg *reg, *reg_tmp;
- rga_session *session, *session_tmp;
-
- running = atomic_read(&rga_service.total_running);
- printk("rga total_running %d\n", running);
-
- #if 0
-
- /* Dump waiting list info */
- if (!list_empty(&rga_service.waiting))
- {
- list_head *next;
+/*\r
+ * Copyright (C) 2012 ROCKCHIP, Inc.\r
+ *\r
+ * This software is licensed under the terms of the GNU General Public\r
+ * License version 2, as published by the Free Software Foundation, and\r
+ * may be copied, distributed, and modified under those terms.\r
+ *\r
+ * This program is distributed in the hope that it will be useful,\r
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+ * GNU General Public License for more details.\r
+ *\r
+ */\r
+\r
+#define pr_fmt(fmt) "rga: " fmt\r
+#include <linux/kernel.h>\r
+#include <linux/init.h>\r
+#include <linux/module.h>\r
+#include <linux/platform_device.h>\r
+#include <linux/sched.h>\r
+#include <linux/mutex.h>\r
+#include <linux/err.h>\r
+#include <linux/clk.h>\r
+#include <asm/delay.h>\r
+#include <linux/dma-mapping.h>\r
+#include <linux/delay.h>\r
+#include <asm/io.h>\r
+#include <linux/irq.h>\r
+#include <linux/interrupt.h>\r
+//#include <mach/io.h>\r
+//#include <mach/irqs.h>\r
+#include <linux/fs.h>\r
+#include <asm/uaccess.h>\r
+#include <linux/miscdevice.h>\r
+#include <linux/poll.h>\r
+#include <linux/delay.h>\r
+#include <linux/wait.h>\r
+#include <linux/syscalls.h>\r
+#include <linux/timer.h>\r
+#include <linux/time.h>\r
+#include <asm/cacheflush.h>\r
+#include <linux/slab.h>\r
+#include <linux/fb.h>\r
+#include <linux/wakelock.h>\r
+\r
+#if defined(CONFIG_ION_ROCKCHIP)\r
+#include <linux/rockchip_ion.h>\r
+#endif\r
+\r
+\r
+#include "rga.h"\r
+#include "rga_reg_info.h"\r
+#include "rga_mmu_info.h"\r
+#include "RGA_API.h"\r
+\r
+#define RGA_TEST_CASE 0\r
+\r
+#define RGA_TEST 0\r
+#define RGA_TEST_TIME 0\r
+#define RGA_TEST_FLUSH_TIME 0\r
+#define RGA_INFO_BUS_ERROR 1\r
+\r
+#define PRE_SCALE_BUF_SIZE 2048*1024*4\r
+\r
+#define RGA_POWER_OFF_DELAY 4*HZ /* 4s */\r
+#define RGA_TIMEOUT_DELAY 2*HZ /* 2s */\r
+\r
+#define RGA_MAJOR 255\r
+\r
+#if defined(CONFIG_ARCH_RK2928) || defined(CONFIG_ARCH_RK3026)\r
+#define RK30_RGA_PHYS RK2928_RGA_PHYS\r
+#define RK30_RGA_SIZE RK2928_RGA_SIZE\r
+#endif\r
+#define RGA_RESET_TIMEOUT 1000\r
+\r
+/* Driver information */\r
+#define DRIVER_DESC "RGA Device Driver"\r
+#define DRIVER_NAME "rga"\r
+\r
+#define RGA_VERSION "1.003"\r
+\r
+ktime_t rga_start;\r
+ktime_t rga_end;\r
+\r
+rga_session rga_session_global;\r
+\r
+long (*rga_ioctl_kernel_p)(struct rga_req *);\r
+\r
+\r
+struct rga_drvdata {\r
+ struct miscdevice miscdev;\r
+ struct device dev;\r
+ void *rga_base;\r
+ int irq;\r
+\r
+ struct delayed_work power_off_work;\r
+ void (*rga_irq_callback)(int rga_retval); //callback function used by aync call\r
+ struct wake_lock wake_lock;\r
+\r
+ struct clk *pd_rga;\r
+ struct clk *aclk_rga;\r
+ struct clk *hclk_rga;\r
+\r
+ //#if defined(CONFIG_ION_ROCKCHIP)\r
+ struct ion_client * ion_client;\r
+ //#endif\r
+};\r
+\r
+static struct rga_drvdata *drvdata;\r
+rga_service_info rga_service;\r
+struct rga_mmu_buf_t rga_mmu_buf;\r
+\r
+\r
+#if defined(CONFIG_ION_ROCKCHIP)\r
+extern struct ion_client *rockchip_ion_client_create(const char * name);\r
+#endif\r
+\r
+static int rga_blit_async(rga_session *session, struct rga_req *req);\r
+static void rga_del_running_list(void);\r
+static void rga_del_running_list_timeout(void);\r
+static void rga_try_set_reg(void);\r
+\r
+\r
+/* Logging */\r
+#define RGA_DEBUG 1\r
+#if RGA_DEBUG\r
+#define DBG(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)\r
+#define ERR(format, args...) printk(KERN_ERR "%s: " format, DRIVER_NAME, ## args)\r
+#define WARNING(format, args...) printk(KERN_WARN "%s: " format, DRIVER_NAME, ## args)\r
+#define INFO(format, args...) printk(KERN_INFO "%s: " format, DRIVER_NAME, ## args)\r
+#else\r
+#define DBG(format, args...)\r
+#define ERR(format, args...)\r
+#define WARNING(format, args...)\r
+#define INFO(format, args...)\r
+#endif\r
+\r
+#if RGA_TEST\r
+static void print_info(struct rga_req *req)\r
+{\r
+ printk("src : yrgb_addr = %.8x, src.uv_addr = %.8x, src.v_addr = %.8x, format = %d\n",\r
+ req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, req->src.format);\r
+ printk("src : act_w = %d, act_h = %d, vir_w = %d, vir_h = %d\n",\r
+ req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h);\r
+ printk("src : x_off = %.8x y_off = %.8x\n", req->src.x_offset, req->src.y_offset);\r
+\r
+ printk("dst : yrgb_addr = %.8x, dst.uv_addr = %.8x, dst.v_addr = %.8x, format = %d\n",\r
+ req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, req->dst.format);\r
+ printk("dst : x_off = %.8x y_off = %.8x\n", req->dst.x_offset, req->dst.y_offset);\r
+ printk("dst : act_w = %d, act_h = %d, vir_w = %d, vir_h = %d\n",\r
+ req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h);\r
+\r
+ printk("clip.xmin = %d, clip.xmax = %d. clip.ymin = %d, clip.ymax = %d\n",\r
+ req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax);\r
+\r
+ printk("mmu_flag = %.8x\n", req->mmu_info.mmu_flag);\r
+\r
+ //printk("alpha_rop_flag = %.8x\n", req->alpha_rop_flag);\r
+ //printk("alpha_rop_mode = %.8x\n", req->alpha_rop_mode);\r
+ //printk("PD_mode = %.8x\n", req->PD_mode);\r
+}\r
+#endif\r
+\r
+\r
+static inline void rga_write(u32 b, u32 r)\r
+{\r
+ __raw_writel(b, drvdata->rga_base + r);\r
+}\r
+\r
+static inline u32 rga_read(u32 r)\r
+{\r
+ return __raw_readl(drvdata->rga_base + r);\r
+}\r
+\r
+static void rga_soft_reset(void)\r
+{\r
+ u32 i;\r
+ u32 reg;\r
+\r
+ rga_write(1, RGA_SYS_CTRL); //RGA_SYS_CTRL\r
+\r
+ for(i = 0; i < RGA_RESET_TIMEOUT; i++)\r
+ {\r
+ reg = rga_read(RGA_SYS_CTRL) & 1; //RGA_SYS_CTRL\r
+\r
+ if(reg == 0)\r
+ break;\r
+\r
+ udelay(1);\r
+ }\r
+\r
+ if(i == RGA_RESET_TIMEOUT)\r
+ ERR("soft reset timeout.\n");\r
+}\r
+\r
+static void rga_dump(void)\r
+{\r
+ int running;\r
+ struct rga_reg *reg, *reg_tmp;\r
+ rga_session *session, *session_tmp;\r
+\r
+ running = atomic_read(&rga_service.total_running);\r
+ printk("rga total_running %d\n", running);\r
+\r
+ #if 0\r
+\r
+ /* Dump waiting list info */\r
+ if (!list_empty(&rga_service.waiting))\r
+ {\r
+ list_head *next;\r
+\r
+ next = &rga_service.waiting;\r
+\r
+ printk("rga_service dump waiting list\n");\r
+\r
+ do\r
+ {\r
+ reg = list_entry(next->next, struct rga_reg, status_link);\r
+ running = atomic_read(®->session->task_running);\r
+ num_done = atomic_read(®->session->num_done);\r
+ printk("rga session pid %d, done %d, running %d\n", reg->session->pid, num_done, running);\r
+ next = next->next;\r
+ }\r
+ while(!list_empty(next));\r
+ }\r
+\r
+ /* Dump running list info */\r
+ if (!list_empty(&rga_service.running))\r
+ {\r
+ printk("rga_service dump running list\n");\r
+\r
+ list_head *next;\r
+\r
+ next = &rga_service.running;\r
+ do\r
+ {\r
+ reg = list_entry(next->next, struct rga_reg, status_link);\r
+ running = atomic_read(®->session->task_running);\r
+ num_done = atomic_read(®->session->num_done);\r
+ printk("rga session pid %d, done %d, running %d:\n", reg->session->pid, num_done, running);\r
+ next = next->next;\r
+ }\r
+ while(!list_empty(next));\r
+ }\r
+ #endif\r
+\r
+ list_for_each_entry_safe(session, session_tmp, &rga_service.session, list_session)\r
+ {\r
+ printk("session pid %d:\n", session->pid);\r
+ running = atomic_read(&session->task_running);\r
+ printk("task_running %d\n", running);\r
+ list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link)\r
+ {\r
+ printk("waiting register set 0x%.lu\n", (unsigned long)reg);\r
+ }\r
+ list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link)\r
+ {\r
+ printk("running register set 0x%.lu\n", (unsigned long)reg);\r
+ }\r
+ }\r
+}\r
+\r
+static inline void rga_queue_power_off_work(void)\r
+{\r
+ queue_delayed_work(system_nrt_wq, &drvdata->power_off_work, RGA_POWER_OFF_DELAY);\r
+}\r
+\r
+/* Caller must hold rga_service.lock */\r
+static void rga_power_on(void)\r
+{\r
+ static ktime_t last;\r
+ ktime_t now = ktime_get();\r
+\r
+ if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {\r
+ cancel_delayed_work_sync(&drvdata->power_off_work);\r
+ rga_queue_power_off_work();\r
+ last = now;\r
+ }\r
+ if (rga_service.enable)\r
+ return;\r
+\r
+ clk_prepare_enable(drvdata->aclk_rga);\r
+ clk_prepare_enable(drvdata->hclk_rga);\r
+ //clk_prepare_enable(drvdata->pd_rga);\r
+ wake_lock(&drvdata->wake_lock);\r
+ rga_service.enable = true;\r
+}\r
+\r
+/* Caller must hold rga_service.lock */\r
+static void rga_power_off(void)\r
+{\r
+ int total_running;\r
+\r
+ if (!rga_service.enable) {\r
+ return;\r
+ }\r
+\r
+ total_running = atomic_read(&rga_service.total_running);\r
+ if (total_running) {\r
+ pr_err("power off when %d task running!!\n", total_running);\r
+ mdelay(50);\r
+ pr_err("delay 50 ms for running task\n");\r
+ rga_dump();\r
+ }\r
+\r
+ //clk_disable_unprepare(drvdata->pd_rga);\r
+ clk_disable_unprepare(drvdata->aclk_rga);\r
+ clk_disable_unprepare(drvdata->hclk_rga);\r
+ wake_unlock(&drvdata->wake_lock);\r
+ rga_service.enable = false;\r
+}\r
+\r
+static void rga_power_off_work(struct work_struct *work)\r
+{\r
+ if (mutex_trylock(&rga_service.lock)) {\r
+ rga_power_off();\r
+ mutex_unlock(&rga_service.lock);\r
+ } else {\r
+ /* Come back later if the device is busy... */\r
- next = &rga_service.waiting;
-
- printk("rga_service dump waiting list\n");
-
- do
- {
- reg = list_entry(next->next, struct rga_reg, status_link);
- running = atomic_read(®->session->task_running);
- num_done = atomic_read(®->session->num_done);
- printk("rga session pid %d, done %d, running %d\n", reg->session->pid, num_done, running);
- next = next->next;
- }
- while(!list_empty(next));
- }
-
- /* Dump running list info */
- if (!list_empty(&rga_service.running))
- {
- printk("rga_service dump running list\n");
-
- list_head *next;
-
- next = &rga_service.running;
- do
- {
- reg = list_entry(next->next, struct rga_reg, status_link);
- running = atomic_read(®->session->task_running);
- num_done = atomic_read(®->session->num_done);
- printk("rga session pid %d, done %d, running %d:\n", reg->session->pid, num_done, running);
- next = next->next;
- }
- while(!list_empty(next));
- }
- #endif
-
- list_for_each_entry_safe(session, session_tmp, &rga_service.session, list_session)
- {
- printk("session pid %d:\n", session->pid);
- running = atomic_read(&session->task_running);
- printk("task_running %d\n", running);
- list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link)
- {
- printk("waiting register set 0x%.8x\n", (unsigned int)reg);
- }
- list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link)
- {
- printk("running register set 0x%.8x\n", (unsigned int)reg);
- }
- }
-}
-
-static inline void rga_queue_power_off_work(void)
-{
- queue_delayed_work(system_nrt_wq, &drvdata->power_off_work, RGA_POWER_OFF_DELAY);
-}
-
-/* Caller must hold rga_service.lock */
-static void rga_power_on(void)
-{
- static ktime_t last;
- ktime_t now = ktime_get();
-
- if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
- cancel_delayed_work_sync(&drvdata->power_off_work);
- rga_queue_power_off_work();
- last = now;
- }
- if (rga_service.enable)
- return;
-
- clk_prepare_enable(drvdata->aclk_rga);
- clk_prepare_enable(drvdata->hclk_rga);
- //clk_prepare_enable(drvdata->pd_rga);
- wake_lock(&drvdata->wake_lock);
- rga_service.enable = true;
-}
-
-/* Caller must hold rga_service.lock */
-static void rga_power_off(void)
-{
- int total_running;
-
- if (!rga_service.enable) {
- return;
- }
-
- total_running = atomic_read(&rga_service.total_running);
- if (total_running) {
- pr_err("power off when %d task running!!\n", total_running);
- mdelay(50);
- pr_err("delay 50 ms for running task\n");
- rga_dump();
- }
-
- //clk_disable_unprepare(drvdata->pd_rga);
- clk_disable_unprepare(drvdata->aclk_rga);
- clk_disable_unprepare(drvdata->hclk_rga);
- wake_unlock(&drvdata->wake_lock);
- rga_service.enable = false;
-}
-
-static void rga_power_off_work(struct work_struct *work)
-{
- if (mutex_trylock(&rga_service.lock)) {
- rga_power_off();
- mutex_unlock(&rga_service.lock);
- } else {
- /* Come back later if the device is busy... */
rga_queue_power_off_work();
}
}
cmd_buf = (uint32_t *)rga_service.cmd_buff + offset*32;
reg_p = (uint32_t *)reg->cmd_reg;
- for(i=0; i<32; i++)
- {
+ for(i=0; i<32; i++)\r
cmd_buf[i] = reg_p[i];
- }
-
- dsb();
+\r
}
rga_copy_reg(reg, 0);
rga_reg_from_wait_to_run(reg);
-
+\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);
- outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]),virt_to_phys(&rga_service.cmd_buff[28]));
+ outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]),virt_to_phys(&rga_service.cmd_buff[28]));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);\r
+ #endif\r
#if defined(CONFIG_ARCH_RK30)
rga_soft_reset();
req->sg_src = NULL;
req->sg_dst = NULL;
-
+\r
src_offset = req->line_draw_info.flag;
dst_offset = req->line_draw_info.line_width;
} else {
dev_info(&pdev->dev, "rga ion client create success!\n");
}
- #endif
-
- ret = misc_register(&rga_dev);
- if(ret)
- {
- ERR("cannot register miscdev (%d)\n", ret);
- goto err_misc_register;
- }
-
- pr_info("Driver loaded succesfully\n");
-
- return 0;
-
-err_misc_register:
- free_irq(data->irq, pdev);
-err_irq:
- iounmap(data->rga_base);
-err_ioremap:
- wake_lock_destroy(&data->wake_lock);
- //kfree(data);
-
- return ret;
-}
-
-static int rga_drv_remove(struct platform_device *pdev)
-{
- struct rga_drvdata *data = platform_get_drvdata(pdev);
- DBG("%s [%d]\n",__FUNCTION__,__LINE__);
-
- wake_lock_destroy(&data->wake_lock);
- misc_deregister(&(data->miscdev));
- free_irq(data->irq, &data->miscdev);
- iounmap((void __iomem *)(data->rga_base));
-
- //clk_put(data->pd_rga);
- devm_clk_put(&pdev->dev, data->aclk_rga);
- devm_clk_put(&pdev->dev, data->hclk_rga);
-
- //kfree(data);
- return 0;
-}
-
-static struct platform_driver rga_driver = {
- .probe = rga_drv_probe,
- .remove = rga_drv_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "rga",
- .of_match_table = of_match_ptr(rockchip_rga_dt_ids),
- },
-};
-
-
-void rga_test_0(void);
-void rga_test_1(void);
-
-
-static int __init rga_init(void)
-{
- int ret;
- uint32_t *mmu_buf;
- uint32_t i;
- uint32_t *buf_p;
-
- /* malloc pre scale mid buf mmu table */
- mmu_buf = kzalloc(1024*8, GFP_KERNEL);
- if(mmu_buf == NULL) {
- printk(KERN_ERR "RGA get Pre Scale buff failed. \n");
- return -1;
- }
-
- /* malloc 4 M buf */
- for(i=0; i<1024; i++) {
- buf_p = (uint32_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- if(buf_p == NULL) {
- printk(KERN_ERR "RGA init pre scale buf falied\n");
- return -ENOMEM;
- }
- mmu_buf[i] = virt_to_phys((void *)((uint32_t)buf_p));
- }
-
- rga_service.pre_scale_buf = (uint32_t *)mmu_buf;
-
- buf_p = kmalloc(1024*256, GFP_KERNEL);
- rga_mmu_buf.buf_virtual = buf_p;
- rga_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((uint32_t)buf_p));
- rga_mmu_buf.front = 0;
- rga_mmu_buf.back = 64*1024;
- rga_mmu_buf.size = 64*1024;
-
- rga_mmu_buf.pages = kmalloc((32768)* sizeof(struct page *), GFP_KERNEL);
-
- if ((ret = platform_driver_register(&rga_driver)) != 0)
- {
- printk(KERN_ERR "Platform device register failed (%d).\n", ret);
- return ret;
- }
-
- {
- rga_session_global.pid = 0x0000ffff;
- INIT_LIST_HEAD(&rga_session_global.waiting);
- INIT_LIST_HEAD(&rga_session_global.running);
- INIT_LIST_HEAD(&rga_session_global.list_session);
-
- INIT_LIST_HEAD(&rga_service.waiting);
- INIT_LIST_HEAD(&rga_service.running);
- INIT_LIST_HEAD(&rga_service.done);
- INIT_LIST_HEAD(&rga_service.session);
-
- init_waitqueue_head(&rga_session_global.wait);
- //mutex_lock(&rga_service.lock);
- list_add_tail(&rga_session_global.list_session, &rga_service.session);
- //mutex_unlock(&rga_service.lock);
- atomic_set(&rga_session_global.task_running, 0);
- atomic_set(&rga_session_global.num_done, 0);
- }
-
-
-
- #if RGA_TEST_CASE
- rga_test_0();
- #endif
-
- INFO("Module initialized.\n");
-
- return 0;
-}
-
-static void __exit rga_exit(void)
-{
- uint32_t i;
-
- rga_power_off();
-
- for(i=0; i<1024; i++)
- {
- if((uint32_t *)rga_service.pre_scale_buf[i] != NULL)
- {
- __free_page((void *)rga_service.pre_scale_buf[i]);
- }
- }
-
- if(rga_service.pre_scale_buf != NULL) {
- kfree((uint8_t *)rga_service.pre_scale_buf);
- }
-
- if (rga_mmu_buf.buf_virtual)
- kfree(rga_mmu_buf.buf_virtual);
-
- if (rga_mmu_buf.pages)
- kfree(rga_mmu_buf.pages);
-
- platform_driver_unregister(&rga_driver);
-}
-
-
-#if RGA_TEST_CASE
-
-extern struct fb_info * rk_get_fb(int fb_id);
-EXPORT_SYMBOL(rk_get_fb);
-
-extern void rk_direct_fb_show(struct fb_info * fbi);
-EXPORT_SYMBOL(rk_direct_fb_show);
-
-unsigned int src_buf[1920*1080];
-unsigned int dst_buf[1920*1080];
-//unsigned int tmp_buf[1920*1080 * 2];
-
-void rga_test_0(void)
-{
- struct rga_req req;
- rga_session session;
- unsigned int *src, *dst;
- uint32_t i, j;
- uint8_t *p;
- uint8_t t;
- uint32_t *dst0, *dst1, *dst2;
-
- struct fb_info *fb;
-
- session.pid = current->pid;
- INIT_LIST_HEAD(&session.waiting);
- INIT_LIST_HEAD(&session.running);
- INIT_LIST_HEAD(&session.list_session);
- init_waitqueue_head(&session.wait);
- /* no need to protect */
- list_add_tail(&session.list_session, &rga_service.session);
- atomic_set(&session.task_running, 0);
- atomic_set(&session.num_done, 0);
- //file->private_data = (void *)session;
-
- fb = rk_get_fb(0);
-
- memset(&req, 0, sizeof(struct rga_req));
- src = src_buf;
- dst = dst_buf;
-
- memset(src_buf, 0x80, 1024*600*4);
-
- dmac_flush_range(&src_buf[0], &src_buf[1024*600]);
- outer_flush_range(virt_to_phys(&src_buf[0]),virt_to_phys(&src_buf[1024*600]));
-
-
- #if 0
- memset(src_buf, 0x80, 800*480*4);
- memset(dst_buf, 0xcc, 800*480*4);
-
- dmac_flush_range(&dst_buf[0], &dst_buf[800*480]);
- outer_flush_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480]));
- #endif
-
- dst0 = &dst_buf[0];
- //dst1 = &dst_buf[1280*800*4];
- //dst2 = &dst_buf[1280*800*4*2];
-
- i = j = 0;
-
- printk("\n********************************\n");
- printk("************ RGA_TEST ************\n");
- printk("********************************\n\n");
-
- req.src.act_w = 1024;
- req.src.act_h = 600;
-
- req.src.vir_w = 1024;
- req.src.vir_h = 600;
- req.src.yrgb_addr = (uint32_t)virt_to_phys(src);
- req.src.uv_addr = (uint32_t)(req.src.yrgb_addr + 1080*1920);
- req.src.v_addr = (uint32_t)virt_to_phys(src);
- req.src.format = RK_FORMAT_RGBA_8888;
-
- req.dst.act_w = 600;
- req.dst.act_h = 352;
-
- req.dst.vir_w = 1280;
- req.dst.vir_h = 800;
- req.dst.x_offset = 600;
- req.dst.y_offset = 0;
-
- dst = dst0;
-
- req.dst.yrgb_addr = ((uint32_t)virt_to_phys(dst));
-
- //req.dst.format = RK_FORMAT_RGB_565;
-
- req.clip.xmin = 0;
- req.clip.xmax = 1279;
- req.clip.ymin = 0;
- req.clip.ymax = 799;
-
- //req.render_mode = color_fill_mode;
- //req.fg_color = 0x80ffffff;
-
- req.rotate_mode = 1;
- //req.scale_mode = 2;
-
- //req.alpha_rop_flag = 0;
- //req.alpha_rop_mode = 0x19;
- //req.PD_mode = 3;
-
- req.sina = 65536;
- req.cosa = 0;
-
- //req.mmu_info.mmu_flag = 0x21;
- //req.mmu_info.mmu_en = 1;
-
- //printk("src = %.8x\n", req.src.yrgb_addr);
- //printk("src = %.8x\n", req.src.uv_addr);
- //printk("dst = %.8x\n", req.dst.yrgb_addr);
-
-
- rga_blit_sync(&session, &req);
-
- #if 1
- fb->var.bits_per_pixel = 32;
-
- fb->var.xres = 1280;
- fb->var.yres = 800;
-
- fb->var.red.length = 8;
- fb->var.red.offset = 0;
- fb->var.red.msb_right = 0;
-
- fb->var.green.length = 8;
- fb->var.green.offset = 8;
- fb->var.green.msb_right = 0;
-
- fb->var.blue.length = 8;
-
- fb->var.blue.offset = 16;
- fb->var.blue.msb_right = 0;
-
- fb->var.transp.length = 8;
- fb->var.transp.offset = 24;
- fb->var.transp.msb_right = 0;
-
- fb->var.nonstd &= (~0xff);
- fb->var.nonstd |= 1;
-
- fb->fix.smem_start = virt_to_phys(dst);
-
- rk_direct_fb_show(fb);
- #endif
-
-}
-
-#endif
-module_init(rga_init);
-module_exit(rga_exit);
-
-/* Module information */
-MODULE_AUTHOR("zsq@rock-chips.com");
-MODULE_DESCRIPTION("Driver for rga device");
-MODULE_LICENSE("GPL");
+ #endif\r
+\r
+ ret = misc_register(&rga_dev);\r
+ if(ret)\r
+ {\r
+ ERR("cannot register miscdev (%d)\n", ret);\r
+ goto err_misc_register;\r
+ }\r
+\r
+ pr_info("Driver loaded succesfully\n");\r
+\r
+ return 0;\r
+\r
+err_misc_register:\r
+ free_irq(data->irq, pdev);\r
+err_irq:\r
+ iounmap(data->rga_base);\r
+err_ioremap:\r
+ wake_lock_destroy(&data->wake_lock);\r
+ //kfree(data);\r
+\r
+ return ret;\r
+}\r
+\r
+static int rga_drv_remove(struct platform_device *pdev)\r
+{\r
+ struct rga_drvdata *data = platform_get_drvdata(pdev);\r
+ DBG("%s [%d]\n",__FUNCTION__,__LINE__);\r
+\r
+ wake_lock_destroy(&data->wake_lock);\r
+ misc_deregister(&(data->miscdev));\r
+ free_irq(data->irq, &data->miscdev);\r
+ iounmap((void __iomem *)(data->rga_base));\r
+\r
+ //clk_put(data->pd_rga);\r
+ devm_clk_put(&pdev->dev, data->aclk_rga);\r
+ devm_clk_put(&pdev->dev, data->hclk_rga);\r
+\r
+ //kfree(data);\r
+ return 0;\r
+}\r
+\r
+static struct platform_driver rga_driver = {\r
+ .probe = rga_drv_probe,\r
+ .remove = rga_drv_remove,\r
+ .driver = {\r
+ .owner = THIS_MODULE,\r
+ .name = "rga",\r
+ .of_match_table = of_match_ptr(rockchip_rga_dt_ids),\r
+ },\r
+};\r
+\r
+\r
+void rga_test_0(void);\r
+void rga_test_1(void);\r
+\r
+\r
+static int __init rga_init(void)\r
+{\r
+ int ret;\r
+ uint32_t *mmu_buf;\r
+ unsigned long *mmu_buf_virtual;\r
+ uint32_t i;\r
+ uint32_t *buf_p;\r
+\r
+ /* malloc pre scale mid buf mmu table */\r
+ mmu_buf = kzalloc(1024*8, GFP_KERNEL);\r
+ mmu_buf_virtual = kzalloc(1024*2*sizeof(unsigned long), GFP_KERNEL);\r
+ if(mmu_buf == NULL) {\r
+ printk(KERN_ERR "RGA get Pre Scale buff failed. \n");\r
+ return -1;\r
+ }\r
+\r
+ /* malloc 4 M buf */\r
+ for(i=0; i<1024; i++) {\r
+ buf_p = (uint32_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);\r
+ if(buf_p == NULL) {\r
+			printk(KERN_ERR "RGA init pre scale buf failed\n");\r
+ return -ENOMEM;\r
+ }\r
+ mmu_buf[i] = virt_to_phys((void *)((unsigned long)buf_p));\r
+ mmu_buf_virtual[i] = (unsigned long)buf_p;\r
+ }\r
+\r
+ rga_service.pre_scale_buf = (uint32_t *)mmu_buf;\r
+ rga_service.pre_scale_buf_virtual = (unsigned long *)mmu_buf_virtual;\r
+\r
+ buf_p = kmalloc(1024*256, GFP_KERNEL);\r
+ rga_mmu_buf.buf_virtual = buf_p;\r
+ rga_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((unsigned long)buf_p));\r
+ rga_mmu_buf.front = 0;\r
+ rga_mmu_buf.back = 64*1024;\r
+ rga_mmu_buf.size = 64*1024;\r
+\r
+ rga_mmu_buf.pages = kmalloc((32768)* sizeof(struct page *), GFP_KERNEL);\r
+\r
+ if ((ret = platform_driver_register(&rga_driver)) != 0)\r
+ {\r
+ printk(KERN_ERR "Platform device register failed (%d).\n", ret);\r
+ return ret;\r
+ }\r
+\r
+ {\r
+ rga_session_global.pid = 0x0000ffff;\r
+ INIT_LIST_HEAD(&rga_session_global.waiting);\r
+ INIT_LIST_HEAD(&rga_session_global.running);\r
+ INIT_LIST_HEAD(&rga_session_global.list_session);\r
+\r
+ INIT_LIST_HEAD(&rga_service.waiting);\r
+ INIT_LIST_HEAD(&rga_service.running);\r
+ INIT_LIST_HEAD(&rga_service.done);\r
+ INIT_LIST_HEAD(&rga_service.session);\r
+\r
+ init_waitqueue_head(&rga_session_global.wait);\r
+ //mutex_lock(&rga_service.lock);\r
+ list_add_tail(&rga_session_global.list_session, &rga_service.session);\r
+ //mutex_unlock(&rga_service.lock);\r
+ atomic_set(&rga_session_global.task_running, 0);\r
+ atomic_set(&rga_session_global.num_done, 0);\r
+ }\r
+\r
+\r
+\r
+ #if RGA_TEST_CASE\r
+ rga_test_0();\r
+ #endif\r
+\r
+ INFO("Module initialized.\n");\r
+\r
+ return 0;\r
+}\r
+\r
+static void __exit rga_exit(void)\r
+{\r
+ uint32_t i;\r
+\r
+ rga_power_off();\r
+\r
+ for(i=0; i<1024; i++)\r
+ {\r
+ if((unsigned long)rga_service.pre_scale_buf_virtual[i])\r
+ {\r
+ __free_page((void *)rga_service.pre_scale_buf_virtual[i]);\r
+ }\r
+ }\r
+\r
+ if(rga_service.pre_scale_buf != NULL) {\r
+ kfree((uint8_t *)rga_service.pre_scale_buf);\r
+ }\r
+\r
+ if (rga_mmu_buf.buf_virtual)\r
+ kfree(rga_mmu_buf.buf_virtual);\r
+\r
+ if (rga_mmu_buf.pages)\r
+ kfree(rga_mmu_buf.pages);\r
+\r
+ platform_driver_unregister(&rga_driver);\r
+}\r
+\r
+\r
+#if RGA_TEST_CASE\r
+\r
+extern struct fb_info * rk_get_fb(int fb_id);\r
+EXPORT_SYMBOL(rk_get_fb);\r
+\r
+extern void rk_direct_fb_show(struct fb_info * fbi);\r
+EXPORT_SYMBOL(rk_direct_fb_show);\r
+\r
+unsigned int src_buf[1920*1080];\r
+unsigned int dst_buf[1920*1080];\r
+//unsigned int tmp_buf[1920*1080 * 2];\r
+\r
+void rga_test_0(void)\r
+{\r
+ struct rga_req req;\r
+ rga_session session;\r
+ unsigned int *src, *dst;\r
+ uint32_t i, j;\r
+ uint8_t *p;\r
+ uint8_t t;\r
+ uint32_t *dst0, *dst1, *dst2;\r
+\r
+ struct fb_info *fb;\r
+\r
+ session.pid = current->pid;\r
+ INIT_LIST_HEAD(&session.waiting);\r
+ INIT_LIST_HEAD(&session.running);\r
+ INIT_LIST_HEAD(&session.list_session);\r
+ init_waitqueue_head(&session.wait);\r
+ /* no need to protect */\r
+ list_add_tail(&session.list_session, &rga_service.session);\r
+ atomic_set(&session.task_running, 0);\r
+ atomic_set(&session.num_done, 0);\r
+ //file->private_data = (void *)session;\r
+\r
+ fb = rk_get_fb(0);\r
+\r
+ memset(&req, 0, sizeof(struct rga_req));\r
+ src = src_buf;\r
+ dst = dst_buf;\r
+\r
+ memset(src_buf, 0x80, 1024*600*4);\r
+\r
+ dmac_flush_range(&src_buf[0], &src_buf[1024*600]);\r
+ outer_flush_range(virt_to_phys(&src_buf[0]),virt_to_phys(&src_buf[1024*600]));\r
+\r
+\r
+ #if 0\r
+ memset(src_buf, 0x80, 800*480*4);\r
+ memset(dst_buf, 0xcc, 800*480*4);\r
+\r
+ dmac_flush_range(&dst_buf[0], &dst_buf[800*480]);\r
+ outer_flush_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480]));\r
+ #endif\r
+\r
+ dst0 = &dst_buf[0];\r
+ //dst1 = &dst_buf[1280*800*4];\r
+ //dst2 = &dst_buf[1280*800*4*2];\r
+\r
+ i = j = 0;\r
+\r
+ printk("\n********************************\n");\r
+ printk("************ RGA_TEST ************\n");\r
+ printk("********************************\n\n");\r
+\r
+ req.src.act_w = 1024;\r
+ req.src.act_h = 600;\r
+\r
+ req.src.vir_w = 1024;\r
+ req.src.vir_h = 600;\r
+ req.src.yrgb_addr = (uint32_t)virt_to_phys(src);\r
+ req.src.uv_addr = (uint32_t)(req.src.yrgb_addr + 1080*1920);\r
+ req.src.v_addr = (uint32_t)virt_to_phys(src);\r
+ req.src.format = RK_FORMAT_RGBA_8888;\r
+\r
+ req.dst.act_w = 600;\r
+ req.dst.act_h = 352;\r
+\r
+ req.dst.vir_w = 1280;\r
+ req.dst.vir_h = 800;\r
+ req.dst.x_offset = 600;\r
+ req.dst.y_offset = 0;\r
+\r
+ dst = dst0;\r
+\r
+ req.dst.yrgb_addr = ((uint32_t)virt_to_phys(dst));\r
+\r
+ //req.dst.format = RK_FORMAT_RGB_565;\r
+\r
+ req.clip.xmin = 0;\r
+ req.clip.xmax = 1279;\r
+ req.clip.ymin = 0;\r
+ req.clip.ymax = 799;\r
+\r
+ //req.render_mode = color_fill_mode;\r
+ //req.fg_color = 0x80ffffff;\r
+\r
+ req.rotate_mode = 1;\r
+ //req.scale_mode = 2;\r
+\r
+ //req.alpha_rop_flag = 0;\r
+ //req.alpha_rop_mode = 0x19;\r
+ //req.PD_mode = 3;\r
+\r
+ req.sina = 65536;\r
+ req.cosa = 0;\r
+\r
+ //req.mmu_info.mmu_flag = 0x21;\r
+ //req.mmu_info.mmu_en = 1;\r
+\r
+ //printk("src = %.8x\n", req.src.yrgb_addr);\r
+ //printk("src = %.8x\n", req.src.uv_addr);\r
+ //printk("dst = %.8x\n", req.dst.yrgb_addr);\r
+\r
+\r
+ rga_blit_sync(&session, &req);\r
+\r
+ #if 1\r
+ fb->var.bits_per_pixel = 32;\r
+\r
+ fb->var.xres = 1280;\r
+ fb->var.yres = 800;\r
+\r
+ fb->var.red.length = 8;\r
+ fb->var.red.offset = 0;\r
+ fb->var.red.msb_right = 0;\r
+\r
+ fb->var.green.length = 8;\r
+ fb->var.green.offset = 8;\r
+ fb->var.green.msb_right = 0;\r
+\r
+ fb->var.blue.length = 8;\r
+\r
+ fb->var.blue.offset = 16;\r
+ fb->var.blue.msb_right = 0;\r
+\r
+ fb->var.transp.length = 8;\r
+ fb->var.transp.offset = 24;\r
+ fb->var.transp.msb_right = 0;\r
+\r
+ fb->var.nonstd &= (~0xff);\r
+ fb->var.nonstd |= 1;\r
+\r
+ fb->fix.smem_start = virt_to_phys(dst);\r
+\r
+ rk_direct_fb_show(fb);\r
+ #endif\r
+\r
+}\r
+\r
+#endif\r
+module_init(rga_init);\r
+module_exit(rga_exit);\r
+\r
+/* Module information */\r
+MODULE_AUTHOR("zsq@rock-chips.com");\r
+MODULE_DESCRIPTION("Driver for rga device");\r
+MODULE_LICENSE("GPL");\r
return 0;\r
}\r
\r
-static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)\r
+static int rga_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)\r
{\r
- uint32_t start, end;\r
+ unsigned long start, end;\r
uint32_t pageCount;\r
\r
end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
return pageCount;\r
}\r
\r
-static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,\r
- int format, uint32_t w, uint32_t h, uint32_t *StartAddr )\r
+static int rga_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,\r
+ int format, uint32_t w, uint32_t h, unsigned long *StartAddr )\r
{\r
uint32_t size_yrgb = 0;\r
uint32_t size_uv = 0;\r
uint32_t size_v = 0;\r
uint32_t stride = 0;\r
- uint32_t start, end;\r
+ unsigned long start, end;\r
uint32_t pageCount;\r
\r
switch(format)\r
\r
static int rga_MapUserMemory(struct page **pages,\r
uint32_t *pageTable,\r
- uint32_t Memory,\r
+ unsigned long Memory,\r
uint32_t pageCount)\r
{\r
int32_t result;\r
uint32_t i;\r
uint32_t status;\r
- uint32_t Address;\r
- //uint32_t temp;\r
+ unsigned long Address;\r
\r
status = 0;\r
Address = 0;\r
\r
- do\r
- {\r
+ do {\r
down_read(¤t->mm->mmap_sem);\r
result = get_user_pages(current,\r
current->mm,\r
{\r
uint32_t i;\r
uint32_t status;\r
- uint32_t Address;\r
+ unsigned long Address;\r
uint32_t mapped_size = 0;\r
uint32_t len = 0;\r
struct scatterlist *sgl = sg->sgl;\r
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
+ unsigned long SrcStart, DstStart;\r
uint32_t i;\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;\r
else {\r
MMU_p = MMU_Base;\r
\r
- if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {\r
+ if(req->src.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {\r
for(i=0; i<SrcMemSize; i++)\r
MMU_p[i] = rga_service.pre_scale_buf[i];\r
}\r
* change the buf address in req struct\r
*/\r
\r
- req->mmu_info.base_addr = (uint32_t)MMU_Base_phys >> 2;\r
+ req->mmu_info.base_addr = (unsigned long)MMU_Base_phys >> 2;\r
\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
reg->MMU_len = AllSize + 16;\r
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, DstMemSize, CMDMemSize;\r
- uint32_t SrcStart, DstStart, CMDStart;\r
+ unsigned long SrcStart, DstStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
return -EINVAL;\r
}\r
\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
\r
/* map CMD addr */\r
for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
+ MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
}\r
\r
/* map src addr */\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
reg->MMU_len = AllSize + 16;\r
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int DstMemSize;\r
- uint32_t DstStart;\r
+ unsigned long DstStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
&DstStart);\r
* change the buf address in req struct\r
*/\r
\r
- req->mmu_info.base_addr = ((uint32_t)(MMU_Base_phys)>>2);\r
+ req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
reg->MMU_len = AllSize + 16;\r
\r
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int DstMemSize;\r
- uint32_t DstStart;\r
- struct page **pages = NULL;\r
- uint32_t i;\r
- uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
- int ret, status;\r
-\r
- MMU_Base = NULL;\r
-\r
- do\r
- {\r
- /* cal dst buf mmu info */\r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL;\r
- }\r
-\r
- AllSize = DstMemSize;\r
-\r
- pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
- }\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base;\r
-\r
- for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
-\r
- /* zsq\r
- * change the buf address in req struct\r
- * for the reason of lie to MMU\r
- */\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
-\r
-\r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
-\r
- /* flush data to DDR */\r
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- return 0;\r
-\r
- }\r
- while(0);\r
-\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
- return status;\r
+ return 0;\r
}\r
\r
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
- struct page **pages = NULL;\r
- uint32_t i;\r
- uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
- int ret, status;\r
- uint32_t uv_size, v_size;\r
-\r
- MMU_Base = NULL;\r
-\r
- do\r
- {\r
- /* cal src buf mmu info */\r
- SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
- req->src.format, req->src.vir_w, req->src.vir_h,\r
- &SrcStart);\r
- if(SrcMemSize == 0) {\r
- return -EINVAL;\r
- }\r
-\r
- /* cal dst buf mmu info */\r
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL;\r
- }\r
-\r
- AllSize = SrcMemSize + DstMemSize;\r
-\r
- pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- status = RGA_MALLOC_ERROR;\r
- break;\r
- }\r
-\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
- if (ret < 0)\r
- {\r
- pr_err("rga map src memory failed\n");\r
- status = ret;\r
- break;\r
- }\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base;\r
-\r
- for(i=0; i<SrcMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
-\r
-\r
- if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0)\r
- {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
- }\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base + SrcMemSize;\r
-\r
- for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
-\r
- MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
-\r
- /* zsq\r
- * change the buf address in req struct\r
- * for the reason of lie to MMU\r
- */\r
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);\r
-\r
- uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
- v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
-\r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);\r
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);\r
-\r
- uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
- v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
-\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);\r
- req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);\r
- req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);\r
-\r
-\r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
-\r
- /* flush data to DDR */\r
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- return 0;\r
- }\r
- while(0);\r
-\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
- return status;\r
+ return 0;\r
}\r
\r
\r
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
+ unsigned long SrcStart, DstStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
/* kernel space */\r
MMU_p = MMU_Base + SrcMemSize;\r
\r
- if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {\r
+ if(req->dst.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {\r
for(i=0; i<DstMemSize; i++)\r
MMU_p[i] = rga_service.pre_scale_buf[i];\r
}\r
* for the reason of lie to MMU\r
*/\r
\r
- req->mmu_info.base_addr = ((uint32_t)(MMU_Base_phys)>>2);\r
+ req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);\r
\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);\r
reg->MMU_len = AllSize + 16;\r
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, CMDMemSize;\r
- uint32_t SrcStart, CMDStart;\r
+ unsigned long SrcStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
/* cal src buf mmu info */\r
SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);\r
if(SrcMemSize == 0) {\r
}\r
\r
/* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
}\r
\r
for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
+ MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
}\r
\r
if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
+\r
\r
if (pages != NULL) {\r
/* Free the page table */\r
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
int SrcMemSize, CMDMemSize;\r
- uint32_t SrcStart, CMDStart;\r
+ unsigned long SrcStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
}\r
\r
/* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
if (pages != NULL) {\r
/* Free the page table */\r
\r
typedef struct MMU_INFO\r
{\r
- u32 src0_base_addr;\r
- u32 src1_base_addr;\r
- u32 dst_base_addr;\r
- u32 els_base_addr;\r
+ uint64_t src0_base_addr;\r
+ uint64_t src1_base_addr;\r
+ uint64_t dst_base_addr;\r
+ uint64_t els_base_addr;\r
\r
u8 src0_mmu_flag; /* [0] src0 mmu enable [1] src0_flush [2] src0_prefetch_en [3] src0_prefetch dir */\r
u8 src1_mmu_flag; /* [0] src1 mmu enable [1] src1_flush [2] src1_prefetch_en [3] src1_prefetch dir */\r
typedef struct MMU\r
{\r
unsigned char mmu_en;\r
- uint32_t base_addr;\r
+ uint64_t base_addr;\r
uint32_t mmu_flag; /* [0] mmu enable [1] src_flush [2] dst_flush [3] CMD_flush [4~5] page size*/\r
} MMU;\r
\r
\r
typedef struct rga_img_info_t\r
{\r
- unsigned int yrgb_addr; /* yrgb mem addr */\r
- unsigned int uv_addr; /* cb/cr mem addr */\r
- unsigned int v_addr; /* cr mem addr */\r
+ uint64_t yrgb_addr; /* yrgb mem addr */\r
+ uint64_t uv_addr; /* cb/cr mem addr */\r
+ uint64_t v_addr; /* cr mem addr */\r
unsigned int format; //definition by RK_FORMAT\r
\r
unsigned short act_w;\r
\r
unsigned short endian_mode; //for BPP\r
unsigned short alpha_swap;\r
-\r
- //unsigned short uv_x_off;\r
- //unsigned short uv_y_off;\r
}\r
rga_img_info_t;\r
\r
rga_img_info_t dst; /* dst image info */\r
rga_img_info_t pat; /* patten image info */\r
\r
- uint32_t rop_mask_addr; /* rop4 mask addr */\r
- uint32_t LUT_addr; /* LUT addr */\r
+ uint64_t rop_mask_addr; /* rop4 mask addr */\r
+ uint64_t LUT_addr; /* LUT addr */\r
\r
RECT clip; /* dst clip window default value is dst_vir */\r
/* value from [0, w-1] / [0, h-1]*/\r
rga_img_info_t dst; // dst active window\r
rga_img_info_t pat; // patten active window\r
\r
- u32 rop_mask_addr; // rop4 mask addr\r
- u32 LUT_addr; // LUT addr\r
+ uint64_t rop_mask_addr; // rop4 mask addr\r
+ uint64_t LUT_addr; // LUT addr\r
\r
u32 rop_mask_stride;\r
\r
int32_t curr;\r
unsigned int *buf;\r
unsigned int *buf_virtual;\r
-};\r
\r
-//add for FPGA test ,by hxx & luj\r
+ struct page **pages;\r
+};\r
\r
enum\r
{\r
#define CONFIG_RGA_IOMMU\r
#endif\r
\r
-\r
-\r
#define RGA2_TEST_FLUSH_TIME 0\r
#define RGA2_INFO_BUS_ERROR 1\r
\r
{\r
printk("render_mode=%d bitblt_mode=%d rotate_mode=%.8x\n",\r
req->render_mode, req->bitblt_mode, req->rotate_mode);\r
- printk("src : y=%.8x uv=%.8x v=%.8x format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",\r
+ printk("src : y=%.llx uv=%.llx v=%.llx format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",\r
req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, req->src.format,\r
req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h,\r
req->src.x_offset, req->src.y_offset);\r
- printk("dst : y=%.8x uv=%.8x v=%.8x format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",\r
+ printk("dst : y=%llx uv=%llx v=%llx format=%d aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d \n",\r
req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, req->dst.format,\r
req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h,\r
req->dst.x_offset, req->dst.y_offset);\r
\r
static inline void rga2_write(u32 b, u32 r)\r
{\r
- __raw_writel(b, rga2_drvdata->rga_base + r);\r
+ //__raw_writel(b, rga2_drvdata->rga_base + r);\r
+\r
+ *((volatile unsigned int *)(rga2_drvdata->rga_base + r)) = b;\r
}\r
\r
static inline u32 rga2_read(u32 r)\r
{\r
- return __raw_readl(rga2_drvdata->rga_base + r);\r
+ //return __raw_readl(rga2_drvdata->rga_base + r);\r
+\r
+ return *((volatile unsigned int *)(rga2_drvdata->rga_base + r));\r
}\r
\r
static void rga2_soft_reset(void)\r
u32 i;\r
u32 reg;\r
\r
- rga2_write(1, RGA2_SYS_CTRL); //RGA_SYS_CTRL\r
+ rga2_write((1 << 3) | (1 << 4), RGA2_SYS_CTRL); //RGA_SYS_CTRL\r
\r
for(i = 0; i < RGA2_RESET_TIMEOUT; i++)\r
{\r
running = atomic_read(&rga2_service.total_running);\r
printk("rga total_running %d\n", running);\r
\r
- #if 0\r
-\r
- /* Dump waiting list info */\r
- if (!list_empty(&rga_service.waiting))\r
- {\r
- list_head *next;\r
-\r
- next = &rga_service.waiting;\r
-\r
- printk("rga_service dump waiting list\n");\r
-\r
- do\r
- {\r
- reg = list_entry(next->next, struct rga_reg, status_link);\r
- running = atomic_read(®->session->task_running);\r
- num_done = atomic_read(®->session->num_done);\r
- printk("rga session pid %d, done %d, running %d\n", reg->session->pid, num_done, running);\r
- next = next->next;\r
- }\r
- while(!list_empty(next));\r
- }\r
-\r
- /* Dump running list info */\r
- if (!list_empty(&rga_service.running))\r
- {\r
- printk("rga_service dump running list\n");\r
-\r
- list_head *next;\r
-\r
- next = &rga_service.running;\r
- do\r
- {\r
- reg = list_entry(next->next, struct rga_reg, status_link);\r
- running = atomic_read(®->session->task_running);\r
- num_done = atomic_read(®->session->num_done);\r
- printk("rga session pid %d, done %d, running %d:\n", reg->session->pid, num_done, running);\r
- next = next->next;\r
- }\r
- while(!list_empty(next));\r
- }\r
- #endif\r
-\r
list_for_each_entry_safe(session, session_tmp, &rga2_service.session, list_session)\r
{\r
printk("session pid %d:\n", session->pid);\r
printk("task_running %d\n", running);\r
list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link)\r
{\r
- printk("waiting register set 0x%.8x\n", (unsigned int)reg);\r
+			printk("waiting register set 0x%lx\n", (unsigned long)reg);\r
}\r
list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link)\r
{\r
- printk("running register set 0x%.8x\n", (unsigned int)reg);\r
+			printk("running register set 0x%lx\n", (unsigned long)reg);\r
}\r
}\r
}\r
return;\r
\r
clk_prepare_enable(rga2_drvdata->rga2);\r
- clk_prepare_enable(rga2_drvdata->pd_rga2);\r
clk_prepare_enable(rga2_drvdata->aclk_rga2);\r
clk_prepare_enable(rga2_drvdata->hclk_rga2);\r
//clk_enable(rga2_drvdata->pd_rga2);\r
\r
//clk_disable(rga2_drvdata->pd_rga2);\r
clk_disable_unprepare(rga2_drvdata->rga2);\r
- clk_disable_unprepare(rga2_drvdata->pd_rga2);\r
+ //clk_disable_unprepare(rga2_drvdata->pd_rga2);\r
clk_disable_unprepare(rga2_drvdata->aclk_rga2);\r
clk_disable_unprepare(rga2_drvdata->hclk_rga2);\r
wake_unlock(&rga2_drvdata->wake_lock);\r
uint32_t *reg_p;\r
\r
if(atomic_read(®->session->task_running) != 0)\r
- {\r
printk(KERN_ERR "task_running is no zero\n");\r
- }\r
\r
atomic_add(1, &rga2_service.cmd_num);\r
atomic_add(1, ®->session->task_running);\r
reg_p = (uint32_t *)reg->cmd_reg;\r
\r
for(i=0; i<32; i++)\r
- {\r
cmd_buf[i] = reg_p[i];\r
- }\r
-\r
- dsb();\r
}\r
\r
\r
rga2_copy_reg(reg, 0);\r
rga2_reg_from_wait_to_run(reg);\r
\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(&rga2_service.cmd_buff[0], &rga2_service.cmd_buff[32]);\r
outer_flush_range(virt_to_phys(&rga2_service.cmd_buff[0]),virt_to_phys(&rga2_service.cmd_buff[32]));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(&rga2_service.cmd_buff[0], &rga2_service.cmd_buff[32]);\r
+ #endif\r
\r
- #if defined(CONFIG_ARCH_RK30)\r
rga2_soft_reset();\r
- #endif\r
\r
rga2_write(0x0, RGA2_SYS_CTRL);\r
- //rga2_write(0, RGA_MMU_CTRL);\r
\r
/* CMD buff */\r
rga2_write(virt_to_phys(rga2_service.cmd_buff), RGA2_CMD_BASE);\r
\r
#if RGA2_TEST\r
- if(rga2_flag)\r
- {\r
- //printk(KERN_DEBUG "cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));\r
+ if(rga2_flag) {\r
uint32_t i, *p;\r
p = rga2_service.cmd_buff;\r
printk("CMD_REG\n");\r
rga2_write(rga2_read(RGA2_INT)|(0x1<<10)|(0x1<<8), RGA2_INT);\r
\r
#if RGA2_TEST_TIME\r
- rga_start = ktime_get();\r
+ rga2_start = ktime_get();\r
#endif\r
\r
/* Start proc */\r
atomic_sub(1, ®->session->task_running);\r
atomic_sub(1, &rga2_service.total_running);\r
\r
- //printk("RGA soft reset for timeout process\n");\r
rga2_soft_reset();\r
\r
-\r
- #if 0\r
- printk("RGA_INT is %.8x\n", rga_read(RGA_INT));\r
- printk("reg->session->task_running = %d\n", atomic_read(®->session->task_running));\r
- printk("rga_service.total_running = %d\n", atomic_read(&rga_service.total_running));\r
-\r
- print_info(®->req);\r
-\r
- {\r
- uint32_t *p, i;\r
- p = reg->cmd_reg;\r
- for (i=0; i<7; i++)\r
- printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);\r
-\r
- }\r
- #endif\r
-\r
if(list_empty(®->session->waiting))\r
{\r
atomic_set(®->session->done, 1);\r
\r
ret = rga2_blit(session, req);\r
if(ret < 0)\r
- {\r
return ret;\r
- }\r
\r
ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY);\r
\r
return ret;\r
}\r
\r
+static long compat_rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)\r
+{\r
+ struct rga2_req req;\r
+ struct rga_req req_rga;\r
+ int ret = 0;\r
+ rga2_session *session;\r
+\r
+ memset(&req, 0x0, sizeof(req));\r
+\r
+ mutex_lock(&rga2_service.mutex);\r
+\r
+ session = (rga2_session *)file->private_data;\r
+\r
+ #if RGA2_TEST_MSG\r
+ printk("use compat_rga_ioctl\n");\r
+ #endif\r
+\r
+ if (NULL == session) {\r
+ printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);\r
+ mutex_unlock(&rga2_service.mutex);\r
+ return -EINVAL;\r
+ }\r
+\r
+ memset(&req, 0x0, sizeof(req));\r
+\r
+ switch (cmd) {\r
+ case RGA_BLIT_SYNC:\r
+ if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req))))\r
+ {\r
+ ERR("copy_from_user failed\n");\r
+ ret = -EFAULT;\r
+ break;\r
+ }\r
+\r
+ RGA_MSG_2_RGA2_MSG(&req_rga, &req);\r
+\r
+ ret = rga2_blit_sync(session, &req);\r
+ break;\r
+ case RGA_BLIT_ASYNC:\r
+ if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req))))\r
+ {\r
+ ERR("copy_from_user failed\n");\r
+ ret = -EFAULT;\r
+ break;\r
+ }\r
+\r
+ RGA_MSG_2_RGA2_MSG(&req_rga, &req);\r
+\r
+ if((atomic_read(&rga2_service.total_running) > 8))\r
+ {\r
+ ret = rga2_blit_sync(session, &req);\r
+ }\r
+ else\r
+ {\r
+ ret = rga2_blit_async(session, &req);\r
+ }\r
+ break;\r
+ case RGA2_BLIT_SYNC:\r
+ if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))\r
+ {\r
+ ERR("copy_from_user failed\n");\r
+ ret = -EFAULT;\r
+ break;\r
+ }\r
+ ret = rga2_blit_sync(session, &req);\r
+ break;\r
+ case RGA2_BLIT_ASYNC:\r
+ if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req))))\r
+ {\r
+ ERR("copy_from_user failed\n");\r
+ ret = -EFAULT;\r
+ break;\r
+ }\r
+\r
+ if((atomic_read(&rga2_service.total_running) > 16))\r
+ {\r
+ ret = rga2_blit_sync(session, &req);\r
+ }\r
+ else\r
+ {\r
+ ret = rga2_blit_async(session, &req);\r
+ }\r
+ break;\r
+ case RGA_FLUSH:\r
+ case RGA2_FLUSH:\r
+ ret = rga2_flush(session, arg);\r
+ break;\r
+ case RGA_GET_RESULT:\r
+ case RGA2_GET_RESULT:\r
+ ret = rga2_get_result(session, arg);\r
+ break;\r
+ case RGA_GET_VERSION:\r
+ case RGA2_GET_VERSION:\r
+ ret = copy_to_user((void *)arg, RGA2_VERSION, sizeof(RGA2_VERSION));\r
+ //ret = 0;\r
+ break;\r
+ default:\r
+ ERR("unknown ioctl cmd!\n");\r
+ ret = -EINVAL;\r
+ break;\r
+ }\r
+\r
+ mutex_unlock(&rga2_service.mutex);\r
+\r
+ return ret;\r
+}\r
+\r
+\r
\r
long rga2_ioctl_kernel(struct rga_req *req_rga)\r
{\r
.open = rga2_open,\r
.release = rga2_release,\r
.unlocked_ioctl = rga_ioctl,\r
+ .compat_ioctl = compat_rga_ioctl,\r
};\r
\r
static struct miscdevice rga2_dev ={\r
};\r
\r
static const struct of_device_id rockchip_rga_dt_ids[] = {\r
- { .compatible = "rockchip,rk3288-rga2", },\r
+ { .compatible = "rockchip,rk3368-rga2", },\r
{},\r
};\r
\r
/* malloc pre scale mid buf mmu table */\r
buf_p = kmalloc(1024*256, GFP_KERNEL);\r
rga2_mmu_buf.buf_virtual = buf_p;\r
- rga2_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((uint32_t)buf_p));\r
+ rga2_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((unsigned long)buf_p));\r
rga2_mmu_buf.front = 0;\r
rga2_mmu_buf.back = 64*1024;\r
rga2_mmu_buf.size = 64*1024;\r
\r
+ rga2_mmu_buf.pages = kmalloc(32768 * sizeof(struct page *), GFP_KERNEL);\r
+\r
if ((ret = platform_driver_register(&rga2_driver)) != 0)\r
{\r
printk(KERN_ERR "Platform device register failed (%d).\n", ret);\r
\r
#if RGA2_TEST_CASE\r
\r
-extern struct fb_info * rk_get_fb(int fb_id);\r
-EXPORT_SYMBOL(rk_get_fb);\r
-\r
-extern void rk_direct_fb_show(struct fb_info * fbi);\r
-EXPORT_SYMBOL(rk_direct_fb_show);\r
-\r
-//unsigned int src_buf[4096*2304*3/2];\r
-//unsigned int dst_buf[3840*2304*3/2];\r
-//unsigned int tmp_buf[1920*1080 * 2];\r
-\r
void rga2_test_0(void)\r
{\r
struct rga2_req req;\r
//fb = rk_get_fb(0);\r
\r
memset(&req, 0, sizeof(struct rga2_req));\r
- src = kmalloc(4096*2304*3/2, GFP_KERNEL);\r
- dst = kmalloc(3840*2160*3/2, GFP_KERNEL);\r
+ src = kmalloc(800*480*4, GFP_KERNEL);\r
+ dst = kmalloc(800*480*4, GFP_KERNEL);\r
\r
- //memset(src, 0x80, 4096*2304*4);\r
+ printk("\n********************************\n");\r
+ printk("************ RGA2_TEST ************\n");\r
+ printk("********************************\n\n");\r
+\r
+ memset(src, 0x80, 800*480*4);\r
+ memset(dst, 0x0, 800*480*4);\r
\r
//dmac_flush_range(&src, &src[800*480*4]);\r
//outer_flush_range(virt_to_phys(&src),virt_to_phys(&src[800*480*4]));\r
\r
i = j = 0;\r
\r
- printk("\n********************************\n");\r
- printk("************ RGA2_TEST ************\n");\r
- printk("********************************\n\n");\r
\r
+\r
+ #if 0\r
req.pat.act_w = 16;\r
req.pat.act_h = 16;\r
req.pat.vir_w = 16;\r
req.pat.vir_h = 16;\r
req.pat.yrgb_addr = virt_to_phys(src);\r
- req.render_mode = update_palette_table_mode;\r
+ req.render_mode = 0;\r
rga2_blit_sync(&session, &req);\r
+ #endif\r
\r
- req.src.act_w = 4096;\r
- req.src.act_h = 2304;\r
+ req.src.act_w = 320;\r
+ req.src.act_h = 240;\r
\r
- req.src.vir_w = 4096;\r
- req.src.vir_h = 2304;\r
- req.src.yrgb_addr = (uint32_t)0;//virt_to_phys(src);\r
- req.src.uv_addr = (uint32_t)virt_to_phys(src);\r
+ req.src.vir_w = 320;\r
+ req.src.vir_h = 240;\r
+ req.src.yrgb_addr = 0;//(uint32_t)virt_to_phys(src);\r
+ req.src.uv_addr = (unsigned long)virt_to_phys(src);\r
req.src.v_addr = 0;\r
- req.src.format = RGA2_FORMAT_YCbCr_420_SP;\r
+ req.src.format = RGA2_FORMAT_RGBA_8888;\r
\r
- req.dst.act_w = 3840;\r
- req.dst.act_h = 2160;\r
+ req.dst.act_w = 320;\r
+ req.dst.act_h = 240;\r
req.dst.x_offset = 0;\r
req.dst.y_offset = 0;\r
\r
- req.dst.vir_w = 3840;\r
- req.dst.vir_h = 2160;\r
+ req.dst.vir_w = 320;\r
+ req.dst.vir_h = 240;\r
\r
req.dst.yrgb_addr = 0;//((uint32_t)virt_to_phys(dst));\r
- req.dst.uv_addr = ((uint32_t)virt_to_phys(dst));\r
- req.dst.format = RGA2_FORMAT_YCbCr_420_SP;\r
+ req.dst.uv_addr = (unsigned long)virt_to_phys(dst);\r
+ req.dst.format = RGA2_FORMAT_RGBA_8888;\r
\r
//dst = dst0;\r
\r
\r
rga2_blit_sync(&session, &req);\r
\r
- #if 0\r
- fb->var.bits_per_pixel = 32;\r
-\r
- fb->var.xres = 1280;\r
- fb->var.yres = 800;\r
-\r
- fb->var.red.length = 8;\r
- fb->var.red.offset = 0;\r
- fb->var.red.msb_right = 0;\r
-\r
- fb->var.green.length = 8;\r
- fb->var.green.offset = 8;\r
- fb->var.green.msb_right = 0;\r
-\r
- fb->var.blue.length = 8;\r
-\r
- fb->var.blue.offset = 16;\r
- fb->var.blue.msb_right = 0;\r
-\r
- fb->var.transp.length = 8;\r
- fb->var.transp.offset = 24;\r
- fb->var.transp.msb_right = 0;\r
-\r
- fb->var.nonstd &= (~0xff);\r
- fb->var.nonstd |= 1;\r
-\r
- fb->fix.smem_start = virt_to_phys(dst);\r
-\r
- rk_direct_fb_show(fb);\r
- #endif\r
+ for(j=0; j<100; j++) {\r
+ printk("%.8x\n", dst[j]);\r
+ }\r
\r
if(src)\r
kfree(src);\r
return 0;\r
}\r
\r
-#if 0\r
-static int rga2_mmu_buf_cal(struct rga2_mmu_buf_t *t, uint32_t size)\r
-{\r
- if((t->front + size) > t->back) {\r
- return -1;\r
- }\r
- else {\r
- return 0;\r
- }\r
-}\r
-#endif\r
-\r
-\r
-\r
-static int rga2_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)\r
+static int rga2_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)\r
{\r
- uint32_t start, end;\r
+ unsigned long start, end;\r
uint32_t pageCount;\r
\r
end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
return pageCount;\r
}\r
\r
-static int rga2_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,\r
- int format, uint32_t w, uint32_t h, uint32_t *StartAddr )\r
+static int rga2_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,\r
+ int format, uint32_t w, uint32_t h, unsigned long *StartAddr )\r
{\r
uint32_t size_yrgb = 0;\r
uint32_t size_uv = 0;\r
uint32_t size_v = 0;\r
uint32_t stride = 0;\r
- uint32_t start, end;\r
+ unsigned long start, end;\r
uint32_t pageCount;\r
\r
switch(format)\r
\r
static int rga2_MapUserMemory(struct page **pages,\r
uint32_t *pageTable,\r
- uint32_t Memory,\r
+ unsigned long Memory,\r
uint32_t pageCount)\r
{\r
int32_t result;\r
uint32_t i;\r
uint32_t status;\r
- uint32_t Address;\r
- //uint32_t temp;\r
+ unsigned long Address;\r
\r
status = 0;\r
Address = 0;\r
);\r
up_read(¤t->mm->mmap_sem);\r
\r
- #if 0\r
- if(result <= 0 || result < pageCount)\r
- {\r
- status = 0;\r
-\r
- for(i=0; i<pageCount; i++)\r
- {\r
- temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);\r
- if (temp == 0xffffffff)\r
- {\r
- printk("rga find mmu phy ddr error\n ");\r
- status = RGA_OUT_OF_RESOURCES;\r
- break;\r
- }\r
-\r
- pageTable[i] = temp;\r
- }\r
-\r
- return status;\r
- }\r
- #else\r
if(result <= 0 || result < pageCount)\r
{\r
struct vm_area_struct *vma;\r
\r
if (vma)//&& (vma->vm_flags & VM_PFNMAP) )\r
{\r
- #if 1\r
do\r
{\r
pte_t * pte;\r
}\r
while (0);\r
\r
- #else\r
- do\r
- {\r
- pte_t * pte;\r
- spinlock_t * ptl;\r
- unsigned long pfn;\r
- pgd_t * pgd;\r
- pud_t * pud;\r
- pmd_t * pmd;\r
-\r
- pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);\r
- pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);\r
- pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);\r
- pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);\r
-\r
- pfn = pte_pfn(*pte);\r
- Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));\r
- pte_unmap_unlock(pte, ptl);\r
- }\r
- while (0);\r
- #endif\r
-\r
- pageTable[i] = Address;\r
+ pageTable[i] = (uint32_t)Address;\r
}\r
else\r
{\r
\r
return status;\r
}\r
- #endif\r
\r
/* Fill the page table. */\r
for(i=0; i<pageCount; i++)\r
{\r
uint32_t i;\r
uint32_t status;\r
- uint32_t Address;\r
+ unsigned long Address;\r
uint32_t mapped_size = 0;\r
uint32_t len;\r
struct scatterlist *sgl = sg->sgl;\r
Address = sg_phys(sgl);\r
\r
for(i=0; i<len; i++) {\r
- Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);\r
+ Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));\r
}\r
\r
mapped_size += len;\r
static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int Src0MemSize, DstMemSize, Src1MemSize;\r
- uint32_t Src0Start, Src1Start, DstStart;\r
+ unsigned long Src0Start, Src1Start, DstStart;\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_Base_phys;\r
int ret;\r
Src1MemSize = 0;\r
DstMemSize = 0;\r
\r
- do\r
- {\r
+ do {\r
/* cal src0 buf mmu info */\r
if(req->mmu_info.src0_mmu_flag & 1) {\r
Src0MemSize = rga2_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
break;\r
}\r
\r
- pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA2_MALLOC_ERROR;\r
- break;\r
- }\r
+ pages = rga2_mmu_buf.pages;\r
\r
mutex_lock(&rga2_service.lock);\r
MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));\r
\r
if (ret < 0) {\r
pr_err("rga2 map src0 memory failed\n");\r
- pr_err("RGA2 : yrgb = %.8x, uv = %.8x format = %d\n", req->src.yrgb_addr, req->src.uv_addr, req->src.format);\r
- pr_err("RGA2 : vir_w = %d, vir_h = %d\n", req->src.vir_w, req->src.vir_h);\r
status = ret;\r
break;\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys));\r
+ req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));\r
uv_size = (req->src.uv_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;\r
\r
\r
if (ret < 0) {\r
pr_err("rga2 map src1 memory failed\n");\r
- pr_err("RGA2 : yrgb = %.8x, format = %d\n", req->src1.yrgb_addr, req->src1.format);\r
- pr_err("RGA2 : vir_w = %d, vir_h = %d\n", req->src1.vir_w, req->src1.vir_h);\r
status = ret;\r
break;\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.src1_base_addr = ((uint32_t)(MMU_Base_phys + Src0MemSize));\r
+ req->mmu_info.src1_base_addr = ((unsigned long)(MMU_Base_phys + Src0MemSize));\r
req->src1.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
}\r
if (ret < 0) {\r
pr_err("rga2 map dst memory failed\n");\r
- pr_err("RGA2 : yrgb = %.8x, uv = %.8x\n, format = %d\n", req->dst.yrgb_addr, req->dst.uv_addr, req->dst.format);\r
- pr_err("RGA2 : vir_w = %d, vir_h = %d\n", req->dst.vir_w, req->dst.vir_h);\r
status = ret;\r
break;\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.dst_base_addr = ((uint32_t)(MMU_Base_phys + Src0MemSize + Src1MemSize));\r
+ req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys + Src0MemSize + Src1MemSize));\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
}\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
reg->MMU_len = AllSize;\r
\r
status = 0;\r
\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
return status;\r
}\r
while(0);\r
\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
return status;\r
}\r
\r
static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int SrcMemSize, DstMemSize;\r
- uint32_t SrcStart, DstStart;\r
+ unsigned long SrcStart, DstStart;\r
struct page **pages = NULL;\r
uint32_t AllSize;\r
uint32_t *MMU_Base = NULL, *MMU_Base_phys;\r
SrcMemSize = 0;\r
DstMemSize = 0;\r
\r
- do\r
- {\r
+ do {\r
if (req->mmu_info.src0_mmu_flag) {\r
SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);\r
if(SrcMemSize == 0) {\r
break;\r
}\r
\r
- pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ pages = rga2_mmu_buf.pages;\r
if(pages == NULL) {\r
pr_err("RGA MMU malloc pages mem failed\n");\r
return -EINVAL;\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys));\r
+ req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));\r
req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.dst_base_addr = ((uint32_t)(MMU_Base_phys + SrcMemSize));\r
+ req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys + SrcMemSize));\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
reg->MMU_len = AllSize;\r
\r
- status = 0;\r
-\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
- return status;\r
+ return 0;\r
}\r
while(0);\r
\r
- /* Free the page table */\r
- if (pages != NULL) {\r
- kfree(pages);\r
- }\r
-\r
return 0;\r
}\r
\r
static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int DstMemSize;\r
- uint32_t DstStart;\r
+ unsigned long DstStart;\r
struct page **pages = NULL;\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_Base_phys;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
if(req->mmu_info.dst_mmu_flag & 1) {\r
DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
\r
AllSize = (DstMemSize + 15) & (~15);\r
\r
- pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA2 MMU malloc pages mem failed\n");\r
- status = RGA2_MALLOC_ERROR;\r
- break;\r
- }\r
+ pages = rga2_mmu_buf.pages;\r
\r
if(rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {\r
pr_err("RGA2 Get MMU mem failed\n");\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.dst_base_addr = ((uint32_t)MMU_Base_phys);\r
+ req->mmu_info.dst_base_addr = ((unsigned long)MMU_Base_phys);\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+ #endif\r
\r
rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
\r
- /* Free the page table */\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
return 0;\r
}\r
while(0);\r
\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
return status;\r
}\r
\r
static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int SrcMemSize;\r
- uint32_t SrcStart;\r
+ unsigned long SrcStart;\r
struct page **pages = NULL;\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_Base_phys;\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
/* cal src buf mmu info */\r
SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h, &SrcStart);\r
if(SrcMemSize == 0) {\r
mutex_unlock(&rga2_service.lock);\r
\r
pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA2_MALLOC_ERROR;\r
- break;\r
- }\r
\r
if(SrcMemSize) {\r
ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
}\r
\r
/* change the buf address in req struct */\r
- req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys));\r
+ req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));\r
req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
reg->MMU_len = AllSize;\r
\r
- if (pages != NULL) {\r
- /* Free the page table */\r
- kfree(pages);\r
- }\r
-\r
return 0;\r
}\r
while(0);\r
\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
return status;\r
}\r
\r
static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int SrcMemSize, CMDMemSize;\r
- uint32_t SrcStart, CMDStart;\r
+ unsigned long SrcStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
\r
MMU_Base = MMU_p = 0;\r
\r
- do\r
- {\r
-\r
+ do {\r
/* cal src buf mmu info */\r
SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.act_w * req->pat.act_h * 4, &SrcStart);\r
if(SrcMemSize == 0) {\r
}\r
\r
/* cal cmd buf mmu info */\r
- CMDMemSize = rga2_mem_size_cal((uint32_t)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);\r
+ CMDMemSize = rga2_mem_size_cal((unsigned long)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
return -EINVAL;\r
}\r
\r
AllSize = SrcMemSize + CMDMemSize;\r
\r
- pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
- status = RGA2_MALLOC_ERROR;\r
- break;\r
- }\r
+ pages = rga2_mmu_buf.pages;\r
\r
MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- status = RGA2_MALLOC_ERROR;\r
- break;\r
- }\r
\r
for(i=0; i<CMDMemSize; i++) {\r
MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
reg->MMU_base = MMU_Base;\r
\r
/* flush data to DDR */\r
+ #ifdef CONFIG_ARM\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
-\r
- if (pages != NULL) {\r
- /* Free the page table */\r
- kfree(pages);\r
- }\r
+ #elif defined(CONFIG_ARM64)\r
+ __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ #endif\r
\r
return 0;\r
\r
}\r
while(0);\r
\r
- if (pages != NULL)\r
- kfree(pages);\r
-\r
return status;\r
}\r
\r
p = p + (x_off>>shift) + y_off*src_stride;\r
\r
\r
- *bRGA_SRC_BASE0 = (RK_U32)p;\r
+ *bRGA_SRC_BASE0 = (unsigned long)p;\r
\r
reg = ((reg & (~m_RGA2_SRC_INFO_SW_SRC_FMT)) | (s_RGA2_SRC_INFO_SW_SRC_FMT((msg->palette_mode | 0xc))));\r
reg = ((reg & (~m_RGA2_SRC_INFO_SW_SW_CP_ENDAIN)) | (s_RGA2_SRC_INFO_SW_SW_CP_ENDAIN(msg->endian_mode & 1)));\r
bRGA_FADING_CTRL = (RK_U32 *)(base + RGA2_FADING_CTRL_OFFSET);\r
\r
*bRGA_FADING_CTRL = msg->fading_g_value << 8;\r
- // *bRGA_MASK_BASE = (RK_U32)msg->LUT_addr;\r
*bRGA_MASK_BASE = (RK_U32)msg->pat.yrgb_addr;\r
}\r
\r