#define FUSE_SKU_INFO 0x110
#define FUSE_SPARE_BIT 0x200
-DEFINE_MUTEX(lock);
+static DEFINE_MUTEX(tegra_fuse_dma_lock);
#ifdef CONFIG_TEGRA_SYSTEM_DMA
-struct tegra_dma_channel *dma;
-u32 *fuse_bb;
-dma_addr_t fuse_bb_phys;
-struct completion rd_wait;
-struct completion wr_wait;
+static struct tegra_dma_channel *tegra_fuse_dma;
+static u32 *tegra_fuse_bb;
+static dma_addr_t tegra_fuse_bb_phys;
+static DECLARE_COMPLETION(tegra_fuse_wait);
static void fuse_dma_complete(struct tegra_dma_req *req)
{
- if (req)
- req->to_memory ? complete(&rd_wait) : complete(&wr_wait);
+ complete(&tegra_fuse_wait);
}
static inline u32 fuse_readl(unsigned long offset)
{
struct tegra_dma_req req;
+ int ret;
- if (!dma)
- return -EINVAL;
+ if (!tegra_fuse_dma)
+ return readl(IO_TO_VIRT(TEGRA_FUSE_BASE + offset));
- mutex_lock(&lock);
+ mutex_lock(&tegra_fuse_dma_lock);
req.complete = fuse_dma_complete;
req.to_memory = 1;
- req.dest_addr = fuse_bb_phys;
+ req.dest_addr = tegra_fuse_bb_phys;
req.dest_bus_width = 32;
req.dest_wrap = 1;
 req.source_addr = TEGRA_FUSE_BASE + offset;
+ req.source_bus_width = 32;
+ req.source_wrap = 4;
req.req_sel = 0;
req.size = 4;
- init_completion(&rd_wait);
- tegra_dma_enqueue_req(dma, &req);
- if (wait_for_completion_timeout(&rd_wait, msecs_to_jiffies(50)) == 0) {
- WARN_ON(1);
- mutex_unlock(&lock);
- return 0;
- }
+ INIT_COMPLETION(tegra_fuse_wait);
+
+ tegra_dma_enqueue_req(tegra_fuse_dma, &req);
+
+ ret = wait_for_completion_timeout(&tegra_fuse_wait,
+ msecs_to_jiffies(50));
- mutex_unlock(&lock);
- return *((u32 *)fuse_bb);
+ if (WARN(ret == 0, "fuse read dma timed out"))
+ *(u32 *)tegra_fuse_bb = 0;
+
+ mutex_unlock(&tegra_fuse_dma_lock);
+ return *((u32 *)tegra_fuse_bb);
}
static inline void fuse_writel(u32 value, unsigned long offset)
{
struct tegra_dma_req req;
+ int ret;
- if (!dma || !fuse_bb)
+ if (!tegra_fuse_dma) {
+ writel(value, IO_TO_VIRT(TEGRA_FUSE_BASE + offset));
return;
+ }
- mutex_lock(&lock);
- *((u32 *)fuse_bb) = value;
+ mutex_lock(&tegra_fuse_dma_lock);
+ *((u32 *)tegra_fuse_bb) = value;
req.complete = fuse_dma_complete;
req.to_memory = 0;
req.dest_addr = TEGRA_FUSE_BASE + offset;
req.dest_wrap = 4;
req.dest_bus_width = 32;
- req.source_addr = fuse_bb_phys;
+ req.source_addr = tegra_fuse_bb_phys;
req.source_bus_width = 32;
req.source_wrap = 1;
req.req_sel = 0;
req.size = 4;
- init_completion(&wr_wait);
- tegra_dma_enqueue_req(dma, &req);
- if (wait_for_completion_timeout(&wr_wait, msecs_to_jiffies(50)) == 0)
- WARN_ON(1);
- mutex_unlock(&lock);
+ INIT_COMPLETION(tegra_fuse_wait);
+
+ tegra_dma_enqueue_req(tegra_fuse_dma, &req);
+
+ ret = wait_for_completion_timeout(&tegra_fuse_wait,
+ msecs_to_jiffies(50));
+ WARN(ret == 0, "fuse write dma timed out");
+
+ mutex_unlock(&tegra_fuse_dma_lock);
}
#else
static inline u32 fuse_readl(unsigned long offset)
reg |= 1 << 28;
writel(reg, IO_TO_VIRT(TEGRA_CLK_RESET_BASE + 0x48));
+ pr_info("Tegra SKU: %d CPU Process: %d Core Process: %d\n",
+ tegra_sku_id(), tegra_cpu_process_id(),
+ tegra_core_process_id());
+}
+
+void tegra_init_fuse_dma(void)
+{
#ifdef CONFIG_TEGRA_SYSTEM_DMA
- dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
+ tegra_fuse_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
TEGRA_DMA_SHARED);
- if (!dma) {
+ if (!tegra_fuse_dma) {
pr_err("%s: can not allocate dma channel\n", __func__);
return;
}
- fuse_bb = dma_alloc_coherent(NULL, sizeof(u32),
- &fuse_bb_phys, GFP_KERNEL);
- if (!fuse_bb) {
+ tegra_fuse_bb = dma_alloc_coherent(NULL, sizeof(u32),
+ &tegra_fuse_bb_phys, GFP_KERNEL);
+ if (!tegra_fuse_bb) {
pr_err("%s: can not allocate bounce buffer\n", __func__);
- tegra_dma_free_channel(dma);
- dma = NULL;
+ tegra_dma_free_channel(tegra_fuse_dma);
+ tegra_fuse_dma = NULL;
return;
}
- mutex_init(&lock);
#endif
-
- pr_info("Tegra SKU: %d CPU Process: %d Core Process: %d\n",
- tegra_sku_id(), tegra_cpu_process_id(),
- tegra_core_process_id());
}
unsigned long long tegra_chip_uid(void)