#define __LINUX_NVHOST_H
#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
struct nvhost_master;
#define nvhost_set_drvdata(_dev,data) dev_set_drvdata(&(_dev)->dev, (data))
int nvhost_bus_register(struct nvhost_master *host);
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#define NVHOST_NO_TIMEOUT (-1)
+#define NVHOST_IOCTL_MAGIC 'H'
+
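+/*
+ * A submit is streamed to a channel device with write(): one
+ * nvhost_submit_hdr, followed by num_cmdbufs nvhost_cmdbuf structs and
+ * num_relocs nvhost_reloc structs, then kicked off with the
+ * NVHOST_IOCTL_CHANNEL_FLUSH ioctl.
+ */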
+struct nvhost_submit_hdr {
+ __u32 syncpt_id;
+ __u32 syncpt_incrs;
+ __u32 num_cmdbufs;
+ __u32 num_relocs;
+};
+
+struct nvhost_cmdbuf {
+ __u32 mem;
+ __u32 offset;
+ __u32 words;
+};
+
+struct nvhost_reloc {
+ __u32 cmdbuf_mem;
+ __u32 cmdbuf_offset;
+ __u32 target;
+ __u32 target_offset;
+};
+
+struct nvhost_get_param_args {
+ __u32 value;
+};
+
+struct nvhost_set_nvmap_fd_args {
+ __u32 fd;
+};
+
+#define NVHOST_IOCTL_CHANNEL_FLUSH \
+ _IOR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS \
+ _IOR(NVHOST_IOCTL_MAGIC, 2, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_WAITBASES \
+ _IOR(NVHOST_IOCTL_MAGIC, 3, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES \
+ _IOR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD \
+ _IOW(NVHOST_IOCTL_MAGIC, 5, struct nvhost_set_nvmap_fd_args)
+#define NVHOST_IOCTL_CHANNEL_LAST \
+ _IOC_NR(NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD)
+#define NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE sizeof(struct nvhost_get_param_args)
+
+struct nvhost_ctrl_syncpt_read_args {
+ __u32 id;
+ __u32 value;
+};
+
+struct nvhost_ctrl_syncpt_incr_args {
+ __u32 id;
+};
+
+struct nvhost_ctrl_syncpt_wait_args {
+ __u32 id;
+ __u32 thresh;
+ __s32 timeout;
+};
+
+struct nvhost_ctrl_module_mutex_args {
+ __u32 id;
+ __u32 lock;
+};
+
+struct nvhost_ctrl_module_regrdwr_args {
+ __u32 id;
+ __u32 num_offsets;
+ __u32 block_size;
+ __u32 __user *offsets;
+ __u32 __user *values;
+ __u32 write;
+};
+
+#define NVHOST_IOCTL_CTRL_SYNCPT_READ \
+ _IOWR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_ctrl_syncpt_read_args)
+#define NVHOST_IOCTL_CTRL_SYNCPT_INCR \
+ _IOW(NVHOST_IOCTL_MAGIC, 2, struct nvhost_ctrl_syncpt_incr_args)
+#define NVHOST_IOCTL_CTRL_SYNCPT_WAIT \
+ _IOW(NVHOST_IOCTL_MAGIC, 3, struct nvhost_ctrl_syncpt_wait_args)
+
+#define NVHOST_IOCTL_CTRL_MODULE_MUTEX \
+ _IOWR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_ctrl_module_mutex_args)
+#define NVHOST_IOCTL_CTRL_MODULE_REGRDWR \
+ _IOWR(NVHOST_IOCTL_MAGIC, 5, struct nvhost_ctrl_module_regrdwr_args)
+
+#define NVHOST_IOCTL_CTRL_LAST \
+ _IOC_NR(NVHOST_IOCTL_CTRL_MODULE_REGRDWR)
+#define NVHOST_IOCTL_CTRL_MAX_ARG_SIZE sizeof(struct nvhost_ctrl_module_regrdwr_args)
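+
+/*
+ * Example (illustrative, userspace; error handling omitted): block
+ * until syncpoint 'id' reaches 'thresh' via the control node:
+ *
+ *	struct nvhost_ctrl_syncpt_wait_args wait = {
+ *		.id = id,
+ *		.thresh = thresh,
+ *		.timeout = NVHOST_NO_TIMEOUT,
+ *	};
+ *	ioctl(ctrl_fd, NVHOST_IOCTL_CTRL_SYNCPT_WAIT, &wait);
+ */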
+
#endif
nvhost-objs = \
+ nvhost_acm.o \
+ nvhost_syncpt.o \
+ nvhost_cdma.o \
+ nvhost_cpuaccess.o \
+ nvhost_intr.o \
+ nvhost_channel.o \
+ nvhost_3dctx.o \
dev.o \
- bus.o
+ bus.o \
+ debug.o
obj-$(CONFIG_TEGRA_GRHOST) += nvhost.o
--- /dev/null
+/*
+ * drivers/video/tegra/dc/dc.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/io.h>
+
+#include "dev.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+enum {
+ NVHOST_DBG_STATE_CMD = 0,
+ NVHOST_DBG_STATE_DATA = 1,
+};
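+
+/*
+ * The channel FIFO is decoded as a two-state machine: a command word
+ * is printed and may announce 'count' data words, which are consumed
+ * in the DATA state before dropping back to CMD.
+ */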
+
+static int nvhost_debug_handle_cmd(struct seq_file *s, u32 val, int *count)
+{
+ unsigned mask;
+ unsigned subop;
+
+ switch (val >> 28) {
+ case 0x0:
+ mask = val & 0x3f;
+ if (mask) {
+ seq_printf(s, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
+ val >> 6 & 0x3ff, val >> 16 & 0xfff, mask);
+ *count = hweight8(mask);
+ return NVHOST_DBG_STATE_DATA;
+ } else {
+ seq_printf(s, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
+ return NVHOST_DBG_STATE_CMD;
+ }
+
+ case 0x1:
+ seq_printf(s, "INCR(offset=%03x, [", val >> 16 & 0x3ff);
+ *count = val & 0xffff;
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x2:
+ seq_printf(s, "NOMINCR(offset=%03x, [", val >> 16 & 0x3ff);
+ *count = val & 0xffff;
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x3:
+ mask = val & 0xffff;
+ seq_printf(s, "MASK(offset=%03x, mask=%03x, [",
+ val >> 16 & 0xfff, mask);
+ *count = hweight16(mask);
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x4:
+ seq_printf(s, "IMM(offset=%03x, data=%03x)\n",
+ val >> 16 & 0xfff, val & 0xffff);
+ return NVHOST_DBG_STATE_CMD;
+
+ case 0x5:
+ seq_printf(s, "RESTART(offset=%08x)\n", val << 4);
+ return NVHOST_DBG_STATE_CMD;
+
+ case 0x6:
+ seq_printf(s, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+ val >> 16 & 0xfff, val >> 15 & 0x1, val >> 14 & 0x1,
+ val & 0x3fff);
+ *count = 1;
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0xe:
+ subop = val >> 24 & 0xf;
+ if (subop == 0)
+ seq_printf(s, "ACQUIRE_MLOCK(index=%d)\n", val & 0xff);
+ else if (subop == 1)
+ seq_printf(s, "RELEASE_MLOCK(index=%d)\n", val & 0xff);
+ else
+ seq_printf(s, "EXTEND_UNKNOWN(%08x)\n", val);
+
+ return NVHOST_DBG_STATE_CMD;
+
+ case 0xf:
+ seq_printf(s, "DONE()\n");
+ return NVHOST_DBG_STATE_CMD;
+
+ default:
+ return NVHOST_DBG_STATE_CMD;
+ }
+}
+
+static int nvhost_debug_show(struct seq_file *s, void *unused)
+{
+ struct nvhost_master *m = s->private;
+ int i;
+
+ nvhost_module_busy(&m->mod);
+
+ for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+ void __iomem *regs = m->channels[i].aperture;
+ u32 dmaput, dmaget, dmactrl;
+ u32 cbstat, cbread;
+ u32 fifostat;
+ u32 val, base;
+ unsigned start, end;
+ unsigned wr_ptr, rd_ptr;
+ int state;
+ int count = 0;
+
+ dmaput = readl(regs + HOST1X_CHANNEL_DMAPUT);
+ dmaget = readl(regs + HOST1X_CHANNEL_DMAGET);
+ dmactrl = readl(regs + HOST1X_CHANNEL_DMACTRL);
+ cbread = readl(m->aperture + HOST1X_SYNC_CBREAD(i));
+ cbstat = readl(m->aperture + HOST1X_SYNC_CBSTAT(i));
+
+ if (dmactrl != 0x0 || !m->channels[i].cdma.push_buffer.mapped) {
+ seq_printf(s, "%d: inactive\n\n", i);
+ continue;
+ }
+
+ switch (cbstat) {
+ case 0x00010008:
+ seq_printf(s, "%d: waiting on syncpt %d val %d\n",
+ i, cbread >> 24, cbread & 0xffffff);
+ break;
+
+ case 0x00010009:
+ base = cbread >> 16 & 0xff;
+
+ val = readl(m->aperture + HOST1X_SYNC_SYNCPT_BASE(base)) & 0xffff;
+ val += cbread & 0xffff;
+
+ seq_printf(s, "%d: waiting on syncpt %d val %d\n",
+ i, cbread >> 24, val);
+ break;
+
+ default:
+ seq_printf(s, "%d: active class %02x, offset %04x, val %08x\n",
+ i, cbstat >> 16, cbstat & 0xffff, cbread);
+ break;
+ }
+
+ fifostat = readl(regs + HOST1X_CHANNEL_FIFOSTAT);
+ if ((fifostat & 1 << 10) == 0 ) {
+
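+ /* the command FIFO is not empty: walk it through the CFPEEK
+ * window and disassemble its contents */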
+ writel(0x0, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ writel(1 << 31 | i << 16, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ rd_ptr = readl(m->aperture + HOST1X_SYNC_CFPEEK_PTRS) & 0x1ff;
+ wr_ptr = readl(m->aperture + HOST1X_SYNC_CFPEEK_PTRS) >> 16 & 0x1ff;
+
+ start = readl(m->aperture + HOST1X_SYNC_CF_SETUP(i)) & 0x1ff;
+ end = (readl(m->aperture + HOST1X_SYNC_CF_SETUP(i)) >> 16) & 0x1ff;
+
+ state = NVHOST_DBG_STATE_CMD;
+
+ do {
+ writel(0x0, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ writel(1 << 31 | i << 16 | rd_ptr, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ val = readl(m->aperture + HOST1X_SYNC_CFPEEK_READ);
+
+ switch (state) {
+ case NVHOST_DBG_STATE_CMD:
+ seq_printf(s, "%d: %08x:", i, val);
+
+ state = nvhost_debug_handle_cmd(s, val, &count);
+ if (state == NVHOST_DBG_STATE_DATA && count == 0) {
+ state = NVHOST_DBG_STATE_CMD;
+ seq_printf(s, "])\n");
+ }
+ break;
+
+ case NVHOST_DBG_STATE_DATA:
+ count--;
+ seq_printf(s, "%08x%s", val, count > 0 ? ", " : "])\n");
+ if (count == 0)
+ state = NVHOST_DBG_STATE_CMD;
+ break;
+ }
+
+ if (rd_ptr == end)
+ rd_ptr = start;
+ else
+ rd_ptr++;
+
+ } while (rd_ptr != wr_ptr);
+
+ if (state == NVHOST_DBG_STATE_DATA)
+ seq_printf(s, ", ...])\n");
+ }
+ seq_printf(s, "\n");
+ }
+
+ nvhost_module_idle(&m->mod);
+ return 0;
+}
+
+
+static int nvhost_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvhost_debug_show, inode->i_private);
+}
+
+static const struct file_operations nvhost_debug_fops = {
+ .open = nvhost_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nvhost_debug_init(struct nvhost_master *master)
+{
+ debugfs_create_file("tegra_host", S_IRUGO, NULL, master, &nvhost_debug_fops);
+}
+#else
+void nvhost_debug_init(struct nvhost_master *master)
+{
+}
+
+#endif
+
#include <asm/io.h>
#include <mach/nvhost.h>
+#include <mach/nvmap.h>
#define DRIVER_NAME "tegra_grhost"
#define IFACE_NAME "nvhost"
+static int nvhost_major = NVHOST_MAJOR;
+static int nvhost_minor = NVHOST_CHANNEL_BASE;
+
+struct nvhost_channel_userctx {
+ struct nvhost_channel *ch;
+ struct nvhost_hwctx *hwctx;
+ u32 syncpt_id;
+ u32 syncpt_incrs;
+ u32 cmdbufs_pending;
+ u32 relocs_pending;
+ struct nvmap_handle_ref *gather_mem;
+ struct nvhost_op_pair *gathers;
+ int num_gathers;
+ int pinarray_size;
+ struct nvmap_pinarray_elem pinarray[NVHOST_MAX_HANDLES];
+ struct nvmap_handle *unpinarray[NVHOST_MAX_HANDLES];
+ struct nvmap_client *nvmap;
+};
+
+struct nvhost_ctrl_userctx {
+ struct nvhost_master *dev;
+ u32 mod_locks[NV_HOST1X_NB_MLOCKS];
+};
+
+static int nvhost_channelrelease(struct inode *inode, struct file *filp)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+
+ filp->private_data = NULL;
+
+ nvhost_putchannel(priv->ch, priv->hwctx);
+
+ if (priv->hwctx)
+ priv->ch->ctxhandler.put(priv->hwctx);
+
+ if (priv->gathers)
+ nvmap_munmap(priv->gather_mem, priv->gathers);
+
+ if (!IS_ERR_OR_NULL(priv->gather_mem))
+ nvmap_free(priv->ch->dev->nvmap, priv->gather_mem);
+
+ nvmap_client_put(priv->nvmap);
+ kfree(priv);
+ return 0;
+}
+
+static int nvhost_channelopen(struct inode *inode, struct file *filp)
+{
+ struct nvhost_channel_userctx *priv;
+ struct nvhost_channel *ch;
+ size_t gather_size;
+
+ ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
+ ch = nvhost_getchannel(ch);
+ if (!ch)
+ return -ENOMEM;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ nvhost_putchannel(ch, NULL);
+ return -ENOMEM;
+ }
+ filp->private_data = priv;
+ priv->ch = ch;
+ gather_size = sizeof(struct nvhost_op_pair) * NVHOST_MAX_GATHERS;
+ priv->gather_mem = nvmap_alloc(ch->dev->nvmap, gather_size, 32,
+ NVMAP_HANDLE_CACHEABLE);
+ if (IS_ERR(priv->gather_mem))
+ goto fail;
+
+ if (ch->ctxhandler.alloc) {
+ priv->hwctx = ch->ctxhandler.alloc(ch);
+ if (!priv->hwctx)
+ goto fail;
+ }
+
+ priv->gathers = (struct nvhost_op_pair *)nvmap_mmap(priv->gather_mem);
+ if (!priv->gathers)
+ goto fail;
+
+ return 0;
+fail:
+ nvhost_channelrelease(inode, filp);
+ return -ENOMEM;
+}
+
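+/*
+ * Queue one gather: op1 (the GATHER opcode with its word count) is
+ * written now; op2, the buffer's physical address, is patched in later
+ * by nvmap_pin_array() through the pinarray entry recorded here.
+ */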
+static void add_gather(struct nvhost_channel_userctx *ctx, int idx,
+ u32 mem_id, u32 words, u32 offset)
+{
+ struct nvmap_pinarray_elem *pin;
+ pin = &ctx->pinarray[ctx->pinarray_size++];
+ pin->patch_mem = (u32)nvmap_ref_to_handle(ctx->gather_mem);
+ pin->patch_offset = (idx * sizeof(struct nvhost_op_pair)) +
+ offsetof(struct nvhost_op_pair, op2);
+ pin->pin_mem = mem_id;
+ pin->pin_offset = offset;
+ ctx->gathers[idx].op1 = nvhost_opcode_gather(0, words);
+}
+
+static void reset_submit(struct nvhost_channel_userctx *ctx)
+{
+ ctx->cmdbufs_pending = 0;
+ ctx->relocs_pending = 0;
+}
+
+static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+ size_t remaining = count;
+ int err = 0;
+
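+ /*
+ * Parse the submit stream: first a header, then the declared
+ * number of cmdbufs, then the declared relocations. A short
+ * write resumes where the previous one left off.
+ */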
+ while (remaining) {
+ size_t consumed;
+ if (!priv->relocs_pending && !priv->cmdbufs_pending) {
+ consumed = sizeof(struct nvhost_submit_hdr);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(&priv->syncpt_id, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ if (!priv->cmdbufs_pending) {
+ err = -EFAULT;
+ break;
+ }
+ /* leave room for ctx switch */
+ priv->num_gathers = 2;
+ priv->pinarray_size = 0;
+ } else if (priv->cmdbufs_pending) {
+ struct nvhost_cmdbuf cmdbuf;
+ consumed = sizeof(cmdbuf);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(&cmdbuf, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ add_gather(priv, priv->num_gathers++,
+ cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
+ priv->cmdbufs_pending--;
+ } else if (priv->relocs_pending) {
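+ /* nvhost_reloc matches the layout of nvmap_pinarray_elem,
+ * so relocations are copied straight into the pinarray */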
+ int numrelocs = remaining / sizeof(struct nvhost_reloc);
+ if (!numrelocs)
+ break;
+ numrelocs = min_t(int, numrelocs, priv->relocs_pending);
+ consumed = numrelocs * sizeof(struct nvhost_reloc);
+ if (copy_from_user(&priv->pinarray[priv->pinarray_size],
+ buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ priv->pinarray_size += numrelocs;
+ priv->relocs_pending -= numrelocs;
+ } else {
+ err = -EFAULT;
+ break;
+ }
+ remaining -= consumed;
+ buf += consumed;
+ }
+
+ if (err < 0) {
+ dev_err(&priv->ch->dev->pdev->dev, "channel write error\n");
+ reset_submit(priv);
+ return err;
+ }
+
+ return (count - remaining);
+}
+
+static int nvhost_ioctl_channel_flush(struct nvhost_channel_userctx *ctx,
+ struct nvhost_get_param_args *args)
+{
+ struct nvhost_cpuinterrupt ctxsw;
+ int gather_idx = 2;
+ int num_intrs = 0;
+ u32 syncval;
+ int num_unpin;
+ int err;
+
+ if (ctx->relocs_pending || ctx->cmdbufs_pending) {
+ reset_submit(ctx);
+ dev_err(&ctx->ch->dev->pdev->dev, "channel submit out of sync\n");
+ return -EFAULT;
+ }
+ if (!ctx->nvmap) {
+ dev_err(&ctx->ch->dev->pdev->dev, "no nvmap context set\n");
+ return -EFAULT;
+ }
+ if (ctx->num_gathers <= 2)
+ return 0;
+
+ /* keep module powered */
+ nvhost_module_busy(&ctx->ch->mod);
+
+ /* pin mem handles and patch physical addresses */
+ num_unpin = nvmap_pin_array(ctx->nvmap,
+ nvmap_ref_to_handle(ctx->gather_mem),
+ ctx->pinarray, ctx->pinarray_size,
+ ctx->unpinarray);
+ if (num_unpin < 0) {
+ dev_warn(&ctx->ch->dev->pdev->dev, "nvmap_pin_array failed: "
+ "%d\n", num_unpin);
+ nvhost_module_idle(&ctx->ch->mod);
+ return num_unpin;
+ }
+
+ /* get submit lock */
+ err = mutex_lock_interruptible(&ctx->ch->submitlock);
+ if (err) {
+ nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
+ nvhost_module_idle(&ctx->ch->mod);
+ return err;
+ }
+
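+ /*
+ * Gather slots 0 and 1 were reserved at submit start for an
+ * optional save of the outgoing context and restore of the
+ * incoming one; gather_idx walks back from 2 as they are used.
+ */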
+ /* context switch */
+ if (ctx->ch->cur_ctx != ctx->hwctx) {
+ struct nvhost_hwctx *hw = ctx->hwctx;
+ if (hw && hw->valid) {
+ gather_idx--;
+ ctx->gathers[gather_idx].op1 =
+ nvhost_opcode_gather(0, hw->restore_size);
+ ctx->gathers[gather_idx].op2 = hw->restore_phys;
+ ctx->syncpt_incrs += hw->restore_incrs;
+ }
+ hw = ctx->ch->cur_ctx;
+ if (hw) {
+ gather_idx--;
+ ctx->gathers[gather_idx].op1 =
+ nvhost_opcode_gather(0, hw->save_size);
+ ctx->gathers[gather_idx].op2 = hw->save_phys;
+ ctx->syncpt_incrs += hw->save_incrs;
+ num_intrs = 1;
+ ctxsw.syncpt_val = hw->save_incrs - 1;
+ ctxsw.intr_data = hw;
+ hw->valid = true;
+ ctx->ch->ctxhandler.get(hw);
+ }
+ ctx->ch->cur_ctx = ctx->hwctx;
+ }
+
+ /* add a setclass for modules that require it */
+ if (gather_idx == 2 && ctx->ch->desc->class) {
+ gather_idx--;
+ ctx->gathers[gather_idx].op1 =
+ nvhost_opcode_setclass(ctx->ch->desc->class, 0, 0);
+ ctx->gathers[gather_idx].op2 = NVHOST_OPCODE_NOOP;
+ }
+
+ /* get absolute sync value */
+ if (BIT(ctx->syncpt_id) & NVSYNCPTS_CLIENT_MANAGED)
+ syncval = nvhost_syncpt_set_max(&ctx->ch->dev->syncpt,
+ ctx->syncpt_id, ctx->syncpt_incrs);
+ else
+ syncval = nvhost_syncpt_incr_max(&ctx->ch->dev->syncpt,
+ ctx->syncpt_id, ctx->syncpt_incrs);
+
+ /* patch absolute syncpt value into interrupt triggers */
+ if (num_intrs)
+ ctxsw.syncpt_val += syncval - ctx->syncpt_incrs;
+
+ nvhost_channel_submit(ctx->ch, ctx->nvmap, &ctx->gathers[gather_idx],
+ ctx->num_gathers - gather_idx, &ctxsw, num_intrs,
+ ctx->unpinarray, num_unpin,
+ ctx->syncpt_id, syncval);
+
+ /* schedule a submit complete interrupt */
+ nvhost_intr_add_action(&ctx->ch->dev->intr, ctx->syncpt_id, syncval,
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ctx->ch, NULL);
+
+ mutex_unlock(&ctx->ch->submitlock);
+ args->value = syncval;
+ return 0;
+}
+
+static long nvhost_channelctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+ u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
+ int err = 0;
+
+ if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+ (_IOC_NR(cmd) == 0) ||
+ (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
+ return -EFAULT;
+
+ BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
+
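+ /* stage the argument struct through a stack buffer: copy in for
+ * _IOC_WRITE ioctls here, copy back out for _IOC_READ below */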
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case NVHOST_IOCTL_CHANNEL_FLUSH:
+ err = nvhost_ioctl_channel_flush(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->desc->syncpts;
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->desc->waitbases;
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->desc->modulemutexes;
+ break;
+ case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
+ {
+ int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
+ struct nvmap_client *new_client = nvmap_client_get_file(fd);
+
+ if (IS_ERR(new_client)) {
+ err = PTR_ERR(new_client);
+ break;
+ }
+
+ if (priv->nvmap)
+ nvmap_client_put(priv->nvmap);
+
+ priv->nvmap = new_client;
+ break;
+ }
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+ err = copy_to_user((void __user *)arg, buf,
+ _IOC_SIZE(cmd)) ? -EFAULT : 0;
+
+ return err;
+}
+
+static struct file_operations nvhost_channelops = {
+ .owner = THIS_MODULE,
+ .release = nvhost_channelrelease,
+ .open = nvhost_channelopen,
+ .write = nvhost_channelwrite,
+ .unlocked_ioctl = nvhost_channelctl
+};
+
+static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
+{
+ struct nvhost_ctrl_userctx *priv = filp->private_data;
+ int i;
+
+ filp->private_data = NULL;
+ if (priv->mod_locks[0])
+ nvhost_module_idle(&priv->dev->mod);
+ for (i = 1; i < NV_HOST1X_NB_MLOCKS; i++)
+ if (priv->mod_locks[i])
+ nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
+ kfree(priv);
+ return 0;
+}
+
+static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
+{
+ struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
+ struct nvhost_ctrl_userctx *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = host;
+ filp->private_data = priv;
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_read(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_read_args *args)
+{
+ if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+ return -EINVAL;
+ args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_incr(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_incr_args *args)
+{
+ if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+ return -EINVAL;
+ nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_wait(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_wait_args *args)
+{
+ u32 timeout;
+ if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+ return -EINVAL;
+ if (args->timeout == NVHOST_NO_TIMEOUT)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = (u32)msecs_to_jiffies(args->timeout);
+
+ return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
+ args->thresh, timeout);
+}
+
+static int nvhost_ioctl_ctrl_module_mutex(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_module_mutex_args *args)
+{
+ int err = 0;
+ if (args->id >= NV_HOST1X_NB_MLOCKS ||
+ args->lock > 1)
+ return -EINVAL;
+
+ if (args->lock && !ctx->mod_locks[args->id]) {
+ if (args->id == 0)
+ nvhost_module_busy(&ctx->dev->mod);
+ else
+ err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess, args->id);
+ if (!err)
+ ctx->mod_locks[args->id] = 1;
+ } else if (!args->lock && ctx->mod_locks[args->id]) {
+ if (args->id == 0)
+ nvhost_module_idle(&ctx->dev->mod);
+ else
+ nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
+ ctx->mod_locks[args->id] = 0;
+ }
+ return err;
+}
+
+static int nvhost_ioctl_ctrl_module_regrdwr(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_module_regrdwr_args *args)
+{
+ u32 num_offsets = args->num_offsets;
+ u32 __user *offsets = args->offsets;
+ void __user *values = args->values;
+ u32 vals[64];
+
+ if (!nvhost_access_module_regs(&ctx->dev->cpuaccess, args->id) ||
+ (num_offsets == 0))
+ return -EINVAL;
+
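+ /* transfer each register block in chunks of at most 64 words,
+ * staged through a stack buffer */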
+ while (num_offsets--) {
+ u32 remaining = args->block_size;
+ u32 offs;
+ if (get_user(offs, offsets))
+ return -EFAULT;
+ offsets++;
+ while (remaining) {
+ u32 batch = min_t(u32, remaining, 64 * sizeof(u32));
+ if (args->write) {
+ if (copy_from_user(vals, values, batch))
+ return -EFAULT;
+ nvhost_write_module_regs(&ctx->dev->cpuaccess,
+ args->id, offs, batch, vals);
+ } else {
+ nvhost_read_module_regs(&ctx->dev->cpuaccess,
+ args->id, offs, batch, vals);
+ if (copy_to_user(values, vals, batch))
+ return -EFAULT;
+ }
+ remaining -= batch;
+ offs += batch;
+ values += batch;
+ }
+ }
+
+ return 0;
+}
+
+static long nvhost_ctrlctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvhost_ctrl_userctx *priv = filp->private_data;
+ u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
+ int err = 0;
+
+ if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+ (_IOC_NR(cmd) == 0) ||
+ (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
+ return -EFAULT;
+
+ BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case NVHOST_IOCTL_CTRL_SYNCPT_READ:
+ err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
+ err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
+ err = nvhost_ioctl_ctrl_syncpt_wait(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
+ err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
+ err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+ err = copy_to_user((void __user *)arg, buf,
+ _IOC_SIZE(cmd)) ? -EFAULT : 0;
+
+ return err;
+}
+
+static struct file_operations nvhost_ctrlops = {
+ .owner = THIS_MODULE,
+ .release = nvhost_ctrlrelease,
+ .open = nvhost_ctrlopen,
+ .unlocked_ioctl = nvhost_ctrlctl
+};
+
+static void power_host(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+ struct nvhost_master *dev = container_of(mod, struct nvhost_master, mod);
+
+ if (action == NVHOST_POWER_ACTION_ON) {
+ nvhost_intr_configure(&dev->intr, clk_get_rate(mod->clk[0]));
+ nvhost_syncpt_reset(&dev->syncpt);
+ } else if (action == NVHOST_POWER_ACTION_OFF) {
+ int i;
+ for (i = 0; i < NVHOST_NUMCHANNELS; i++)
+ nvhost_channel_suspend(&dev->channels[i]);
+ nvhost_syncpt_save(&dev->syncpt);
+ }
+}
+
+static int __devinit nvhost_user_init(struct nvhost_master *host)
+{
+ int i, err;
+ dev_t devno;
+
+ host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
+ if (IS_ERR(host->nvhost_class)) {
+ err = PTR_ERR(host->nvhost_class);
+ dev_err(&host->pdev->dev, "failed to create class\n");
+ goto fail;
+ }
+
+ if (nvhost_major) {
+ devno = MKDEV(nvhost_major, nvhost_minor);
+ err = register_chrdev_region(devno, NVHOST_NUMCHANNELS + 1, IFACE_NAME);
+ } else {
+ err = alloc_chrdev_region(&devno, nvhost_minor,
+ NVHOST_NUMCHANNELS + 1, IFACE_NAME);
+ nvhost_major = MAJOR(devno);
+ }
+ if (err < 0) {
+ dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
+ goto fail;
+ }
+
+ for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+ struct nvhost_channel *ch = &host->channels[i];
+
+ cdev_init(&ch->cdev, &nvhost_channelops);
+ ch->cdev.owner = THIS_MODULE;
+
+ devno = MKDEV(nvhost_major, nvhost_minor + i);
+ err = cdev_add(&ch->cdev, devno, 1);
+ if (err < 0) {
+ dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
+ goto fail;
+ }
+ ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
+ IFACE_NAME "-%s", ch->desc->name);
+ if (IS_ERR(ch->node)) {
+ err = PTR_ERR(ch->node);
+ dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
+ goto fail;
+ }
+ }
+
+ cdev_init(&host->cdev, &nvhost_ctrlops);
+ host->cdev.owner = THIS_MODULE;
+ devno = MKDEV(nvhost_major, nvhost_minor + NVHOST_NUMCHANNELS);
+ err = cdev_add(&host->cdev, devno, 1);
+ if (err < 0)
+ goto fail;
+ host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
+ IFACE_NAME "-ctrl");
+ if (IS_ERR(host->ctrl)) {
+ err = PTR_ERR(host->ctrl);
+ dev_err(&host->pdev->dev, "failed to create ctrl device\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
static int __devinit nvhost_probe(struct platform_device *pdev)
{
struct nvhost_master *host;
+ struct resource *regs, *intr0, *intr1;
+ int i, err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+ if (!regs || !intr0 || !intr1) {
+ dev_err(&pdev->dev, "missing required platform resources\n");
+ return -ENXIO;
+ }
host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->pdev = pdev;
+ host->nvmap = nvmap_create_client(nvmap_dev);
+ if (!host->nvmap) {
+ dev_err(&pdev->dev, "unable to create nvmap client\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ host->reg_mem = request_mem_region(regs->start,
+ resource_size(regs), pdev->name);
+ if (!host->reg_mem) {
+ dev_err(&pdev->dev, "failed to get host register memory\n");
+ err = -ENXIO;
+ goto fail;
+ }
+ host->aperture = ioremap(regs->start, resource_size(regs));
+ if (!host->aperture) {
+ dev_err(&pdev->dev, "failed to remap host registers\n");
+ err = -ENXIO;
+ goto fail;
+ }
+ host->sync_aperture = host->aperture +
+ (NV_HOST1X_CHANNEL0_BASE +
+ HOST1X_CHANNEL_SYNC_REG_BASE);
+
+ for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+ struct nvhost_channel *ch = &host->channels[i];
+ err = nvhost_channel_init(ch, host, i);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init channel %d\n", i);
+ goto fail;
+ }
+ }
+
+ err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
+ if (err) goto fail;
+ err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
+ if (err) goto fail;
+ err = nvhost_user_init(host);
+ if (err) goto fail;
+ err = nvhost_module_init(&host->mod, "host1x", power_host, NULL, &pdev->dev);
+ if (err) goto fail;
+
platform_set_drvdata(pdev, host);
nvhost_bus_register(host);
+ nvhost_debug_init(host);
+
dev_info(&pdev->dev, "initialized\n");
return 0;
+
+fail:
+ if (host->nvmap)
+ nvmap_client_put(host->nvmap);
+ /* TODO: [ahatala 2010-05-04] */
+ kfree(host);
+ return err;
}
static int __exit nvhost_remove(struct platform_device *pdev)
return 0;
}
+static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct nvhost_master *host = platform_get_drvdata(pdev);
+ dev_info(&pdev->dev, "suspending\n");
+ nvhost_module_suspend(&host->mod);
+ dev_info(&pdev->dev, "suspended\n");
+ return 0;
+}
+
static struct platform_driver nvhost_driver = {
- .probe = nvhost_probe,
.remove = __exit_p(nvhost_remove),
+ .suspend = nvhost_suspend,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME
static int __init nvhost_mod_init(void)
{
- return platform_driver_register(&nvhost_driver);
+ return platform_driver_probe(&nvhost_driver, nvhost_probe);
}
static void __exit nvhost_mod_exit(void)
#ifndef __NVHOST_DEV_H
#define __NVHOST_DEV_H
+#include "nvhost_acm.h"
+#include "nvhost_syncpt.h"
+#include "nvhost_intr.h"
+#include "nvhost_cpuaccess.h"
+#include "nvhost_channel.h"
+#include "nvhost_hardware.h"
+
+#define NVHOST_MAJOR 0 /* dynamic */
struct nvhost_master {
+ void __iomem *aperture;
+ void __iomem *sync_aperture;
+ struct resource *reg_mem;
struct platform_device *pdev;
+ struct class *nvhost_class;
+ struct cdev cdev;
+ struct device *ctrl;
+ struct nvhost_syncpt syncpt;
+ struct nvmap_client *nvmap;
+ struct nvhost_cpuaccess cpuaccess;
+ struct nvhost_intr intr;
+ struct nvhost_module mod;
+ struct nvhost_channel channels[NVHOST_NUMCHANNELS];
};
+void nvhost_debug_init(struct nvhost_master *master);
+
#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_3dctx.c
+ *
+ * Tegra Graphics Host 3d hardware context
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_hwctx.h"
+#include "dev.h"
+
+#include <linux/slab.h>
+
+const struct hwctx_reginfo ctxsave_regs_3d[] = {
+ HWCTX_REGINFO(0xe00, 16, DIRECT),
+ HWCTX_REGINFO(0xe10, 16, DIRECT),
+ HWCTX_REGINFO(0xe20, 1, DIRECT),
+ HWCTX_REGINFO(0xe21, 1, DIRECT),
+ HWCTX_REGINFO(0xe22, 1, DIRECT),
+ HWCTX_REGINFO(0xe25, 1, DIRECT),
+ HWCTX_REGINFO(0xe26, 1, DIRECT),
+ HWCTX_REGINFO(0xe28, 2, DIRECT),
+ HWCTX_REGINFO(0xe2a, 1, DIRECT),
+ HWCTX_REGINFO(0x1, 1, DIRECT),
+ HWCTX_REGINFO(0x2, 1, DIRECT),
+ HWCTX_REGINFO(0xc, 2, DIRECT),
+ HWCTX_REGINFO(0xe, 2, DIRECT),
+ HWCTX_REGINFO(0x10, 2, DIRECT),
+ HWCTX_REGINFO(0x12, 2, DIRECT),
+ HWCTX_REGINFO(0x14, 2, DIRECT),
+ HWCTX_REGINFO(0x100, 32, DIRECT),
+ HWCTX_REGINFO(0x120, 1, DIRECT),
+ HWCTX_REGINFO(0x121, 1, DIRECT),
+ HWCTX_REGINFO(0x124, 1, DIRECT),
+ HWCTX_REGINFO(0x125, 1, DIRECT),
+ HWCTX_REGINFO(0x200, 1, DIRECT),
+ HWCTX_REGINFO(0x201, 1, DIRECT),
+ HWCTX_REGINFO(0x202, 1, DIRECT),
+ HWCTX_REGINFO(0x203, 1, DIRECT),
+ HWCTX_REGINFO(0x204, 1, DIRECT),
+ HWCTX_REGINFO(0x207, 1024, INDIRECT),
+ HWCTX_REGINFO(0x209, 1, DIRECT),
+ HWCTX_REGINFO(0x300, 64, DIRECT),
+ HWCTX_REGINFO(0x343, 1, DIRECT),
+ HWCTX_REGINFO(0x344, 1, DIRECT),
+ HWCTX_REGINFO(0x345, 1, DIRECT),
+ HWCTX_REGINFO(0x346, 1, DIRECT),
+ HWCTX_REGINFO(0x347, 1, DIRECT),
+ HWCTX_REGINFO(0x348, 1, DIRECT),
+ HWCTX_REGINFO(0x349, 1, DIRECT),
+ HWCTX_REGINFO(0x34a, 1, DIRECT),
+ HWCTX_REGINFO(0x34b, 1, DIRECT),
+ HWCTX_REGINFO(0x34c, 1, DIRECT),
+ HWCTX_REGINFO(0x34d, 1, DIRECT),
+ HWCTX_REGINFO(0x34e, 1, DIRECT),
+ HWCTX_REGINFO(0x34f, 1, DIRECT),
+ HWCTX_REGINFO(0x350, 1, DIRECT),
+ HWCTX_REGINFO(0x351, 1, DIRECT),
+ HWCTX_REGINFO(0x352, 1, DIRECT),
+ HWCTX_REGINFO(0x353, 1, DIRECT),
+ HWCTX_REGINFO(0x354, 1, DIRECT),
+ HWCTX_REGINFO(0x355, 1, DIRECT),
+ HWCTX_REGINFO(0x356, 1, DIRECT),
+ HWCTX_REGINFO(0x357, 1, DIRECT),
+ HWCTX_REGINFO(0x358, 1, DIRECT),
+ HWCTX_REGINFO(0x359, 1, DIRECT),
+ HWCTX_REGINFO(0x35a, 1, DIRECT),
+ HWCTX_REGINFO(0x35b, 1, DIRECT),
+ HWCTX_REGINFO(0x363, 1, DIRECT),
+ HWCTX_REGINFO(0x364, 1, DIRECT),
+ HWCTX_REGINFO(0x400, 2, DIRECT),
+ HWCTX_REGINFO(0x402, 1, DIRECT),
+ HWCTX_REGINFO(0x403, 1, DIRECT),
+ HWCTX_REGINFO(0x404, 1, DIRECT),
+ HWCTX_REGINFO(0x405, 1, DIRECT),
+ HWCTX_REGINFO(0x406, 1, DIRECT),
+ HWCTX_REGINFO(0x407, 1, DIRECT),
+ HWCTX_REGINFO(0x408, 1, DIRECT),
+ HWCTX_REGINFO(0x409, 1, DIRECT),
+ HWCTX_REGINFO(0x40a, 1, DIRECT),
+ HWCTX_REGINFO(0x40b, 1, DIRECT),
+ HWCTX_REGINFO(0x40c, 1, DIRECT),
+ HWCTX_REGINFO(0x40d, 1, DIRECT),
+ HWCTX_REGINFO(0x40e, 1, DIRECT),
+ HWCTX_REGINFO(0x40f, 1, DIRECT),
+ HWCTX_REGINFO(0x411, 1, DIRECT),
+ HWCTX_REGINFO(0x500, 1, DIRECT),
+ HWCTX_REGINFO(0x501, 1, DIRECT),
+ HWCTX_REGINFO(0x502, 1, DIRECT),
+ HWCTX_REGINFO(0x503, 1, DIRECT),
+ HWCTX_REGINFO(0x520, 32, DIRECT),
+ HWCTX_REGINFO(0x540, 64, INDIRECT),
+ HWCTX_REGINFO(0x600, 0, INDIRECT_OFFSET),
+ HWCTX_REGINFO(0x602, 16, INDIRECT_DATA),
+ HWCTX_REGINFO(0x603, 128, INDIRECT),
+ HWCTX_REGINFO(0x608, 4, DIRECT),
+ HWCTX_REGINFO(0x60e, 1, DIRECT),
+ HWCTX_REGINFO(0x700, 64, INDIRECT),
+ HWCTX_REGINFO(0x710, 16, DIRECT),
+ HWCTX_REGINFO(0x720, 32, DIRECT),
+ HWCTX_REGINFO(0x740, 1, DIRECT),
+ HWCTX_REGINFO(0x741, 1, DIRECT),
+ HWCTX_REGINFO(0x800, 0, INDIRECT_OFFSET),
+ HWCTX_REGINFO(0x802, 16, INDIRECT_DATA),
+ HWCTX_REGINFO(0x803, 512, INDIRECT),
+ HWCTX_REGINFO(0x805, 64, INDIRECT),
+ HWCTX_REGINFO(0x820, 32, DIRECT),
+ HWCTX_REGINFO(0x900, 64, INDIRECT),
+ HWCTX_REGINFO(0x902, 1, DIRECT),
+ HWCTX_REGINFO(0x903, 1, DIRECT),
+ HWCTX_REGINFO(0xa02, 1, DIRECT),
+ HWCTX_REGINFO(0xa03, 1, DIRECT),
+ HWCTX_REGINFO(0xa04, 1, DIRECT),
+ HWCTX_REGINFO(0xa05, 1, DIRECT),
+ HWCTX_REGINFO(0xa06, 1, DIRECT),
+ HWCTX_REGINFO(0xa07, 1, DIRECT),
+ HWCTX_REGINFO(0xa08, 1, DIRECT),
+ HWCTX_REGINFO(0xa09, 1, DIRECT),
+ HWCTX_REGINFO(0xa0a, 1, DIRECT),
+ HWCTX_REGINFO(0xa0b, 1, DIRECT),
+ HWCTX_REGINFO(0x205, 1024, INDIRECT)
+};
+
+
+/*** restore ***/
+
+static unsigned int context_restore_size = 0;
+
+static void restore_begin(u32 *ptr, u32 waitbase)
+{
+ /* set class to host */
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+ /* increment sync point base */
+ ptr[1] = nvhost_class_host_incr_syncpt_base(waitbase, 1);
+ /* set class to 3D */
+ ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ /* program PSEQ_QUAD_ID */
+ ptr[3] = nvhost_opcode_imm(0x545, 0);
+}
+#define RESTORE_BEGIN_SIZE 4
+
+static void restore_end(u32 *ptr, u32 syncpt_id)
+{
+ /* syncpt increment to track restore gather. */
+ ptr[0] = nvhost_opcode_imm(0x0, ((1UL << 8) | (u8)(syncpt_id & 0xff)));
+}
+#define RESTORE_END_SIZE 1
+
+static void restore_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_incr(start_reg, count);
+}
+#define RESTORE_DIRECT_SIZE 1
+
+static void restore_indoffset(u32 *ptr, u32 offset_reg, u32 offset)
+{
+ ptr[0] = nvhost_opcode_imm(offset_reg, offset);
+}
+#define RESTORE_INDOFFSET_SIZE 1
+
+static void restore_inddata(u32 *ptr, u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_nonincr(data_reg, count);
+}
+#define RESTORE_INDDATA_SIZE 1
+
+static void restore_registers_from_fifo(u32 *ptr, unsigned int count,
+ struct nvhost_channel *channel,
+ unsigned int *pending)
+{
+ void __iomem *chan_regs = channel->aperture;
+ unsigned int entries = *pending;
+ while (count) {
+ unsigned int num;
+
+ while (!entries) {
+ /* query host for number of entries in fifo */
+ entries = nvhost_channel_fifostat_outfentries(
+ readl(chan_regs + HOST1X_CHANNEL_FIFOSTAT));
+ if (!entries)
+ cpu_relax();
+ /* TODO: [ahowe 2010-06-14] timeout */
+ }
+ num = min(entries, count);
+ entries -= num;
+ count -= num;
+
+ while (num & ~0x3) {
+ u32 arr[4];
+ arr[0] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[1] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[2] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[3] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ memcpy(ptr, arr, 4*sizeof(u32));
+ ptr += 4;
+ num -= 4;
+ }
+ while (num--)
+ *ptr++ = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ }
+ *pending = entries;
+}
+
+static void setup_restore(u32 *ptr, u32 waitbase)
+{
+ const struct hwctx_reginfo *r;
+ const struct hwctx_reginfo *rend;
+
+ restore_begin(ptr, waitbase);
+ ptr += RESTORE_BEGIN_SIZE;
+
+ r = ctxsave_regs_3d;
+ rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+ for ( ; r != rend; ++r) {
+ u32 offset = r->offset;
+ u32 count = r->count;
+ switch (r->type) {
+ case HWCTX_REGINFO_DIRECT:
+ restore_direct(ptr, offset, count);
+ ptr += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT:
+ restore_indoffset(ptr, offset, 0);
+ ptr += RESTORE_INDOFFSET_SIZE;
+ restore_inddata(ptr, offset + 1, count);
+ ptr += RESTORE_INDDATA_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_OFFSET:
+ restore_indoffset(ptr, offset, count);
+ ptr += RESTORE_INDOFFSET_SIZE;
+ continue; /* INDIRECT_DATA follows with real count */
+ case HWCTX_REGINFO_INDIRECT_DATA:
+ restore_inddata(ptr, offset, count);
+ ptr += RESTORE_INDDATA_SIZE;
+ break;
+ }
+ ptr += count;
+ }
+
+ restore_end(ptr, NVSYNCPT_3D);
+ wmb();
+}
+
+/*** save ***/
+
+/* the same context save command sequence is used for all contexts. */
+static struct nvmap_handle_ref *context_save_buf = NULL;
+static u32 context_save_phys = 0;
+static u32 *context_save_ptr = NULL;
+static unsigned int context_save_size = 0;
+
+static void save_begin(u32 *ptr, u32 syncpt_id, u32 waitbase)
+{
+ /* set class to the unit to flush */
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ /*
+ * Flush pipe and signal context read thread to start reading
+ * sync point increment
+ */
+ ptr[1] = nvhost_opcode_imm(0, 0x100 | syncpt_id);
+ ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+ /* wait for base+1 */
+ ptr[3] = nvhost_class_host_wait_syncpt_base(syncpt_id, waitbase, 1);
+ ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ ptr[5] = nvhost_opcode_imm(0, syncpt_id);
+ ptr[6] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, 0, 0);
+}
+#define SAVE_BEGIN_SIZE 7
+
+static void save_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+ ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ start_reg, true);
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+#define SAVE_DIRECT_SIZE 3
+
+static void save_indoffset(u32 *ptr, u32 offset_reg, u32 offset)
+{
+ ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+ ptr[1] = nvhost_class_host_indoff_reg_write(NV_HOST_MODULE_GR3D,
+ offset_reg, true);
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, 1);
+ ptr[3] = offset;
+}
+#define SAVE_INDOFFSET_SIZE 4
+
+static inline void save_inddata(u32 *ptr, u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+ ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ data_reg, false);
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+#define SAVE_INDDDATA_SIZE 3
+
+static void save_end(u32 *ptr, u32 syncpt_id, u32 waitbase)
+{
+ /* Wait for context read service */
+ ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+ ptr[1] = nvhost_class_host_wait_syncpt_base(syncpt_id, waitbase, 3);
+ /* Increment syncpoint base */
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+ ptr[3] = nvhost_class_host_incr_syncpt_base(waitbase, 3);
+ /* set class back to the unit */
+ ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+}
+#define SAVE_END_SIZE 5
+
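+/*
+ * Two-pass helper: called once with ptr == NULL just to size the save
+ * and restore sequences, then again with a real buffer to emit the
+ * save commands.
+ */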
+static void __init setup_save(
+ u32 *ptr, unsigned int *words_save, unsigned int *words_restore,
+ u32 syncpt_id, u32 waitbase)
+{
+ const struct hwctx_reginfo *r;
+ const struct hwctx_reginfo *rend;
+ unsigned int save = SAVE_BEGIN_SIZE + SAVE_END_SIZE;
+ unsigned int restore = RESTORE_BEGIN_SIZE + RESTORE_END_SIZE;
+
+ if (ptr) {
+ save_begin(ptr, syncpt_id, waitbase);
+ ptr += SAVE_BEGIN_SIZE;
+ }
+
+ r = ctxsave_regs_3d;
+ rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+ for ( ; r != rend; ++r) {
+ u32 offset = r->offset;
+ u32 count = r->count;
+ switch (r->type) {
+ case HWCTX_REGINFO_DIRECT:
+ if (ptr) {
+ save_direct(ptr, offset, count);
+ ptr += SAVE_DIRECT_SIZE;
+ }
+ save += SAVE_DIRECT_SIZE;
+ restore += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT:
+ if (ptr) {
+ save_indoffset(ptr, offset, 0);
+ ptr += SAVE_INDOFFSET_SIZE;
+ save_inddata(ptr, offset + 1, count);
+ ptr += SAVE_INDDDATA_SIZE;
+ }
+ save += SAVE_INDOFFSET_SIZE;
+ restore += RESTORE_INDOFFSET_SIZE;
+ save += SAVE_INDDDATA_SIZE;
+ restore += RESTORE_INDDATA_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_OFFSET:
+ if (ptr) {
+ save_indoffset(ptr, offset, count);
+ ptr += SAVE_INDOFFSET_SIZE;
+ }
+ save += SAVE_INDOFFSET_SIZE;
+ restore += RESTORE_INDOFFSET_SIZE;
+ continue; /* INDIRECT_DATA follows with real count */
+ case HWCTX_REGINFO_INDIRECT_DATA:
+ if (ptr) {
+ save_inddata(ptr, offset, count);
+ ptr += SAVE_INDDDATA_SIZE;
+ }
+ save += SAVE_INDDDATA_SIZE;
+ restore += RESTORE_INDDATA_SIZE;
+ break;
+ }
+ if (ptr) {
+ memset(ptr, 0, count * 4);
+ ptr += count;
+ }
+ save += count;
+ restore += count;
+ }
+
+ if (ptr)
+ save_end(ptr, syncpt_id, waitbase);
+
+ if (words_save)
+ *words_save = save;
+ if (words_restore)
+ *words_restore = restore;
+ wmb();
+}
+
+/*** ctx3d ***/
+
+static struct nvhost_hwctx *ctx3d_alloc(struct nvhost_channel *ch)
+{
+ struct nvhost_hwctx *ctx;
+ struct nvmap_client *nvmap = ch->dev->nvmap;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ ctx->restore = nvmap_alloc(nvmap, context_restore_size * 4, 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+
+ if (IS_ERR_OR_NULL(ctx->restore)) {
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->save_cpu_data = nvmap_mmap(ctx->restore);
+ if (!ctx->save_cpu_data) {
+ nvmap_free(nvmap, ctx->restore);
+ kfree(ctx);
+ return NULL;
+ }
+
+ setup_restore(ctx->save_cpu_data, NVWAITBASE_3D);
+ ctx->channel = ch;
+ ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
+ ctx->restore_size = context_restore_size;
+ ctx->save = context_save_buf;
+ ctx->save_phys = context_save_phys;
+ ctx->save_size = context_save_size;
+ ctx->save_incrs = 3;
+ ctx->restore_incrs = 1;
+ ctx->valid = false;
+ kref_init(&ctx->ref);
+ return ctx;
+}
+
+static void ctx3d_free(struct kref *ref)
+{
+ struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
+ struct nvmap_client *nvmap = ctx->channel->dev->nvmap;
+
+ nvmap_munmap(ctx->restore, ctx->save_cpu_data);
+ nvmap_unpin(nvmap, ctx->restore);
+ nvmap_free(nvmap, ctx->restore);
+ kfree(ctx);
+}
+
+static void ctx3d_get(struct nvhost_hwctx *ctx)
+{
+ kref_get(&ctx->ref);
+}
+
+static void ctx3d_put(struct nvhost_hwctx *ctx)
+{
+ kref_put(&ctx->ref, ctx3d_free);
+}
+
+static void ctx3d_save_service(struct nvhost_hwctx *ctx)
+{
+ const struct hwctx_reginfo *r;
+ const struct hwctx_reginfo *rend;
+ unsigned int pending = 0;
+ u32 *ptr = (u32 *)ctx->save_cpu_data + RESTORE_BEGIN_SIZE;
+
+ BUG_ON(!ctx->save_cpu_data);
+
+ r = ctxsave_regs_3d;
+ rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+ for ( ; r != rend; ++r) {
+ u32 count = r->count;
+ switch (r->type) {
+ case HWCTX_REGINFO_DIRECT:
+ ptr += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT:
+ ptr += RESTORE_INDOFFSET_SIZE + RESTORE_INDDATA_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_OFFSET:
+ ptr += RESTORE_INDOFFSET_SIZE;
+ continue; /* INDIRECT_DATA follows with real count */
+ case HWCTX_REGINFO_INDIRECT_DATA:
+ ptr += RESTORE_INDDATA_SIZE;
+ break;
+ }
+ restore_registers_from_fifo(ptr, count, ctx->channel, &pending);
+ ptr += count;
+ }
+
+ BUG_ON((u32)((ptr + RESTORE_END_SIZE) - (u32*)ctx->save_cpu_data)
+ != context_restore_size);
+
+ wmb();
+ nvhost_syncpt_cpu_incr(&ctx->channel->dev->syncpt, NVSYNCPT_3D);
+}
+
+
+/*** nvhost_3dctx ***/
+
+int __init nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h)
+{
+ struct nvhost_channel *ch;
+ struct nvmap_client *nvmap;
+
+ ch = container_of(h, struct nvhost_channel, ctxhandler);
+ nvmap = ch->dev->nvmap;
+
+ setup_save(NULL, &context_save_size, &context_restore_size, 0, 0);
+
+ context_save_buf = nvmap_alloc(nvmap, context_save_size * 4, 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+
+ if (IS_ERR(context_save_buf)) {
+ int err = PTR_ERR(context_save_buf);
+ context_save_buf = NULL;
+ return err;
+ }
+
+ context_save_ptr = nvmap_mmap(context_save_buf);
+ if (!context_save_ptr) {
+ nvmap_free(nvmap, context_save_buf);
+ context_save_buf = NULL;
+ return -ENOMEM;
+ }
+
+ context_save_phys = nvmap_pin(nvmap, context_save_buf);
+ setup_save(context_save_ptr, NULL, NULL, NVSYNCPT_3D, NVWAITBASE_3D);
+
+ h->alloc = ctx3d_alloc;
+ h->get = ctx3d_get;
+ h->put = ctx3d_put;
+ h->save_service = ctx3d_save_service;
+ return 0;
+}
+
+/* TODO: [ahatala 2010-05-27] */
+int __init nvhost_mpectx_handler_init(struct nvhost_hwctx_handler *h)
+{
+ return 0;
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_acm.c
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_acm.h"
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <mach/powergate.h>
+#include <mach/clk.h>
+
+#define ACM_TIMEOUT (1*HZ)
+
+void nvhost_module_busy(struct nvhost_module *mod)
+{
+ mutex_lock(&mod->lock);
+ cancel_delayed_work(&mod->powerdown);
+ if ((atomic_inc_return(&mod->refcount) == 1) && !mod->powered) {
+ if (mod->parent)
+ nvhost_module_busy(mod->parent);
+ if (mod->powergate_id != -1) {
+ BUG_ON(mod->num_clks != 1);
+ tegra_powergate_sequence_power_up(
+ mod->powergate_id, mod->clk[0]);
+ } else {
+ int i;
+ for (i = 0; i < mod->num_clks; i++)
+ clk_enable(mod->clk[i]);
+ }
+ if (mod->func)
+ mod->func(mod, NVHOST_POWER_ACTION_ON);
+ mod->powered = true;
+ }
+ mutex_unlock(&mod->lock);
+}
+
+static void powerdown_handler(struct work_struct *work)
+{
+ struct nvhost_module *mod;
+ mod = container_of(to_delayed_work(work), struct nvhost_module, powerdown);
+ mutex_lock(&mod->lock);
+ if ((atomic_read(&mod->refcount) == 0) && mod->powered) {
+ int i;
+ if (mod->func)
+ mod->func(mod, NVHOST_POWER_ACTION_OFF);
+ for (i = 0; i < mod->num_clks; i++) {
+ clk_disable(mod->clk[i]);
+ }
+ if (mod->powergate_id != -1) {
+ tegra_periph_reset_assert(mod->clk[0]);
+ tegra_powergate_power_off(mod->powergate_id);
+ }
+ mod->powered = false;
+ if (mod->parent)
+ nvhost_module_idle(mod->parent);
+ }
+ mutex_unlock(&mod->lock);
+}
+
+void nvhost_module_idle_mult(struct nvhost_module *mod, int refs)
+{
+ bool kick = false;
+
+ mutex_lock(&mod->lock);
+ if (atomic_sub_return(refs, &mod->refcount) == 0) {
+ BUG_ON(!mod->powered);
+ schedule_delayed_work(&mod->powerdown, ACM_TIMEOUT);
+ kick = true;
+ }
+ mutex_unlock(&mod->lock);
+
+ if (kick)
+ wake_up(&mod->idle);
+}
+
+static const char *get_module_clk_id(const char *module, int index)
+{
+ if (index == 1 && strcmp(module, "gr2d") == 0)
+ return "epp";
+ else if (index == 0)
+ return module;
+ return NULL;
+}
+
+static int get_module_powergate_id(const char *module)
+{
+ if (strcmp(module, "gr3d") == 0)
+ return TEGRA_POWERGATE_3D;
+ else if (strcmp(module, "mpe") == 0)
+ return TEGRA_POWERGATE_MPE;
+ return -1;
+}
+
+int nvhost_module_init(struct nvhost_module *mod, const char *name,
+ nvhost_modulef func, struct nvhost_module *parent,
+ struct device *dev)
+{
+ int i = 0;
+ mod->name = name;
+
+ while (i < NVHOST_MODULE_MAX_CLOCKS) {
+ long rate;
+ mod->clk[i] = clk_get(dev, get_module_clk_id(name, i));
+ if (IS_ERR_OR_NULL(mod->clk[i]))
+ break;
+ rate = clk_round_rate(mod->clk[i], UINT_MAX);
+ if (rate < 0) {
+ pr_err("%s: can't get maximum rate for %s\n",
+ __func__, name);
+ break;
+ }
+ if (rate != clk_get_rate(mod->clk[i])) {
+ clk_set_rate(mod->clk[i], rate);
+ }
+ i++;
+ }
+
+ mod->num_clks = i;
+ mod->func = func;
+ mod->parent = parent;
+ mod->powered = false;
+ mod->powergate_id = get_module_powergate_id(name);
+ mutex_init(&mod->lock);
+ init_waitqueue_head(&mod->idle);
+ INIT_DELAYED_WORK(&mod->powerdown, powerdown_handler);
+
+ return 0;
+}
+
+static int is_module_idle(struct nvhost_module *mod)
+{
+ int count;
+ mutex_lock(&mod->lock);
+ count = atomic_read(&mod->refcount);
+ mutex_unlock(&mod->lock);
+ return (count == 0);
+}
+
+void nvhost_module_suspend(struct nvhost_module *mod)
+{
+ wait_event(mod->idle, is_module_idle(mod));
+ flush_delayed_work(&mod->powerdown);
+ BUG_ON(mod->powered);
+}
+
+void nvhost_module_deinit(struct nvhost_module *mod)
+{
+ int i;
+ nvhost_module_suspend(mod);
+ for (i = 0; i < mod->num_clks; i++)
+ clk_put(mod->clk[i]);
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_acm.h
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_ACM_H
+#define __NVHOST_ACM_H
+
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+
+#define NVHOST_MODULE_MAX_CLOCKS 2
+
+struct nvhost_module;
+
+enum nvhost_power_action {
+ NVHOST_POWER_ACTION_OFF,
+ NVHOST_POWER_ACTION_ON,
+};
+
+typedef void (*nvhost_modulef)(struct nvhost_module *mod, enum nvhost_power_action action);
+
+struct nvhost_module {
+ const char *name;
+ nvhost_modulef func;
+ struct delayed_work powerdown;
+ struct clk *clk[NVHOST_MODULE_MAX_CLOCKS];
+ int num_clks;
+ struct mutex lock;
+ bool powered;
+ atomic_t refcount;
+ wait_queue_head_t idle;
+ struct nvhost_module *parent;
+ int powergate_id;
+};
+
+int nvhost_module_init(struct nvhost_module *mod, const char *name,
+ nvhost_modulef func, struct nvhost_module *parent,
+ struct device *dev);
+void nvhost_module_deinit(struct nvhost_module *mod);
+void nvhost_module_suspend(struct nvhost_module *mod);
+
+void nvhost_module_busy(struct nvhost_module *mod);
+void nvhost_module_idle_mult(struct nvhost_module *mod, int refs);
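+
+/*
+ * Typical usage (illustrative): bracket hardware access with a
+ * busy/idle pair so a module's clocks stay enabled only while they
+ * are needed:
+ *
+ *	nvhost_module_busy(mod);
+ *	... access hardware ...
+ *	nvhost_module_idle(mod);
+ */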
+
+static inline bool nvhost_module_powered(struct nvhost_module *mod)
+{
+ return mod->powered;
+}
+
+static inline void nvhost_module_idle(struct nvhost_module *mod)
+{
+ nvhost_module_idle_mult(mod, 1);
+}
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_cdma.c
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cdma.h"
+#include "dev.h"
+#include <asm/cacheflush.h>
+
+/*
+ * TODO:
+ * stats
+ * - for figuring out what to optimize further
+ * resizable push buffer & sync queue
+ * - some channels hardly need any, some channels (3d) could use more
+ */
+
+#define cdma_to_channel(cdma) container_of(cdma, struct nvhost_channel, cdma)
+#define cdma_to_dev(cdma) ((cdma_to_channel(cdma))->dev)
+#define cdma_to_nvmap(cdma) ((cdma_to_dev(cdma))->nvmap)
+#define pb_to_cdma(pb) container_of(pb, struct nvhost_cdma, push_buffer)
+
+/*
+ * push_buffer
+ *
+ * The push buffer is a circular array of words to be fetched by command DMA.
+ * Note that it works slightly differently to the sync queue; fence == cur
+ * means that the push buffer is full, not empty.
+ */
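+
+/*
+ * Example: after reset_push_buffer(), cur == 0 and fence ==
+ * PUSH_BUFFER_SIZE - 8, so PUSH_BUFFER_SIZE/8 - 1 slots are free; one
+ * slot is always kept unused so that cur == fence can unambiguously
+ * mean "full".
+ */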
+
+/* 8 bytes per slot. (This number does not include the final RESTART.) */
+#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)
+
+static void destroy_push_buffer(struct push_buffer *pb);
+
+/**
+ * Reset to empty push buffer
+ */
+static void reset_push_buffer(struct push_buffer *pb)
+{
+ pb->fence = PUSH_BUFFER_SIZE - 8;
+ pb->cur = 0;
+}
+
+/**
+ * Init push buffer resources
+ */
+static int init_push_buffer(struct push_buffer *pb)
+{
+ struct nvhost_cdma *cdma = pb_to_cdma(pb);
+ struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
+ pb->mem = NULL;
+ pb->mapped = NULL;
+ pb->phys = 0;
+ reset_push_buffer(pb);
+
+ /* allocate and map pushbuffer memory */
+ pb->mem = nvmap_alloc(nvmap, PUSH_BUFFER_SIZE + 4, 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR_OR_NULL(pb->mem)) {
+ pb->mem = NULL;
+ goto fail;
+ }
+ pb->mapped = nvmap_mmap(pb->mem);
+ if (pb->mapped == NULL)
+ goto fail;
+
+ /* pin pushbuffer and get physical address */
+ pb->phys = nvmap_pin(nvmap, pb->mem);
+ if (pb->phys >= 0xfffff000) {
+ pb->phys = 0;
+ goto fail;
+ }
+
+ /* put the restart at the end of pushbuffer memory */
+ *(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) = nvhost_opcode_restart(pb->phys);
+
+ return 0;
+
+fail:
+ destroy_push_buffer(pb);
+ return -ENOMEM;
+}
+
+/**
+ * Clean up push buffer resources
+ */
+static void destroy_push_buffer(struct push_buffer *pb)
+{
+ struct nvhost_cdma *cdma = pb_to_cdma(pb);
+ struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
+ if (pb->mapped)
+ nvmap_munmap(pb->mem, pb->mapped);
+
+ if (pb->phys != 0)
+ nvmap_unpin(nvmap, pb->mem);
+
+ if (pb->mem)
+ nvmap_free(nvmap, pb->mem);
+
+ pb->mem = NULL;
+ pb->mapped = NULL;
+ pb->phys = 0;
+}
+
+/**
+ * Push two words to the push buffer
+ * Caller must ensure push buffer is not full
+ */
+static void push_to_push_buffer(struct push_buffer *pb, u32 op1, u32 op2)
+{
+ u32 cur = pb->cur;
+ u32 *p = (u32*)((u32)pb->mapped + cur);
+ BUG_ON(cur == pb->fence);
+ *(p++) = op1;
+ *(p++) = op2;
+ pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
+ /* printk("push_to_push_buffer: op1=%08x; op2=%08x; cur=%x\n", op1, op2, pb->cur); */
+}
+
+/**
+ * Pop a number of two word slots from the push buffer
+ * Caller must ensure push buffer is not empty
+ */
+static void pop_from_push_buffer(struct push_buffer *pb, unsigned int slots)
+{
+ pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
+}
+
+/**
+ * Return the number of two word slots free in the push buffer
+ */
+static u32 push_buffer_space(struct push_buffer *pb)
+{
+ return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
+}
+
+static u32 push_buffer_putptr(struct push_buffer *pb)
+{
+ return pb->phys + pb->cur;
+}
+
+
+/* Sync Queue
+ *
+ * The sync queue is a circular buffer of u32s interpreted as:
+ * 0: SyncPointID
+ * 1: SyncPointValue
+ * 2: NumSlots (how many pushbuffer slots to free)
+ * 3: NumHandles
+ * 4: nvmap client which pinned the handles
+ * 5..: NumHandles * nvmemhandle to unpin
+ *
+ * There's always one word unused, so (accounting for wrap):
+ * - Write == Read => queue empty
+ * - Write + 1 == Read => queue full
+ * The queue must not be left with less than SYNC_QUEUE_MIN_ENTRY words
+ * of space at the end of the array.
+ *
+ * We want to pass contiguous arrays of handles to nvmap_unpin_handles(), so arrays
+ * that would wrap at the end of the buffer will be split into two (or more)
+ * entries.
+ */
+
+/* Number of words needed to store an entry containing one handle */
+#define SYNC_QUEUE_MIN_ENTRY (4 + (2 * sizeof(void *) / sizeof(u32)))
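+
+/*
+ * Worked example (32-bit, so sizeof(void *) == sizeof(u32)): an entry
+ * carrying two handles takes 4 header words + 1 word for the nvmap
+ * client pointer + 2 handle words = 7 words, and SYNC_QUEUE_MIN_ENTRY
+ * works out to 4 + 2 = 6 words.
+ */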
+
+/**
+ * Reset to empty queue.
+ */
+static void reset_sync_queue(struct sync_queue *queue)
+{
+ queue->read = 0;
+ queue->write = 0;
+}
+
+/**
+ * Find the number of handles that can be stashed in the sync queue without
+ * waiting.
+ * 0 -> queue is full, must update to wait for some entries to be freed.
+ */
+static unsigned int sync_queue_space(struct sync_queue *queue)
+{
+ unsigned int read = queue->read;
+ unsigned int write = queue->write;
+ u32 size;
+
+ BUG_ON(read > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+ BUG_ON(write > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+
+ /*
+ * We can use all of the space up to the end of the buffer, unless the
+ * read position is within that space (the read position may advance
+ * asynchronously, but that can't take space away once we've seen it).
+ */
+ if (read > write) {
+ size = (read - 1) - write;
+ } else {
+ size = NVHOST_SYNC_QUEUE_SIZE - write;
+
+ /*
+ * If the read position is zero, it gets complicated. We can't
+ * use the last word in the buffer, because that would leave
+ * the queue empty.
+		 * We also can't use so much that fewer than
+		 * SYNC_QUEUE_MIN_ENTRY words would remain:
+		 * add_to_sync_queue would then wrap, leaving
+		 * write == read == 0, which again reads as an empty queue.
+ */
+ if (read == 0)
+ size -= SYNC_QUEUE_MIN_ENTRY;
+ }
+
+ /*
+ * There must be room for an entry header and at least one handle,
+ * otherwise we report a full queue.
+ */
+ if (size < SYNC_QUEUE_MIN_ENTRY)
+ return 0;
+ /* Minimum entry stores one handle */
+ return (size - SYNC_QUEUE_MIN_ENTRY) + 1;
+}
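+
+/*
+ * Worked example for sync_queue_space() (a sketch, assuming a 32-bit
+ * build where SYNC_QUEUE_MIN_ENTRY == 6): with read == 0 and
+ * write == 8000, size = 8192 - 8000 - 6 = 186 words, so the result is
+ * (186 - 6) + 1 == 181 handles: one entry of n handles needs 5 + n
+ * words here, and 5 + 181 == 186.
+ */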
+
+/**
+ * Add an entry to the sync queue.
+ */
+#define entry_size(_cnt) ((1 + (_cnt)) * sizeof(void *) / sizeof(u32))
+
+static void add_to_sync_queue(struct sync_queue *queue,
+ u32 sync_point_id, u32 sync_point_value,
+ u32 nr_slots, struct nvmap_client *user_nvmap,
+ struct nvmap_handle **handles, u32 nr_handles)
+{
+ u32 write = queue->write;
+ u32 *p = queue->buffer + write;
+ u32 size = 4 + (entry_size(nr_handles));
+
+ BUG_ON(sync_point_id == NVSYNCPT_INVALID);
+ BUG_ON(sync_queue_space(queue) < nr_handles);
+
+ write += size;
+ BUG_ON(write > NVHOST_SYNC_QUEUE_SIZE);
+
+ *p++ = sync_point_id;
+ *p++ = sync_point_value;
+ *p++ = nr_slots;
+ *p++ = nr_handles;
+ BUG_ON(!user_nvmap);
+ *(struct nvmap_client **)p = nvmap_client_get(user_nvmap);
+
+ p = (u32 *)((void *)p + sizeof(struct nvmap_client *));
+
+ if (nr_handles)
+ memcpy(p, handles, nr_handles * sizeof(struct nvmap_handle *));
+
+ /* If there's not enough room for another entry, wrap to the start. */
+ if ((write + SYNC_QUEUE_MIN_ENTRY) > NVHOST_SYNC_QUEUE_SIZE) {
+ /*
+ * It's an error for the read position to be zero, as that
+ * would mean we emptied the queue while adding something.
+ */
+ BUG_ON(queue->read == 0);
+ write = 0;
+ }
+
+ queue->write = write;
+}
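+
+/*
+ * Resulting entry layout (illustrative, 32-bit build, two handles):
+ * buffer[write + 0] = syncpt id
+ * buffer[write + 1] = syncpt value
+ * buffer[write + 2] = nr_slots
+ * buffer[write + 3] = nr_handles (2)
+ * buffer[write + 4] = nvmap client pointer
+ * buffer[write + 5..6] = handle pointers
+ * i.e. 4 + entry_size(2) == 7 words in total.
+ */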
+
+/**
+ * Get a pointer to the next entry in the queue, or NULL if the queue is empty.
+ * Doesn't consume the entry.
+ */
+static u32 *sync_queue_head(struct sync_queue *queue)
+{
+ u32 read = queue->read;
+ u32 write = queue->write;
+
+ BUG_ON(read > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+ BUG_ON(write > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+
+ if (read == write)
+ return NULL;
+ return queue->buffer + read;
+}
+
+/**
+ * Advances to the next queue entry, if you want to consume it.
+ */
+static void
+dequeue_sync_queue_head(struct sync_queue *queue)
+{
+ u32 read = queue->read;
+ u32 size;
+
+ BUG_ON(read == queue->write);
+
+ size = 4 + entry_size(queue->buffer[read + 3]);
+
+ read += size;
+ BUG_ON(read > NVHOST_SYNC_QUEUE_SIZE);
+
+ /* If there's not enough room for another entry, wrap to the start. */
+ if ((read + SYNC_QUEUE_MIN_ENTRY) > NVHOST_SYNC_QUEUE_SIZE)
+ read = 0;
+
+ queue->read = read;
+}
+
+
+/*** Cdma internal stuff ***/
+
+/**
+ * Kick channel DMA into action by writing its PUT offset (if it has changed)
+ */
+static void kick_cdma(struct nvhost_cdma *cdma)
+{
+ u32 put = push_buffer_putptr(&cdma->push_buffer);
+ if (put != cdma->last_put) {
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+ wmb();
+ writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT);
+ cdma->last_put = put;
+ }
+}
+
+/**
+ * Return the status of the cdma's sync queue or push buffer for the given event
+ * - sq empty: returns 1 for empty, 0 for not empty (as in "1 empty queue" :-)
+ * - sq space: returns the number of handles that can be stored in the queue
+ * - pb space: returns the number of free slots in the channel's push buffer
+ * Must be called with the cdma lock held.
+ */
+static unsigned int cdma_status(struct nvhost_cdma *cdma, enum cdma_event event)
+{
+ switch (event) {
+ case CDMA_EVENT_SYNC_QUEUE_EMPTY:
+ return sync_queue_head(&cdma->sync_queue) ? 0 : 1;
+ case CDMA_EVENT_SYNC_QUEUE_SPACE:
+ return sync_queue_space(&cdma->sync_queue);
+ case CDMA_EVENT_PUSH_BUFFER_SPACE:
+ return push_buffer_space(&cdma->push_buffer);
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Sleep (if necessary) until the requested event happens
+ * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty;
+ *   returns 1
+ * - CDMA_EVENT_SYNC_QUEUE_SPACE : there is space in the sync queue;
+ *   returns the amount of space (> 0)
+ * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer;
+ *   returns the amount of space (> 0)
+ * Must be called with the cdma lock held.
+ */
+static unsigned int wait_cdma(struct nvhost_cdma *cdma, enum cdma_event event)
+{
+ for (;;) {
+ unsigned int space = cdma_status(cdma, event);
+ if (space)
+ return space;
+
+ BUG_ON(cdma->event != CDMA_EVENT_NONE);
+ cdma->event = event;
+
+ mutex_unlock(&cdma->lock);
+ down(&cdma->sem);
+ mutex_lock(&cdma->lock);
+ }
+}
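+
+/*
+ * Note on the handshake above: the waiter records what it needs in
+ * cdma->event, drops the lock and sleeps on cdma->sem. update_cdma()
+ * resets cdma->event to CDMA_EVENT_NONE and ups the semaphore once the
+ * event has occurred; wait_cdma() then re-checks the status under the
+ * lock. Only one outstanding waiter per channel is supported (hence
+ * the BUG_ON).
+ */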
+
+/**
+ * For all sync queue entries that have already finished according to the
+ * current sync point registers:
+ * - unpin & unref their mems
+ * - pop their push buffer slots
+ * - remove them from the sync queue
+ * This is normally called from the host code's worker thread, but can be
+ * called manually if necessary.
+ * Must be called with the cdma lock held.
+ */
+static void update_cdma(struct nvhost_cdma *cdma)
+{
+ bool signal = false;
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+
+ BUG_ON(!cdma->running);
+
+ /*
+ * Walk the sync queue, reading the sync point registers as necessary,
+ * to consume as many sync queue entries as possible without blocking
+ */
+ for (;;) {
+ u32 syncpt_id, syncpt_val;
+ unsigned int nr_slots, nr_handles;
+ struct nvmap_handle **handles;
+ struct nvmap_client *nvmap;
+ u32 *sync;
+
+ sync = sync_queue_head(&cdma->sync_queue);
+ if (!sync) {
+ if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+ signal = true;
+ break;
+ }
+
+ syncpt_id = *sync++;
+ syncpt_val = *sync++;
+
+ BUG_ON(syncpt_id == NVSYNCPT_INVALID);
+
+ /* Check whether this syncpt has completed, and bail if not */
+ if (!nvhost_syncpt_min_cmp(&dev->syncpt, syncpt_id, syncpt_val))
+ break;
+
+ nr_slots = *sync++;
+ nr_handles = *sync++;
+ nvmap = *(struct nvmap_client **)sync;
+ sync = ((void *)sync + sizeof(struct nvmap_client *));
+ handles = (struct nvmap_handle **)sync;
+
+ BUG_ON(!nvmap);
+
+ /* Unpin the memory */
+ nvmap_unpin_handles(nvmap, handles, nr_handles);
+
+ nvmap_client_put(nvmap);
+
+ /* Pop push buffer slots */
+ if (nr_slots) {
+ pop_from_push_buffer(&cdma->push_buffer, nr_slots);
+ if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
+ signal = true;
+ }
+
+ dequeue_sync_queue_head(&cdma->sync_queue);
+ if (cdma->event == CDMA_EVENT_SYNC_QUEUE_SPACE)
+ signal = true;
+ }
+
+	/* Wake up wait_cdma() if the requested event happened */
+ if (signal) {
+ cdma->event = CDMA_EVENT_NONE;
+ up(&cdma->sem);
+ }
+}
+
+/**
+ * Create a cdma
+ */
+int nvhost_cdma_init(struct nvhost_cdma *cdma)
+{
+ int err;
+
+ mutex_init(&cdma->lock);
+ sema_init(&cdma->sem, 0);
+ cdma->event = CDMA_EVENT_NONE;
+ cdma->running = false;
+ err = init_push_buffer(&cdma->push_buffer);
+ if (err)
+ return err;
+ reset_sync_queue(&cdma->sync_queue);
+ return 0;
+}
+
+/**
+ * Destroy a cdma
+ */
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma)
+{
+ BUG_ON(cdma->running);
+ destroy_push_buffer(&cdma->push_buffer);
+}
+
+static void start_cdma(struct nvhost_cdma *cdma)
+{
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+ if (cdma->running)
+ return;
+
+ cdma->last_put = push_buffer_putptr(&cdma->push_buffer);
+
+ writel(nvhost_channel_dmactrl(true, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ /* set base, put, end pointer (all of memory) */
+ writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
+ writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
+ writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);
+
+ /* reset GET */
+ writel(nvhost_channel_dmactrl(true, true, true),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ /* start the command DMA */
+ writel(nvhost_channel_dmactrl(false, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+	cdma->running = true;
+}
+
+void nvhost_cdma_stop(struct nvhost_cdma *cdma)
+{
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+ if (!cdma->running)
+ return;
+
+ mutex_lock(&cdma->lock);
+ wait_cdma(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
+ mutex_unlock(&cdma->lock);
+ writel(nvhost_channel_dmactrl(true, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ cdma->running = false;
+}
+
+/**
+ * Begin a cdma submit
+ */
+void nvhost_cdma_begin(struct nvhost_cdma *cdma)
+{
+ if (!cdma->running)
+ start_cdma(cdma);
+ mutex_lock(&cdma->lock);
+ cdma->slots_free = 0;
+ cdma->slots_used = 0;
+}
+
+/**
+ * Push two words into a push buffer slot
+ * Blocks as necessary if the push buffer is full.
+ */
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
+{
+ u32 slots_free = cdma->slots_free;
+ if (slots_free == 0) {
+ kick_cdma(cdma);
+ slots_free = wait_cdma(cdma, CDMA_EVENT_PUSH_BUFFER_SPACE);
+ }
+ cdma->slots_free = slots_free - 1;
+ cdma->slots_used++;
+ push_to_push_buffer(&cdma->push_buffer, op1, op2);
+}
+
+/**
+ * End a cdma submit
+ * Kick off DMA, add a contiguous block of memory handles to the sync queue,
+ * and a number of slots to be freed from the pushbuffer.
+ * Blocks as necessary if the sync queue is full.
+ * The handles for a submit must all be pinned at the same time, but they
+ * can be unpinned in smaller chunks.
+ */
+void nvhost_cdma_end(struct nvmap_client *user_nvmap, struct nvhost_cdma *cdma,
+ u32 sync_point_id, u32 sync_point_value,
+ struct nvmap_handle **handles, unsigned int nr_handles)
+{
+ kick_cdma(cdma);
+
+ while (nr_handles || cdma->slots_used) {
+ unsigned int count;
+ /*
+ * Wait until there's enough room in the
+ * sync queue to write something.
+ */
+ count = wait_cdma(cdma, CDMA_EVENT_SYNC_QUEUE_SPACE);
+
+ /*
+ * Add reloc entries to sync queue (as many as will fit)
+ * and unlock it
+ */
+ if (count > nr_handles)
+ count = nr_handles;
+ add_to_sync_queue(&cdma->sync_queue, sync_point_id,
+ sync_point_value, cdma->slots_used,
+ user_nvmap, handles, count);
+ /* NumSlots only goes in the first packet */
+ cdma->slots_used = 0;
+ handles += count;
+ nr_handles -= count;
+ }
+
+ mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Update cdma state according to current sync point values
+ */
+void nvhost_cdma_update(struct nvhost_cdma *cdma)
+{
+ mutex_lock(&cdma->lock);
+ update_cdma(cdma);
+ mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Manually spin until all CDMA has finished. Used if an async update
+ * cannot be scheduled for any reason.
+ */
+void nvhost_cdma_flush(struct nvhost_cdma *cdma)
+{
+ mutex_lock(&cdma->lock);
+ while (sync_queue_head(&cdma->sync_queue)) {
+ update_cdma(cdma);
+ mutex_unlock(&cdma->lock);
+ schedule();
+ mutex_lock(&cdma->lock);
+ }
+ mutex_unlock(&cdma->lock);
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_cdma.h
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CDMA_H
+#define __NVHOST_CDMA_H
+
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+
+#include "nvhost_acm.h"
+
+/*
+ * cdma
+ *
+ * This is in charge of a host command DMA channel.
+ * Sends ops to a push buffer, and takes responsibility for unpinning
+ * (& possibly freeing) of memory after those ops have completed.
+ * Producer:
+ * begin
+ * push - send ops to the push buffer
+ * end - start command DMA and enqueue handles to be unpinned
+ * Consumer:
+ * update - call to update sync queue and push buffer, unpin memory
+ */
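+
+/*
+ * A minimal producer-side sketch (illustrative only; opcode assembly,
+ * syncpt bookkeeping and handle pinning are the caller's job, as in
+ * nvhost_channel_submit()):
+ *
+ *	nvhost_cdma_begin(&ch->cdma);
+ *	nvhost_cdma_push(&ch->cdma, op1, op2);	(once per op pair)
+ *	nvhost_cdma_end(nvmap, &ch->cdma, syncpt_id, syncpt_val,
+ *			handles, nr_handles);
+ */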
+
+/* Size of the sync queue. If it is too small, we won't be able to queue up
+ * many command buffers. If it is too large, we waste memory. */
+#define NVHOST_SYNC_QUEUE_SIZE 8192
+
+/* Number of gathers we allow to be queued up per channel. Must be a
+ * power of two. Currently sized such that the pushbuffer is 4KB (512*8B). */
+#define NVHOST_GATHER_QUEUE_SIZE 512
+
+struct push_buffer {
+ struct nvmap_handle_ref *mem; /* handle to pushbuffer memory */
+ u32 *mapped; /* mapped pushbuffer memory */
+ u32 phys; /* physical address of pushbuffer */
+ u32 fence; /* index we've written */
+ u32 cur; /* index to write to */
+};
+
+struct sync_queue {
+ unsigned int read; /* read position within buffer */
+ unsigned int write; /* write position within buffer */
+ u32 buffer[NVHOST_SYNC_QUEUE_SIZE]; /* queue data */
+};
+
+enum cdma_event {
+ CDMA_EVENT_NONE, /* not waiting for any event */
+ CDMA_EVENT_SYNC_QUEUE_EMPTY, /* wait for empty sync queue */
+ CDMA_EVENT_SYNC_QUEUE_SPACE, /* wait for space in sync queue */
+ CDMA_EVENT_PUSH_BUFFER_SPACE /* wait for space in push buffer */
+};
+
+struct nvhost_cdma {
+ struct mutex lock; /* controls access to shared state */
+ struct semaphore sem; /* signalled when event occurs */
+ enum cdma_event event; /* event that sem is waiting for */
+ unsigned int slots_used; /* pb slots used in current submit */
+ unsigned int slots_free; /* pb slots free in current submit */
+ unsigned int last_put; /* last value written to DMAPUT */
+ struct push_buffer push_buffer; /* channel's push buffer */
+ struct sync_queue sync_queue; /* channel's sync queue */
+ bool running;
+};
+
+int nvhost_cdma_init(struct nvhost_cdma *cdma);
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma);
+void nvhost_cdma_stop(struct nvhost_cdma *cdma);
+void nvhost_cdma_begin(struct nvhost_cdma *cdma);
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2);
+void nvhost_cdma_end(struct nvmap_client *user_nvmap,
+ struct nvhost_cdma *cdma,
+ u32 sync_point_id, u32 sync_point_value,
+ struct nvmap_handle **handles, unsigned int nr_handles);
+void nvhost_cdma_update(struct nvhost_cdma *cdma);
+void nvhost_cdma_flush(struct nvhost_cdma *cdma);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_channel.c
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "nvhost_hwctx.h"
+
+#include <linux/platform_device.h>
+
+#define NVMODMUTEX_2D_FULL (1)
+#define NVMODMUTEX_2D_SIMPLE (2)
+#define NVMODMUTEX_2D_SB_A (3)
+#define NVMODMUTEX_2D_SB_B (4)
+#define NVMODMUTEX_3D (5)
+#define NVMODMUTEX_DISPLAYA (6)
+#define NVMODMUTEX_DISPLAYB (7)
+#define NVMODMUTEX_VI (8)
+#define NVMODMUTEX_DSI (9)
+
+static void power_2d(struct nvhost_module *mod, enum nvhost_power_action action);
+static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action);
+static void power_mpe(struct nvhost_module *mod, enum nvhost_power_action action);
+
+static const struct nvhost_channeldesc channelmap[] = {
+{
+ /* channel 0 */
+ .name = "display",
+ .syncpts = BIT(NVSYNCPT_DISP0) | BIT(NVSYNCPT_DISP1) |
+ BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
+ .modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
+},
+{
+ /* channel 1 */
+ .name = "gr3d",
+ .syncpts = BIT(NVSYNCPT_3D),
+ .waitbases = BIT(NVWAITBASE_3D),
+ .modulemutexes = BIT(NVMODMUTEX_3D),
+ .class = NV_GRAPHICS_3D_CLASS_ID,
+ .power = power_3d,
+},
+{
+ /* channel 2 */
+ .name = "gr2d",
+ .syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
+ .waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
+ .modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
+ BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
+ .power = power_2d,
+},
+{
+ /* channel 3 */
+ .name = "isp",
+ .syncpts = 0,
+},
+{
+ /* channel 4 */
+ .name = "vi",
+ .syncpts = BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
+ BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+ BIT(NVSYNCPT_VI_ISP_4) | BIT(NVSYNCPT_VI_ISP_5),
+ .modulemutexes = BIT(NVMODMUTEX_VI),
+},
+{
+ /* channel 5 */
+ .name = "mpe",
+ .syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
+ BIT(NVSYNCPT_MPE_WR_SAFE),
+ .waitbases = BIT(NVWAITBASE_MPE),
+ .class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+ .power = power_mpe,
+},
+{
+ /* channel 6 */
+ .name = "dsi",
+ .syncpts = BIT(NVSYNCPT_DSI),
+ .modulemutexes = BIT(NVMODMUTEX_DSI),
+}};
+
+static inline void __iomem *channel_aperture(void __iomem *p, int ndx)
+{
+ ndx += NVHOST_CHANNEL_BASE;
+ p += NV_HOST1X_CHANNEL0_BASE;
+ p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
+ return p;
+}
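+
+/*
+ * Example: with NVHOST_CHANNEL_BASE == 0, NV_HOST1X_CHANNEL0_BASE == 0
+ * and NV_HOST1X_CHANNEL_MAP_SIZE_BYTES == 16384 (see nvhost_hardware.h),
+ * channel 2 ("gr2d") maps at aperture offset 2 * 16384 == 0x8000.
+ */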
+
+int __init nvhost_channel_init(struct nvhost_channel *ch,
+ struct nvhost_master *dev, int index)
+{
+ BUILD_BUG_ON(NVHOST_NUMCHANNELS != ARRAY_SIZE(channelmap));
+
+ ch->dev = dev;
+ ch->desc = &channelmap[index];
+ ch->aperture = channel_aperture(dev->aperture, index);
+ mutex_init(&ch->reflock);
+ mutex_init(&ch->submitlock);
+
+ return nvhost_hwctx_handler_init(&ch->ctxhandler, ch->desc->name);
+}
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch)
+{
+ int err = 0;
+ mutex_lock(&ch->reflock);
+ if (ch->refcount == 0) {
+ err = nvhost_module_init(&ch->mod, ch->desc->name,
+ ch->desc->power, &ch->dev->mod,
+ &ch->dev->pdev->dev);
+ if (!err) {
+ err = nvhost_cdma_init(&ch->cdma);
+ if (err)
+ nvhost_module_deinit(&ch->mod);
+ }
+ }
+ if (!err) {
+ ch->refcount++;
+ }
+ mutex_unlock(&ch->reflock);
+
+ return err ? NULL : ch;
+}
+
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx)
+{
+ if (ctx) {
+ mutex_lock(&ch->submitlock);
+ if (ch->cur_ctx == ctx)
+ ch->cur_ctx = NULL;
+ mutex_unlock(&ch->submitlock);
+ }
+
+ mutex_lock(&ch->reflock);
+ if (ch->refcount == 1) {
+ nvhost_module_deinit(&ch->mod);
+ /* cdma may already be stopped, that's ok */
+ nvhost_cdma_stop(&ch->cdma);
+ nvhost_cdma_deinit(&ch->cdma);
+ }
+ ch->refcount--;
+ mutex_unlock(&ch->reflock);
+}
+
+void nvhost_channel_suspend(struct nvhost_channel *ch)
+{
+ mutex_lock(&ch->reflock);
+ BUG_ON(nvhost_module_powered(&ch->mod));
+ nvhost_cdma_stop(&ch->cdma);
+ mutex_unlock(&ch->reflock);
+}
+
+void nvhost_channel_submit(struct nvhost_channel *ch,
+ struct nvmap_client *user_nvmap,
+ struct nvhost_op_pair *ops, int num_pairs,
+ struct nvhost_cpuinterrupt *intrs, int num_intrs,
+ struct nvmap_handle **unpins, int num_unpins,
+ u32 syncpt_id, u32 syncpt_val)
+{
+ int i;
+	struct nvhost_op_pair *p;
+
+ /* schedule interrupts */
+ for (i = 0; i < num_intrs; i++) {
+ nvhost_intr_add_action(&ch->dev->intr, syncpt_id, intrs[i].syncpt_val,
+ NVHOST_INTR_ACTION_CTXSAVE, intrs[i].intr_data, NULL);
+ }
+
+ /* begin a CDMA submit */
+ nvhost_cdma_begin(&ch->cdma);
+
+ /* push ops */
+ for (i = 0, p = ops; i < num_pairs; i++, p++)
+ nvhost_cdma_push(&ch->cdma, p->op1, p->op2);
+
+	/* end CDMA submit & stash pinned handles into sync queue for later cleanup */
+ nvhost_cdma_end(user_nvmap, &ch->cdma, syncpt_id, syncpt_val,
+ unpins, num_unpins);
+}
+
+static void power_2d(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+ /* TODO: [ahatala 2010-06-17] reimplement EPP hang war */
+ if (action == NVHOST_POWER_ACTION_OFF) {
+ /* TODO: [ahatala 2010-06-17] reset EPP */
+ }
+}
+
+static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+ struct nvhost_channel *ch = container_of(mod, struct nvhost_channel, mod);
+
+ if (action == NVHOST_POWER_ACTION_OFF) {
+ mutex_lock(&ch->submitlock);
+ if (ch->cur_ctx) {
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ struct nvhost_op_pair save;
+ struct nvhost_cpuinterrupt ctxsw;
+ u32 syncval;
+ syncval = nvhost_syncpt_incr_max(&ch->dev->syncpt,
+ NVSYNCPT_3D,
+ ch->cur_ctx->save_incrs);
+ save.op1 = nvhost_opcode_gather(0, ch->cur_ctx->save_size);
+ save.op2 = ch->cur_ctx->save_phys;
+ ctxsw.intr_data = ch->cur_ctx;
+ ctxsw.syncpt_val = syncval - 1;
+ ch->cur_ctx->valid = true;
+ ch->ctxhandler.get(ch->cur_ctx);
+ ch->cur_ctx = NULL;
+
+ nvhost_channel_submit(ch, ch->dev->nvmap,
+ &save, 1, &ctxsw, 1, NULL, 0,
+ NVSYNCPT_3D, syncval);
+
+ nvhost_intr_add_action(&ch->dev->intr, NVSYNCPT_3D,
+ syncval,
+ NVHOST_INTR_ACTION_WAKEUP,
+ &wq, NULL);
+ wait_event(wq,
+ nvhost_syncpt_min_cmp(&ch->dev->syncpt,
+ NVSYNCPT_3D, syncval));
+ nvhost_cdma_update(&ch->cdma);
+ }
+ mutex_unlock(&ch->submitlock);
+ }
+}
+
+static void power_mpe(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_channel.h
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CHANNEL_H
+#define __NVHOST_CHANNEL_H
+
+#include "nvhost_cdma.h"
+#include "nvhost_acm.h"
+#include "nvhost_hwctx.h"
+
+#include <linux/cdev.h>
+#include <linux/io.h>
+
+#define NVHOST_CHANNEL_BASE 0
+#define NVHOST_NUMCHANNELS (NV_HOST1X_CHANNELS - 1)
+#define NVHOST_MAX_GATHERS 512
+#define NVHOST_MAX_HANDLES 1280
+
+struct nvhost_master;
+
+struct nvhost_channeldesc {
+ const char *name;
+ nvhost_modulef power;
+ u32 syncpts;
+ u32 waitbases;
+ u32 modulemutexes;
+ u32 class;
+};
+
+struct nvhost_channel {
+ int refcount;
+ struct mutex reflock;
+ struct mutex submitlock;
+ void __iomem *aperture;
+ struct nvhost_master *dev;
+ const struct nvhost_channeldesc *desc;
+ struct nvhost_hwctx *cur_ctx;
+ struct device *node;
+ struct cdev cdev;
+ struct nvhost_hwctx_handler ctxhandler;
+ struct nvhost_module mod;
+ struct nvhost_cdma cdma;
+};
+
+struct nvhost_op_pair {
+ u32 op1;
+ u32 op2;
+};
+
+struct nvhost_cpuinterrupt {
+ u32 syncpt_val;
+ void *intr_data;
+};
+
+int nvhost_channel_init(
+ struct nvhost_channel *ch,
+ struct nvhost_master *dev, int index);
+
+void nvhost_channel_submit(struct nvhost_channel *ch,
+ struct nvmap_client *user_nvmap,
+ struct nvhost_op_pair *ops, int num_pairs,
+ struct nvhost_cpuinterrupt *intrs, int num_intrs,
+ struct nvmap_handle **unpins, int num_unpins,
+ u32 syncpt_id, u32 syncpt_val);
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch);
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx);
+void nvhost_channel_suspend(struct nvhost_channel *ch);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_cpuaccess.c
+ *
+ * Tegra Graphics Host Cpu Register Access
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cpuaccess.h"
+#include "dev.h"
+#include <linux/string.h>
+
+#define cpuaccess_to_dev(ctx) container_of(ctx, struct nvhost_master, cpuaccess)
+
+int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
+ struct platform_device *pdev)
+{
+ int i;
+ for (i = 0; i < NVHOST_MODULE_NUM; i++) {
+ struct resource *mem;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, i+1);
+ if (!mem) {
+ dev_err(&pdev->dev, "missing module memory resource\n");
+ return -ENXIO;
+ }
+
+ ctx->regs[i] = ioremap(mem->start, resource_size(mem));
+ if (!ctx->regs[i]) {
+ dev_err(&pdev->dev, "failed to map module registers\n");
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx)
+{
+ int i;
+ for (i = 0; i < NVHOST_MODULE_NUM; i++) {
+ iounmap(ctx->regs[i]);
+ release_resource(ctx->reg_mem[i]);
+ }
+}
+
+int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *sync_regs = dev->sync_aperture;
+ u32 reg;
+
+	/* reading an mlock register returns 0 when the lock is acquired;
+	 * writing 0 releases the lock. */
+ nvhost_module_busy(&dev->mod);
+ reg = readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
+ if (reg) {
+ nvhost_module_idle(&dev->mod);
+ return -ERESTARTSYS;
+ }
+ return 0;
+}
+
+void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *sync_regs = dev->sync_aperture;
+ writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
+ nvhost_module_idle(&dev->mod);
+}
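+
+/*
+ * Illustrative caller pattern (a sketch; not a caller in this file):
+ *
+ *	if (nvhost_mutex_try_lock(ctx, idx))
+ *		return -ERESTARTSYS;	(contended: caller backs off/retries)
+ *	... access the module's registers ...
+ *	nvhost_mutex_unlock(ctx, idx);
+ */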
+
+void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, void *values)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *p = ctx->regs[module] + offset;
+	u32 *out = (u32 *)values;
+ BUG_ON(size & 3);
+ size >>= 2;
+ nvhost_module_busy(&dev->mod);
+ while (size--) {
+ *(out++) = readl(p);
+ p += 4;
+ }
+ rmb();
+ nvhost_module_idle(&dev->mod);
+}
+
+void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, const void *values)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *p = ctx->regs[module] + offset;
+	const u32 *in = (const u32 *)values;
+ BUG_ON(size & 3);
+ size >>= 2;
+ nvhost_module_busy(&dev->mod);
+ while (size--) {
+ writel(*(in++), p);
+ p += 4;
+ }
+ wmb();
+ nvhost_module_idle(&dev->mod);
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_cpuaccess.h
+ *
+ * Tegra Graphics Host Cpu Register Access
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CPUACCESS_H
+#define __NVHOST_CPUACCESS_H
+
+#include "nvhost_hardware.h"
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+enum nvhost_module_id {
+ NVHOST_MODULE_DISPLAY_A = 0,
+ NVHOST_MODULE_DISPLAY_B,
+ NVHOST_MODULE_VI,
+ NVHOST_MODULE_ISP,
+ NVHOST_MODULE_MPE,
+#if 0
+ /* TODO: [ahatala 2010-07-02] find out if these are needed */
+ NVHOST_MODULE_FUSE,
+ NVHOST_MODULE_APB_MISC,
+ NVHOST_MODULE_CLK_RESET,
+#endif
+ NVHOST_MODULE_NUM
+};
+
+struct nvhost_cpuaccess {
+ struct resource *reg_mem[NVHOST_MODULE_NUM];
+ void __iomem *regs[NVHOST_MODULE_NUM];
+};
+
+int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
+ struct platform_device *pdev);
+
+void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx);
+
+int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx);
+
+void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx);
+
+static inline bool nvhost_access_module_regs(
+ struct nvhost_cpuaccess *ctx, u32 module)
+{
+ return (module < NVHOST_MODULE_NUM);
+}
+
+void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, void *values);
+
+void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, const void *values);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_hardware.h
+ *
+ * Tegra Graphics Host Register Offsets
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HARDWARE_H
+#define __NVHOST_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+/* class ids */
+enum {
+ NV_HOST1X_CLASS_ID = 0x1,
+ NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20,
+ NV_GRAPHICS_3D_CLASS_ID = 0x60
+};
+
+
+/* channel registers */
+#define NV_HOST1X_CHANNELS 8
+#define NV_HOST1X_CHANNEL0_BASE 0
+#define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384
+
+
+#define HOST1X_CHANNEL_FIFOSTAT 0x00
+#define HOST1X_CHANNEL_INDDATA 0x0c
+#define HOST1X_CHANNEL_DMASTART 0x14
+#define HOST1X_CHANNEL_DMAPUT 0x18
+#define HOST1X_CHANNEL_DMAGET 0x1c
+#define HOST1X_CHANNEL_DMAEND 0x20
+#define HOST1X_CHANNEL_DMACTRL 0x24
+
+#define HOST1X_SYNC_CF_SETUP(x) (0x3080 + (4 * (x)))
+
+#define HOST1X_SYNC_SYNCPT_BASE(x) (0x3600 + (4 * (x)))
+
+#define HOST1X_SYNC_CBREAD(x) (0x3720 + (4 * (x)))
+#define HOST1X_SYNC_CFPEEK_CTRL 0x374c
+#define HOST1X_SYNC_CFPEEK_READ 0x3750
+#define HOST1X_SYNC_CFPEEK_PTRS 0x3754
+#define HOST1X_SYNC_CBSTAT(x) (0x3758 + (4 * (x)))
+
+static inline unsigned nvhost_channel_fifostat_outfentries(u32 reg)
+{
+ return (reg >> 24) & 0x1f;
+}
+
+static inline u32 nvhost_channel_dmactrl(bool stop, bool get_rst, bool init_get)
+{
+ u32 v = stop ? 1 : 0;
+ if (get_rst)
+ v |= 2;
+ if (init_get)
+ v |= 4;
+ return v;
+}
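+
+/*
+ * Example: the start_cdma() sequence in nvhost_cdma.c writes
+ * dmactrl(true, false, false) == 1 (DMA stopped), then
+ * dmactrl(true, true, true) == 7 (stopped, reset and re-init GET),
+ * then dmactrl(false, false, false) == 0 to let the channel run.
+ */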
+
+
+/* sync registers */
+#define NV_HOST1X_SYNCPT_NB_PTS 32
+#define NV_HOST1X_SYNCPT_NB_BASES 8
+#define NV_HOST1X_NB_MLOCKS 16
+#define HOST1X_CHANNEL_SYNC_REG_BASE 12288
+
+enum {
+ HOST1X_SYNC_INTMASK = 0x4,
+ HOST1X_SYNC_INTC0MASK = 0x8,
+ HOST1X_SYNC_HINTSTATUS = 0x20,
+ HOST1X_SYNC_HINTMASK = 0x24,
+ HOST1X_SYNC_HINTSTATUS_EXT = 0x28,
+ HOST1X_SYNC_HINTMASK_EXT = 0x2c,
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS = 0x40,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_0 = 0x50,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_1 = 0x54,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE = 0x60,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0 = 0x68,
+ HOST1X_SYNC_USEC_CLK = 0x1a4,
+ HOST1X_SYNC_CTXSW_TIMEOUT_CFG = 0x1a8,
+ HOST1X_SYNC_IP_BUSY_TIMEOUT = 0x1bc,
+ HOST1X_SYNC_IP_READ_TIMEOUT_ADDR = 0x1c0,
+ HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR = 0x1c4,
+ HOST1X_SYNC_MLOCK_0 = 0x2c0,
+ HOST1X_SYNC_MLOCK_OWNER_0 = 0x340,
+ HOST1X_SYNC_SYNCPT_0 = 0x400,
+ HOST1X_SYNC_SYNCPT_INT_THRESH_0 = 0x500,
+ HOST1X_SYNC_SYNCPT_BASE_0 = 0x600,
+ HOST1X_SYNC_SYNCPT_CPU_INCR = 0x700
+};
+
+static inline bool nvhost_sync_hintstatus_ext_ip_read_int(u32 reg)
+{
+ return (reg & BIT(30)) != 0;
+}
+
+static inline bool nvhost_sync_hintstatus_ext_ip_write_int(u32 reg)
+{
+ return (reg & BIT(31)) != 0;
+}
+
+static inline bool nvhost_sync_mlock_owner_ch_owns(u32 reg)
+{
+ return (reg & BIT(0)) != 0;
+}
+
+static inline bool nvhost_sync_mlock_owner_cpu_owns(u32 reg)
+{
+ return (reg & BIT(1)) != 0;
+}
+
+static inline unsigned int nvhost_sync_mlock_owner_owner_chid(u32 reg)
+{
+ return (reg >> 8) & 0xf;
+}
+
+
+/* host class */
+enum {
+ NV_CLASS_HOST_INCR_SYNCPT = 0x0,
+ NV_CLASS_HOST_WAIT_SYNCPT = 0x8,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE = 0x9,
+ NV_CLASS_HOST_INCR_SYNCPT_BASE = 0xc,
+ NV_CLASS_HOST_INDOFF = 0x2d,
+ NV_CLASS_HOST_INDDATA = 0x2e
+};
+
+static inline u32 nvhost_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return (indx << 24) | (base_indx << 16) | offset;
+}
+
+static inline u32 nvhost_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return (base_indx << 24) | offset;
+}
+
+enum {
+ NV_HOST_MODULE_HOST1X = 0,
+ NV_HOST_MODULE_MPE = 1,
+ NV_HOST_MODULE_GR3D = 6
+};
+
+static inline u32 nvhost_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = (0xf << 28) | (mod_id << 18) | (offset << 2);
+ if (auto_inc)
+ v |= BIT(27);
+ return v;
+}
+
+static inline u32 nvhost_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = (mod_id << 18) | (offset << 2) | 1;
+ if (auto_inc)
+ v |= BIT(27);
+ return v;
+}
+
+
+/* cdma opcodes */
+static inline u32 nvhost_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 nvhost_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 nvhost_opcode_gather(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)
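+
+/*
+ * Example encodings: nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, 0, 0)
+ * yields 0x00000040 (class 0x1 << 6), and nvhost_opcode_imm(0, 0x1234)
+ * yields 0x40001234 (opcode 4 in bits 31:28, offset 0, value 0x1234).
+ */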
+
+
+
+#endif /* __NVHOST_HARDWARE_H */
+
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_hwctx.h
+ *
+ * Tegra Graphics Host Hardware Context Interface
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HWCTX_H
+#define __NVHOST_HWCTX_H
+
+#include <linux/string.h>
+#include <linux/kref.h>
+
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+
+struct nvhost_channel;
+
+struct nvhost_hwctx {
+ struct kref ref;
+
+ struct nvhost_channel *channel;
+ bool valid;
+
+ struct nvmap_handle_ref *save;
+ u32 save_phys;
+ u32 save_size;
+ u32 save_incrs;
+ void *save_cpu_data;
+
+ struct nvmap_handle_ref *restore;
+ u32 restore_phys;
+ u32 restore_size;
+ u32 restore_incrs;
+};
+
+struct nvhost_hwctx_handler {
+ struct nvhost_hwctx * (*alloc) (struct nvhost_channel *ch);
+ void (*get) (struct nvhost_hwctx *ctx);
+ void (*put) (struct nvhost_hwctx *ctx);
+ void (*save_service) (struct nvhost_hwctx *ctx);
+};
+
+int nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h);
+int nvhost_mpectx_handler_init(struct nvhost_hwctx_handler *h);
+
+static inline int nvhost_hwctx_handler_init(struct nvhost_hwctx_handler *h,
+ const char *module)
+{
+ if (strcmp(module, "gr3d") == 0)
+ return nvhost_3dctx_handler_init(h);
+ else if (strcmp(module, "mpe") == 0)
+ return nvhost_mpectx_handler_init(h);
+
+ return 0;
+}
+
+struct hwctx_reginfo {
+ unsigned int offset:12;
+ unsigned int count:16;
+ unsigned int type:2;
+};
+
+enum {
+ HWCTX_REGINFO_DIRECT = 0,
+ HWCTX_REGINFO_INDIRECT,
+ HWCTX_REGINFO_INDIRECT_OFFSET,
+ HWCTX_REGINFO_INDIRECT_DATA
+};
+
+#define HWCTX_REGINFO(offset, count, type) {offset, count, HWCTX_REGINFO_##type}
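+
+/*
+ * Usage sketch (the offset/count values here are hypothetical):
+ * HWCTX_REGINFO(0x100, 2, DIRECT) expands to
+ * { 0x100, 2, HWCTX_REGINFO_DIRECT }, i.e. two directly addressed
+ * registers starting at offset 0x100 (offsets must fit in 12 bits).
+ */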
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_intr.c
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_intr.h"
+#include "dev.h"
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#define intr_to_dev(x) container_of(x, struct nvhost_master, intr)
+
+
+/*** HW sync point threshold interrupt management ***/
+
+static void set_syncpt_threshold(void __iomem *sync_regs, u32 id, u32 thresh)
+{
+ thresh &= 0xffff;
+ writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
+}
+
+static void enable_syncpt_interrupt(void __iomem *sync_regs, u32 id)
+{
+ writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
+}
+
+
+/*** Wait list management ***/
+
+struct nvhost_waitlist {
+ struct list_head list;
+ struct kref refcount;
+ u32 thresh;
+ enum nvhost_intr_action action;
+ atomic_t state;
+ void *data;
+ int count;
+};
+
+enum waitlist_state
+{
+ WLS_PENDING,
+ WLS_REMOVED,
+ WLS_CANCELLED,
+ WLS_HANDLED
+};
+
+static void waiter_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct nvhost_waitlist, refcount));
+}
+
+/*
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ */
+static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
+ struct list_head *queue)
+{
+ struct nvhost_waitlist *pos;
+ u32 thresh = waiter->thresh;
+
+ list_for_each_entry_reverse(pos, queue, list)
+ if ((s32)(pos->thresh - thresh) <= 0) {
+ list_add(&waiter->list, &pos->list);
+ return false;
+ }
+
+ list_add(&waiter->list, queue);
+ return true;
+}
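+
+/*
+ * The (s32)(a - b) comparison above is wrap-safe: e.g. with
+ * pos->thresh == 0xfffffffe and thresh == 3, the u32 difference is
+ * 0xfffffffb, which as s32 is -5 <= 0, so a threshold just below the
+ * 32-bit wrap correctly sorts before a small post-wrap threshold.
+ */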
+
+/*
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into lists by actions
+ */
+static void remove_completed_waiters(struct list_head *head, u32 sync,
+ struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+ struct list_head *dest;
+ struct nvhost_waitlist *waiter, *next, *prev;
+
+ list_for_each_entry_safe(waiter, next, head, list) {
+ if ((s32)(waiter->thresh - sync) > 0)
+ break;
+
+ dest = completed + waiter->action;
+
+ /* consolidate submit cleanups */
+ if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
+ && !list_empty(dest)) {
+ prev = list_entry(dest->prev,
+ struct nvhost_waitlist, list);
+ if (prev->data == waiter->data) {
+ prev->count++;
+ dest = NULL;
+ }
+ }
+
+ /* PENDING->REMOVED or CANCELLED->HANDLED */
+ if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ } else {
+ list_move_tail(&waiter->list, dest);
+ }
+ }
+}
+
+static void action_submit_complete(struct nvhost_waitlist *waiter)
+{
+ struct nvhost_channel *channel = waiter->data;
+ int nr_completed = waiter->count;
+
+ nvhost_cdma_update(&channel->cdma);
+ nvhost_module_idle_mult(&channel->mod, nr_completed);
+}
+
+static void action_ctxsave(struct nvhost_waitlist *waiter)
+{
+ struct nvhost_hwctx *hwctx = waiter->data;
+ struct nvhost_channel *channel = hwctx->channel;
+
+ channel->ctxhandler.save_service(hwctx);
+ channel->ctxhandler.put(hwctx);
+}
+
+static void action_wakeup(struct nvhost_waitlist *waiter)
+{
+ wait_queue_head_t *wq = waiter->data;
+
+ wake_up(wq);
+}
+
+static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
+{
+ wait_queue_head_t *wq = waiter->data;
+
+ wake_up_interruptible(wq);
+}
+
+typedef void (*action_handler)(struct nvhost_waitlist *waiter);
+
+static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
+ action_submit_complete,
+ action_ctxsave,
+ action_wakeup,
+ action_wakeup_interruptible,
+};
+
+static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+ struct list_head *head = completed;
+ int i;
+
+ for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
+ action_handler handler = action_handlers[i];
+ struct nvhost_waitlist *waiter, *next;
+
+ list_for_each_entry_safe(waiter, next, head, list) {
+ list_del(&waiter->list);
+ handler(waiter);
+ atomic_set(&waiter->state, WLS_HANDLED);
+ smp_wmb();
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ }
+}
+
+
+/*** Interrupt service functions ***/
+
+/**
+ * Host1x interrupt service function
+ * Handles read / write failures
+ */
+static irqreturn_t host1x_isr(int irq, void *dev_id)
+{
+ struct nvhost_intr *intr = dev_id;
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ u32 stat;
+ u32 ext_stat;
+ u32 addr;
+
+ stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
+ ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
+
+ if (nvhost_sync_hintstatus_ext_ip_read_int(ext_stat)) {
+ addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
+ pr_err("Host read timeout at address %x\n", addr);
+ }
+
+ if (nvhost_sync_hintstatus_ext_ip_write_int(ext_stat)) {
+ addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
+ pr_err("Host write timeout at address %x\n", addr);
+ }
+
+ writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
+ writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers, in interrupt context
+ */
+static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
+{
+ struct nvhost_intr_syncpt *syncpt = dev_id;
+ unsigned int id = syncpt->id;
+ struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
+ syncpt[id]);
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+ writel(BIT(id),
+ sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
+ writel(BIT(id),
+ sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
+
+ return IRQ_WAKE_THREAD;
+}
+
+
+/**
+ * Sync point threshold interrupt service thread function
+ * Handles sync point threshold triggers, in thread context
+ */
+static irqreturn_t syncpt_thresh_fn(int irq, void *dev_id)
+{
+ struct nvhost_intr_syncpt *syncpt = dev_id;
+ unsigned int id = syncpt->id;
+ struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
+ syncpt[id]);
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+
+ struct list_head completed[NVHOST_INTR_ACTION_COUNT];
+ u32 sync;
+ unsigned int i;
+
+ for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
+ INIT_LIST_HEAD(completed + i);
+
+ sync = nvhost_syncpt_update_min(&dev->syncpt, id);
+
+ spin_lock(&syncpt->lock);
+
+ remove_completed_waiters(&syncpt->wait_head, sync, completed);
+
+ if (!list_empty(&syncpt->wait_head)) {
+ u32 thresh = list_first_entry(&syncpt->wait_head,
+ struct nvhost_waitlist, list)->thresh;
+
+ set_syncpt_threshold(sync_regs, id, thresh);
+ enable_syncpt_interrupt(sync_regs, id);
+ }
+
+ spin_unlock(&syncpt->lock);
+
+ run_handlers(completed);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * lazily request a syncpt's irq
+ */
+static int request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
+{
+ static DEFINE_MUTEX(mutex);
+	int err = 0;
+
+ mutex_lock(&mutex);
+ if (!syncpt->irq_requested) {
+ err = request_threaded_irq(syncpt->irq,
+ syncpt_thresh_isr, syncpt_thresh_fn,
+ 0, syncpt->thresh_irq_name, syncpt);
+ if (!err)
+ syncpt->irq_requested = 1;
+ }
+ mutex_unlock(&mutex);
+ return err;
+}
+
+
+/*** Main API ***/
+
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+ enum nvhost_intr_action action, void *data,
+ void **ref)
+{
+ struct nvhost_waitlist *waiter;
+ struct nvhost_intr_syncpt *syncpt;
+ void __iomem *sync_regs;
+ int queue_was_empty;
+ int err;
+
+ /* create and initialize a new waiter */
+ waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
+ if (!waiter)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&waiter->list);
+ kref_init(&waiter->refcount);
+ if (ref)
+ kref_get(&waiter->refcount);
+ waiter->thresh = thresh;
+ waiter->action = action;
+ atomic_set(&waiter->state, WLS_PENDING);
+ waiter->data = data;
+ waiter->count = 1;
+
+ BUG_ON(id >= NV_HOST1X_SYNCPT_NB_PTS);
+ syncpt = intr->syncpt + id;
+ sync_regs = intr_to_dev(intr)->sync_aperture;
+
+ spin_lock(&syncpt->lock);
+
+ /* lazily request irq for this sync point */
+ if (!syncpt->irq_requested) {
+ spin_unlock(&syncpt->lock);
+
+ err = request_syncpt_irq(syncpt);
+ if (err) {
+ kfree(waiter);
+ return err;
+ }
+
+ spin_lock(&syncpt->lock);
+ }
+
+ queue_was_empty = list_empty(&syncpt->wait_head);
+
+ if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
+ /* added at head of list - new threshold value */
+ set_syncpt_threshold(sync_regs, id, thresh);
+
+ /* added as first waiter - enable interrupt */
+ if (queue_was_empty)
+ enable_syncpt_interrupt(sync_regs, id);
+ }
+
+ spin_unlock(&syncpt->lock);
+
+ if (ref)
+ *ref = waiter;
+ return 0;
+}
+
+void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
+{
+ struct nvhost_waitlist *waiter = ref;
+
+ while (atomic_cmpxchg(&waiter->state,
+ WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
+ schedule();
+
+ kref_put(&waiter->refcount, waiter_release);
+}
+
+
+/*** Init & shutdown ***/
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
+{
+ unsigned int id;
+ struct nvhost_intr_syncpt *syncpt;
+ int err;
+
+ err = request_irq(irq_gen, host1x_isr, 0, "host_status", intr);
+ if (err)
+ goto fail;
+ intr->host1x_irq = irq_gen;
+ intr->host1x_isr_started = true;
+
+ for (id = 0, syncpt = intr->syncpt;
+ id < NV_HOST1X_SYNCPT_NB_PTS;
+ ++id, ++syncpt) {
+ syncpt->id = id;
+ syncpt->irq = irq_sync + id;
+ syncpt->irq_requested = 0;
+ spin_lock_init(&syncpt->lock);
+ INIT_LIST_HEAD(&syncpt->wait_head);
+ snprintf(syncpt->thresh_irq_name,
+ sizeof(syncpt->thresh_irq_name),
+ "%s", nvhost_syncpt_name(id));
+ }
+
+ return 0;
+
+fail:
+ nvhost_intr_deinit(intr);
+ return err;
+}
+
+void nvhost_intr_deinit(struct nvhost_intr *intr)
+{
+ unsigned int id;
+ struct nvhost_intr_syncpt *syncpt;
+
+ for (id = 0, syncpt = intr->syncpt;
+ id < NV_HOST1X_SYNCPT_NB_PTS;
+ ++id, ++syncpt)
+ if (syncpt->irq_requested)
+ free_irq(syncpt->irq, syncpt);
+
+ if (intr->host1x_isr_started) {
+ free_irq(intr->host1x_irq, intr);
+ intr->host1x_isr_started = false;
+ }
+}
+
+void nvhost_intr_configure(struct nvhost_intr *intr, u32 hz)
+{
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+	/* write the microsecond clock register: host1x clock cycles per
+	 * microsecond, rounded up (e.g. hz == 108000000 -> 108) */
+	writel((hz + 1000000 - 1) / 1000000, sync_regs + HOST1X_SYNC_USEC_CLK);
+
+ /* disable the ip_busy_timeout. this prevents write drops, etc.
+ * there's no real way to recover from a hung client anyway.
+ */
+ writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);
+
+	/* increase the auto-ack timeout to the maximum value. 2d will hang
+	 * otherwise on ap20.
+	 */
+ writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
+
+	/* disable interrupts for both CPUs */
+ writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_0);
+ writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_1);
+
+ /* masking all of the interrupts actually means "enable" */
+ writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);
+
+ /* enable HOST_INT_C0MASK */
+ writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);
+
+ /* enable HINTMASK_EXT */
+ writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);
+
+ /* enable IP_READ_INT and IP_WRITE_INT */
+ writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_intr.h
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_INTR_H
+#define __NVHOST_INTR_H
+
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+
+#include "nvhost_hardware.h"
+
+struct nvhost_channel;
+
+enum nvhost_intr_action {
+ /**
+ * Perform cleanup after a submit has completed.
+ * 'data' points to a channel
+ */
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,
+
+ /**
+ * Save a HW context.
+ * 'data' points to a context
+ */
+ NVHOST_INTR_ACTION_CTXSAVE,
+
+ /**
+ * Wake up a task.
+ * 'data' points to a wait_queue_head_t
+ */
+ NVHOST_INTR_ACTION_WAKEUP,
+
+ /**
+ * Wake up a interruptible task.
+ * 'data' points to a wait_queue_head_t
+ */
+ NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+
+ NVHOST_INTR_ACTION_COUNT
+};
+
+struct nvhost_intr_syncpt {
+ u8 id;
+ u8 irq_requested;
+ u16 irq;
+ spinlock_t lock;
+ struct list_head wait_head;
+ char thresh_irq_name[12];
+};
+
+struct nvhost_intr {
+ struct nvhost_intr_syncpt syncpt[NV_HOST1X_SYNCPT_NB_PTS];
+ int host1x_irq;
+ bool host1x_isr_started;
+};
+
+/**
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking api.
+ */
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+ enum nvhost_intr_action action, void *data,
+ void **ref);
+
+/**
+ * Unreference an action submitted to nvhost_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from nvhost_intr_add_action()
+ */
+void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref);
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync);
+void nvhost_intr_deinit(struct nvhost_intr *intr);
+void nvhost_intr_configure(struct nvhost_intr *intr, u32 hz);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_mpectx.c
+ *
+ * Tegra Graphics Host MPE HW Context
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* Placeholder */
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.c
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_syncpt.h"
+#include "dev.h"
+
+#define client_managed(id) (BIT(id) & NVSYNCPTS_CLIENT_MANAGED)
+#define syncpt_to_dev(sp) container_of(sp, struct nvhost_master, syncpt)
+#define SYNCPT_CHECK_PERIOD (2 * HZ)
+
+static bool check_max(struct nvhost_syncpt *sp, u32 id, u32 real)
+{
+ u32 max;
+ if (client_managed(id))
+ return true;
+ smp_rmb();
+ max = (u32)atomic_read(&sp->max_val[id]);
+ return ((s32)(max - real) >= 0);
+}
+
+/**
+ * Write the current syncpoint value back to hw.
+ */
+static void reset_syncpt(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ int min;
+ smp_rmb();
+ min = atomic_read(&sp->min_val[id]);
+ writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
+}
+
+/**
+ * Write the current waitbase value back to hw.
+ */
+static void reset_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ writel(sp->base_val[id],
+ dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
+}
+
+/**
+ * Read waitbase value from hw.
+ */
+static void read_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ sp->base_val[id] = readl(dev->sync_aperture +
+ (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
+}
+
+/**
+ * Resets syncpoint and waitbase values to sw shadows
+ */
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++)
+ reset_syncpt(sp, i);
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
+ reset_syncpt_wait_base(sp, i);
+ wmb();
+}
+
+/**
+ * Updates sw shadow state for client managed registers
+ */
+void nvhost_syncpt_save(struct nvhost_syncpt *sp)
+{
+ u32 i;
+
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
+ if (client_managed(i))
+ nvhost_syncpt_update_min(sp, i);
+ else
+ BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
+ }
+
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
+ read_syncpt_wait_base(sp, i);
+}
+
+/**
+ * Updates the last value read from hardware.
+ */
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ void __iomem *sync_regs = dev->sync_aperture;
+ u32 old, live;
+
+ do {
+ smp_rmb();
+ old = (u32)atomic_read(&sp->min_val[id]);
+ live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
+ } while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);
+
+ BUG_ON(!check_max(sp, id, live));
+
+ return live;
+}
+
+/**
+ * Get the current syncpoint value
+ */
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
+{
+ u32 val;
+
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+ val = nvhost_syncpt_update_min(sp, id);
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+ return val;
+}
+
+/**
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ BUG_ON(!nvhost_module_powered(&dev->mod));
+ BUG_ON(!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id));
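+ /* CPU_INCR increments every syncpoint whose bit is set in the write */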
+ writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
+ wmb();
+}
+
+/**
+ * Increment syncpoint value from cpu, updating cache
+ */
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ nvhost_syncpt_incr_max(sp, id, 1);
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+ nvhost_syncpt_cpu_incr(sp, id);
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+}
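+
+/*
+ * Illustrative usage (a sketch, not part of this patch; "host" stands in
+ * for a valid struct nvhost_master). Signal a client-managed syncpoint
+ * from the cpu, then wait for the value just produced:
+ *
+ * nvhost_syncpt_incr(&host->syncpt, NVSYNCPT_DISP0);
+ * nvhost_syncpt_wait(&host->syncpt, NVSYNCPT_DISP0,
+ * nvhost_syncpt_read_max(&host->syncpt, NVSYNCPT_DISP0));
+ */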
+
+/**
+ * Main entrypoint for syncpoint value waits.
+ */
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
+ u32 thresh, u32 timeout)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ void *ref;
+ int err = 0;
+
+ BUG_ON(!check_max(sp, id, thresh));
+
+ /* first check cache */
+ if (nvhost_syncpt_min_cmp(sp, id, thresh))
+ return 0;
+
+ /* keep host alive */
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+
+ if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
+ /* try to read from register */
+ u32 val = nvhost_syncpt_update_min(sp, id);
+ if ((s32)(val - thresh) >= 0)
+ goto done;
+ }
+
+ if (!timeout) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* schedule a wakeup when the syncpoint value is reached */
+ err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
+ NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq, &ref);
+ if (err)
+ goto done;
+
+ err = -EAGAIN;
+ /* wait for the syncpoint, or timeout, or signal */
+ while (timeout) {
+ u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
+ int remain = wait_event_interruptible_timeout(wq,
+ nvhost_syncpt_min_cmp(sp, id, thresh),
+ check);
+ if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
+ err = 0;
+ break;
+ }
+ if (remain < 0) {
+ err = remain;
+ break;
+ }
+ if (timeout != NVHOST_NO_TIMEOUT)
+ timeout -= check;
+ if (timeout) {
+ dev_warn(&syncpt_to_dev(sp)->pdev->dev,
+ "syncpoint id %d (%s) stuck waiting for %d\n",
+ id, nvhost_syncpt_name(id), thresh);
+ nvhost_syncpt_debug(sp);
+ }
+ }
+ nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);
+
+done:
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+ return err;
+}
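+
+/*
+ * Example caller (an illustrative sketch; "host" stands in for a valid
+ * struct nvhost_master). A one-second wait returns 0 on success, -EAGAIN
+ * if the threshold is not reached in time, and -ERESTARTSYS if a signal
+ * interrupts the sleep:
+ *
+ * err = nvhost_syncpt_wait_timeout(&host->syncpt, id, thresh, HZ);
+ */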
+
+static const char *s_syncpt_names[32] = {
+ "", "", "", "", "", "", "", "", "", "", "", "",
+ "vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4", "vi_isp_5",
+ "2d_0", "2d_1",
+ "", "",
+ "3d", "mpe", "disp0", "disp1", "vblank0", "vblank1", "mpe_ebm_eof", "mpe_wr_safe",
+ "2d_tinyblt", "dsi"
+};
+
+const char *nvhost_syncpt_name(u32 id)
+{
+ BUG_ON(id >= ARRAY_SIZE(s_syncpt_names));
+ return s_syncpt_names[id];
+}
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
+ u32 max = nvhost_syncpt_read_max(sp, i);
+ if (!max)
+ continue;
+ dev_info(&syncpt_to_dev(sp)->pdev->dev,
+ "id %d (%s) min %d max %d\n",
+ i, nvhost_syncpt_name(i),
+ nvhost_syncpt_update_min(sp, i), max);
+ }
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.h
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_SYNCPT_H
+#define __NVHOST_SYNCPT_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#include "nvhost_hardware.h"
+
+#define NVSYNCPT_VI_ISP_0 (12)
+#define NVSYNCPT_VI_ISP_1 (13)
+#define NVSYNCPT_VI_ISP_2 (14)
+#define NVSYNCPT_VI_ISP_3 (15)
+#define NVSYNCPT_VI_ISP_4 (16)
+#define NVSYNCPT_VI_ISP_5 (17)
+#define NVSYNCPT_2D_0 (18)
+#define NVSYNCPT_2D_1 (19)
+#define NVSYNCPT_3D (22)
+#define NVSYNCPT_MPE (23)
+#define NVSYNCPT_DISP0 (24)
+#define NVSYNCPT_DISP1 (25)
+#define NVSYNCPT_VBLANK0 (26)
+#define NVSYNCPT_VBLANK1 (27)
+#define NVSYNCPT_MPE_EBM_EOF (28)
+#define NVSYNCPT_MPE_WR_SAFE (29)
+#define NVSYNCPT_DSI (31)
+#define NVSYNCPT_INVALID (-1)
+
+/*#define NVSYNCPT_2D_CHANNEL2_0 (20) */
+/*#define NVSYNCPT_2D_CHANNEL2_1 (21) */
+/*#define NVSYNCPT_2D_TINYBLT_WAR (30)*/
+/*#define NVSYNCPT_2D_TINYBLT_RESTORE_CLASS_ID (30)*/
+
+/* sync points that are wholly managed by the client */
+#define NVSYNCPTS_CLIENT_MANAGED ( \
+ BIT(NVSYNCPT_DISP0) | BIT(NVSYNCPT_DISP1) | BIT(NVSYNCPT_DSI) | \
+ BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_2) | \
+ BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | BIT(NVSYNCPT_VI_ISP_5) | \
+ BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
+ BIT(NVSYNCPT_2D_1))
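+
+/*
+ * A bit set here (e.g. BIT(NVSYNCPT_DISP0)) makes client_managed() true
+ * in nvhost_syncpt.c, which skips the max-value sanity checks for that
+ * id: the owning client increments the syncpoint outside the host
+ * driver's bookkeeping.
+ */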
+
+#define NVWAITBASE_2D_0 (1)
+#define NVWAITBASE_2D_1 (2)
+#define NVWAITBASE_3D (3)
+#define NVWAITBASE_MPE (4)
+
+struct nvhost_syncpt {
+ atomic_t min_val[NV_HOST1X_SYNCPT_NB_PTS];
+ atomic_t max_val[NV_HOST1X_SYNCPT_NB_PTS];
+ u32 base_val[NV_HOST1X_SYNCPT_NB_BASES];
+};
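+
+/*
+ * min_val shadows the value last read back from hw (work completed so
+ * far), max_val the highest value sw has promised hw will reach, and
+ * base_val the wait bases.
+ */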
+
+/**
+ * Increments the value to be sent to hardware and returns the new max.
+ */
+static inline u32 nvhost_syncpt_incr_max(struct nvhost_syncpt *sp,
+ u32 id, u32 incrs)
+{
+ return (u32)atomic_add_return(incrs, &sp->max_val[id]);
+}
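+
+/*
+ * Expected pattern (a sketch inferred from the checks in nvhost_syncpt.c,
+ * not code from this patch): reserve increments up front, then wait on
+ * the returned threshold once the work has been queued:
+ *
+ * u32 thresh = nvhost_syncpt_incr_max(sp, id, incrs);
+ * ... queue work that performs "incrs" increments ...
+ * nvhost_syncpt_wait(sp, id, thresh);
+ */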
+
+/**
+ * Sets the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_set_max(struct nvhost_syncpt *sp,
+ u32 id, u32 val)
+{
+ atomic_set(&sp->max_val[id], val);
+ smp_wmb();
+ return val;
+}
+
+static inline u32 nvhost_syncpt_read_max(struct nvhost_syncpt *sp, u32 id)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->max_val[id]);
+}
+
+/**
+ * Returns true if syncpoint has reached threshold
+ */
+static inline bool nvhost_syncpt_min_cmp(struct nvhost_syncpt *sp,
+ u32 id, u32 thresh)
+{
+ u32 cur;
+ smp_rmb();
+ cur = (u32)atomic_read(&sp->min_val[id]);
+ return ((s32)(cur - thresh) >= 0);
+}
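+
+/*
+ * The signed subtraction above keeps the comparison correct across 32-bit
+ * wrap-around: with cur = 0x00000002 and thresh = 0xfffffffe, cur - thresh
+ * is 4, so (s32)4 >= 0 and the recently wrapped counter still counts as
+ * having reached the threshold.
+ */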
+
+/**
+ * Returns true if syncpoint min == max
+ */
+static inline bool nvhost_syncpt_min_eq_max(struct nvhost_syncpt *sp, u32 id)
+{
+ int min, max;
+ smp_rmb();
+ min = atomic_read(&sp->min_val[id]);
+ max = atomic_read(&sp->max_val[id]);
+ return (min == max);
+}
+
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id);
+
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_save(struct nvhost_syncpt *sp);
+
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp);
+
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id);
+
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh,
+ u32 timeout);
+
+static inline int nvhost_syncpt_wait(struct nvhost_syncpt *sp, u32 id, u32 thresh)
+{
+ return nvhost_syncpt_wait_timeout(sp, id, thresh, MAX_SCHEDULE_TIMEOUT);
+}
+
+const char *nvhost_syncpt_name(u32 id);
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp);
+
+#endif