From 521aae88f1f463a82bb4d2bda3350bfa8cae0836 Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko <digetx@gmail.com>
Date: Tue, 19 May 2015 02:10:46 +0300
Subject: [PATCH] HACK: staging: Add downstream NVIDIA video driver

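Import the downstream NVIDIA host1x/nvhost, nvmap and display (dc, fb,
overlay, HDMI, DSI) drivers for Tegra 2/3 into drivers/staging/tegra,
gated behind a new TEGRA_DOWNSTREAM Kconfig option. The devices are
instantiated from device tree through a "nvhost-bus" compatible hooked
into of_platform_bus_create(), and the Tegra20 PLL_D frequency table
gains the additional rates used by these drivers.
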
Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
---
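The whole stack is enabled with TEGRA_DOWNSTREAM=y. Client engines bind
to the nvhost bus through the driver model declared in the new
include/linux/nvhost.h header. For reference, a minimal, hypothetical
client sketch against that API (the "example" names are illustrative
and not part of this patch):

#include <linux/module.h>
#include <linux/nvhost.h>

/* called by the nvhost bus core when a matching device is found */
static int example_probe(struct nvhost_device *dev,
			 struct nvhost_device_id *id)
{
	dev_info(&dev->dev, "bound, class 0x%x\n", dev->class);
	return 0;
}

static int example_remove(struct nvhost_device *dev)
{
	return 0;
}

static struct nvhost_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.owner	= THIS_MODULE,
		.name	= "example",
	},
};

static int __init example_init(void)
{
	return nvhost_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	nvhost_driver_unregister(&example_driver);
}
module_exit(example_exit);
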
 drivers/clk/tegra/clk-tegra20.c               |   22 +
 drivers/of/platform.c                         |   45 +
 drivers/staging/Kconfig                       |    2 +
 drivers/staging/Makefile                      |    2 +
 drivers/staging/tegra/Kconfig                 |   12 +
 drivers/staging/tegra/Makefile                |    7 +
 drivers/staging/tegra/clk.c                   |    8 +
 drivers/staging/tegra/csi.c                   |   85 +
 drivers/staging/tegra/include/linux/nvhost.h  |  255 ++
 .../tegra/include/linux/nvhost_ioctl.h        |  213 +
 drivers/staging/tegra/include/linux/nvmap.h   |  144 +
 .../tegra/include/linux/tegra_overlay.h       |   79 +
 drivers/staging/tegra/include/mach/clk.h      |   47 +
 drivers/staging/tegra/include/mach/csi.h      |   40 +
 drivers/staging/tegra/include/mach/fuse.h     |   21 +
 .../staging/tegra/include/mach/hdmi-audio.h   |   47 +
 drivers/staging/tegra/include/mach/iovmm.h    |  352 ++
 drivers/staging/tegra/include/mach/kfuse.h    |   20 +
 .../tegra/include/mach/latency_allowance.h    |  109 +
 drivers/staging/tegra/include/mach/mc.h       |  101 +
 .../staging/tegra/include/mach/powergate.h    |   32 +
 .../tegra/include/media/tegra_camera.h        |   60 +
 .../tegra/include/trace/events/nvhost.h       |  568 +++
 .../tegra/include/trace/events/nvmap.h        |  303 ++
 drivers/staging/tegra/include/video/nvhdcp.h  |   91 +
 .../tegra/include/video/tegra_dc_ext.h        |  331 ++
 drivers/staging/tegra/include/video/tegrafb.h |   32 +
 drivers/staging/tegra/pageattr.c              |   66 +
 drivers/staging/tegra/powergate.c             |  218 +
 drivers/staging/tegra/video/Kconfig           |  201 +
 drivers/staging/tegra/video/Makefile          |   10 +
 drivers/staging/tegra/video/dc/Makefile       |   14 +
 drivers/staging/tegra/video/dc/bandwidth.c    |  284 ++
 drivers/staging/tegra/video/dc/clock.c        |  144 +
 drivers/staging/tegra/video/dc/csc.c          |   69 +
 drivers/staging/tegra/video/dc/dc.c           | 2417 ++++++++++++
 drivers/staging/tegra/video/dc/dc.h           |  618 +++
 drivers/staging/tegra/video/dc/dc_config.c    |  247 ++
 drivers/staging/tegra/video/dc/dc_config.h    |  162 +
 drivers/staging/tegra/video/dc/dc_priv.h      |  407 ++
 drivers/staging/tegra/video/dc/dc_reg.h       |  564 +++
 drivers/staging/tegra/video/dc/dc_sysfs.c     |  373 ++
 drivers/staging/tegra/video/dc/dsi.c          | 3509 +++++++++++++++++
 drivers/staging/tegra/video/dc/dsi.h          |  375 ++
 drivers/staging/tegra/video/dc/dsi_regs.h     |  351 ++
 drivers/staging/tegra/video/dc/edid.c         |  605 +++
 drivers/staging/tegra/video/dc/edid.h         |   86 +
 drivers/staging/tegra/video/dc/ext/Makefile   |    8 +
 drivers/staging/tegra/video/dc/ext/control.c  |  276 ++
 drivers/staging/tegra/video/dc/ext/cursor.c   |  203 +
 drivers/staging/tegra/video/dc/ext/dev.c      | 1140 ++++++
 drivers/staging/tegra/video/dc/ext/events.c   |  197 +
 .../tegra/video/dc/ext/tegra_dc_ext_priv.h    |  152 +
 drivers/staging/tegra/video/dc/ext/util.c     |   78 +
 drivers/staging/tegra/video/dc/fb.h           |   64 +
 drivers/staging/tegra/video/dc/hdmi.c         | 2519 ++++++++++++
 drivers/staging/tegra/video/dc/hdmi.h         |  222 ++
 drivers/staging/tegra/video/dc/hdmi_reg.h     |  480 +++
 drivers/staging/tegra/video/dc/lut.c          |  130 +
 drivers/staging/tegra/video/dc/mode.c         |  343 ++
 drivers/staging/tegra/video/dc/nvhdcp.c       | 1259 ++++++
 drivers/staging/tegra/video/dc/nvhdcp.h       |   46 +
 drivers/staging/tegra/video/dc/nvsd.c         |  914 +++++
 drivers/staging/tegra/video/dc/nvsd.h         |   25 +
 drivers/staging/tegra/video/dc/overlay.c      |  893 +++++
 drivers/staging/tegra/video/dc/overlay.h      |   43 +
 drivers/staging/tegra/video/dc/rgb.c          |  158 +
 drivers/staging/tegra/video/dc/tegra_dc_ext.h |   78 +
 drivers/staging/tegra/video/dc/tegra_fb.h     |   27 +
 drivers/staging/tegra/video/dc/window.c       |  469 +++
 drivers/staging/tegra/video/fb.c              |  776 ++++
 drivers/staging/tegra/video/host/Makefile     |   31 +
 drivers/staging/tegra/video/host/bus.c        |  715 ++++
 drivers/staging/tegra/video/host/bus.h        |   38 +
 drivers/staging/tegra/video/host/bus_client.c |  669 ++++
 drivers/staging/tegra/video/host/bus_client.h |   42 +
 .../staging/tegra/video/host/chip_support.c   |   56 +
 .../staging/tegra/video/host/chip_support.h   |  181 +
 drivers/staging/tegra/video/host/debug.c      |  234 ++
 drivers/staging/tegra/video/host/debug.h      |   50 +
 drivers/staging/tegra/video/host/dev.c        |   31 +
 drivers/staging/tegra/video/host/dev.h        |   25 +
 .../staging/tegra/video/host/gr2d/Makefile    |    8 +
 drivers/staging/tegra/video/host/gr2d/gr2d.c  |   80 +
 .../staging/tegra/video/host/gr3d/Makefile    |   11 +
 drivers/staging/tegra/video/host/gr3d/gr3d.c  |  278 ++
 drivers/staging/tegra/video/host/gr3d/gr3d.h  |   57 +
 .../staging/tegra/video/host/gr3d/gr3d_t20.c  |  399 ++
 .../staging/tegra/video/host/gr3d/gr3d_t20.h  |   33 +
 .../staging/tegra/video/host/gr3d/gr3d_t30.c  |  437 ++
 .../staging/tegra/video/host/gr3d/gr3d_t30.h  |   33 +
 .../staging/tegra/video/host/gr3d/scale3d.c   |  941 +++++
 .../staging/tegra/video/host/gr3d/scale3d.h   |   47 +
 .../staging/tegra/video/host/host1x/Makefile  |    8 +
 .../staging/tegra/video/host/host1x/host1x.c  |  580 +++
 .../staging/tegra/video/host/host1x/host1x.h  |   90 +
 .../video/host/host1x/host1x01_hardware.h     |  170 +
 .../tegra/video/host/host1x/host1x_cdma.c     |  517 +++
 .../tegra/video/host/host1x/host1x_cdma.h     |   39 +
 .../tegra/video/host/host1x/host1x_channel.c  |  681 ++++
 .../tegra/video/host/host1x/host1x_debug.c    |  405 ++
 .../tegra/video/host/host1x/host1x_hwctx.h    |   66 +
 .../tegra/video/host/host1x/host1x_intr.c     |  278 ++
 .../tegra/video/host/host1x/host1x_syncpt.c   |  180 +
 .../tegra/video/host/host1x/host1x_syncpt.h   |   62 +
 .../video/host/host1x/hw_host1x01_channel.h   |  182 +
 .../video/host/host1x/hw_host1x01_sync.h      |  398 ++
 .../video/host/host1x/hw_host1x01_uclass.h    |  474 +++
 drivers/staging/tegra/video/host/isp/Makefile |    8 +
 drivers/staging/tegra/video/host/isp/isp.c    |   86 +
 drivers/staging/tegra/video/host/mpe/Makefile |    8 +
 drivers/staging/tegra/video/host/mpe/mpe.c    |  696 ++++
 drivers/staging/tegra/video/host/mpe/mpe.h    |   32 +
 drivers/staging/tegra/video/host/nvhost_acm.c |  671 ++++
 drivers/staging/tegra/video/host/nvhost_acm.h |   58 +
 .../staging/tegra/video/host/nvhost_cdma.c    |  559 +++
 .../staging/tegra/video/host/nvhost_cdma.h    |  117 +
 .../staging/tegra/video/host/nvhost_channel.c |  188 +
 .../staging/tegra/video/host/nvhost_channel.h |   77 +
 .../staging/tegra/video/host/nvhost_hwctx.h   |   66 +
 .../staging/tegra/video/host/nvhost_intr.c    |  406 ++
 .../staging/tegra/video/host/nvhost_intr.h    |  115 +
 drivers/staging/tegra/video/host/nvhost_job.c |  358 ++
 drivers/staging/tegra/video/host/nvhost_job.h |  148 +
 .../staging/tegra/video/host/nvhost_memmgr.c  |   35 +
 .../staging/tegra/video/host/nvhost_memmgr.h  |   38 +
 .../staging/tegra/video/host/nvhost_syncpt.c  |  510 +++
 .../staging/tegra/video/host/nvhost_syncpt.h  |  151 +
 drivers/staging/tegra/video/host/nvmap.c      |  100 +
 drivers/staging/tegra/video/host/nvmap.h      |   27 +
 drivers/staging/tegra/video/host/t20/Makefile |    8 +
 drivers/staging/tegra/video/host/t20/t20.c    |  256 ++
 drivers/staging/tegra/video/host/t20/t20.h    |   29 +
 drivers/staging/tegra/video/host/t30/Makefile |    8 +
 drivers/staging/tegra/video/host/t30/t30.c    |  283 ++
 drivers/staging/tegra/video/host/t30/t30.h    |   29 +
 drivers/staging/tegra/video/host/vi/Makefile  |    8 +
 drivers/staging/tegra/video/host/vi/vi.c      |   86 +
 drivers/staging/tegra/video/nvmap/Makefile    |   10 +
 drivers/staging/tegra/video/nvmap/nvmap.c     |  619 +++
 drivers/staging/tegra/video/nvmap/nvmap.h     |  321 ++
 .../staging/tegra/video/nvmap/nvmap_common.h  |   29 +
 drivers/staging/tegra/video/nvmap/nvmap_dev.c | 1499 +++++++
 .../staging/tegra/video/nvmap/nvmap_handle.c  | 1069 +++++
 .../staging/tegra/video/nvmap/nvmap_heap.c    | 1129 ++++++
 .../staging/tegra/video/nvmap/nvmap_heap.h    |   68 +
 .../staging/tegra/video/nvmap/nvmap_ioctl.c   |  800 ++++
 .../staging/tegra/video/nvmap/nvmap_ioctl.h   |  162 +
 .../staging/tegra/video/nvmap/nvmap_iommu.c   |   96 +
 drivers/staging/tegra/video/nvmap/nvmap_mru.c |  187 +
 drivers/staging/tegra/video/nvmap/nvmap_mru.h |   84 +
 151 files changed, 44008 insertions(+)
 create mode 100644 drivers/staging/tegra/Kconfig
 create mode 100644 drivers/staging/tegra/Makefile
 create mode 100644 drivers/staging/tegra/clk.c
 create mode 100644 drivers/staging/tegra/csi.c
 create mode 100644 drivers/staging/tegra/include/linux/nvhost.h
 create mode 100644 drivers/staging/tegra/include/linux/nvhost_ioctl.h
 create mode 100644 drivers/staging/tegra/include/linux/nvmap.h
 create mode 100644 drivers/staging/tegra/include/linux/tegra_overlay.h
 create mode 100644 drivers/staging/tegra/include/mach/clk.h
 create mode 100644 drivers/staging/tegra/include/mach/csi.h
 create mode 100644 drivers/staging/tegra/include/mach/fuse.h
 create mode 100644 drivers/staging/tegra/include/mach/hdmi-audio.h
 create mode 100644 drivers/staging/tegra/include/mach/iovmm.h
 create mode 100644 drivers/staging/tegra/include/mach/kfuse.h
 create mode 100644 drivers/staging/tegra/include/mach/latency_allowance.h
 create mode 100644 drivers/staging/tegra/include/mach/mc.h
 create mode 100644 drivers/staging/tegra/include/mach/powergate.h
 create mode 100644 drivers/staging/tegra/include/media/tegra_camera.h
 create mode 100644 drivers/staging/tegra/include/trace/events/nvhost.h
 create mode 100644 drivers/staging/tegra/include/trace/events/nvmap.h
 create mode 100644 drivers/staging/tegra/include/video/nvhdcp.h
 create mode 100644 drivers/staging/tegra/include/video/tegra_dc_ext.h
 create mode 100644 drivers/staging/tegra/include/video/tegrafb.h
 create mode 100644 drivers/staging/tegra/pageattr.c
 create mode 100644 drivers/staging/tegra/powergate.c
 create mode 100644 drivers/staging/tegra/video/Kconfig
 create mode 100644 drivers/staging/tegra/video/Makefile
 create mode 100644 drivers/staging/tegra/video/dc/Makefile
 create mode 100644 drivers/staging/tegra/video/dc/bandwidth.c
 create mode 100644 drivers/staging/tegra/video/dc/clock.c
 create mode 100644 drivers/staging/tegra/video/dc/csc.c
 create mode 100644 drivers/staging/tegra/video/dc/dc.c
 create mode 100644 drivers/staging/tegra/video/dc/dc.h
 create mode 100644 drivers/staging/tegra/video/dc/dc_config.c
 create mode 100644 drivers/staging/tegra/video/dc/dc_config.h
 create mode 100644 drivers/staging/tegra/video/dc/dc_priv.h
 create mode 100644 drivers/staging/tegra/video/dc/dc_reg.h
 create mode 100644 drivers/staging/tegra/video/dc/dc_sysfs.c
 create mode 100644 drivers/staging/tegra/video/dc/dsi.c
 create mode 100644 drivers/staging/tegra/video/dc/dsi.h
 create mode 100644 drivers/staging/tegra/video/dc/dsi_regs.h
 create mode 100644 drivers/staging/tegra/video/dc/edid.c
 create mode 100644 drivers/staging/tegra/video/dc/edid.h
 create mode 100644 drivers/staging/tegra/video/dc/ext/Makefile
 create mode 100644 drivers/staging/tegra/video/dc/ext/control.c
 create mode 100644 drivers/staging/tegra/video/dc/ext/cursor.c
 create mode 100644 drivers/staging/tegra/video/dc/ext/dev.c
 create mode 100644 drivers/staging/tegra/video/dc/ext/events.c
 create mode 100644 drivers/staging/tegra/video/dc/ext/tegra_dc_ext_priv.h
 create mode 100644 drivers/staging/tegra/video/dc/ext/util.c
 create mode 100644 drivers/staging/tegra/video/dc/fb.h
 create mode 100644 drivers/staging/tegra/video/dc/hdmi.c
 create mode 100644 drivers/staging/tegra/video/dc/hdmi.h
 create mode 100644 drivers/staging/tegra/video/dc/hdmi_reg.h
 create mode 100644 drivers/staging/tegra/video/dc/lut.c
 create mode 100644 drivers/staging/tegra/video/dc/mode.c
 create mode 100644 drivers/staging/tegra/video/dc/nvhdcp.c
 create mode 100644 drivers/staging/tegra/video/dc/nvhdcp.h
 create mode 100644 drivers/staging/tegra/video/dc/nvsd.c
 create mode 100644 drivers/staging/tegra/video/dc/nvsd.h
 create mode 100644 drivers/staging/tegra/video/dc/overlay.c
 create mode 100644 drivers/staging/tegra/video/dc/overlay.h
 create mode 100644 drivers/staging/tegra/video/dc/rgb.c
 create mode 100644 drivers/staging/tegra/video/dc/tegra_dc_ext.h
 create mode 100644 drivers/staging/tegra/video/dc/tegra_fb.h
 create mode 100644 drivers/staging/tegra/video/dc/window.c
 create mode 100644 drivers/staging/tegra/video/fb.c
 create mode 100644 drivers/staging/tegra/video/host/Makefile
 create mode 100644 drivers/staging/tegra/video/host/bus.c
 create mode 100644 drivers/staging/tegra/video/host/bus.h
 create mode 100644 drivers/staging/tegra/video/host/bus_client.c
 create mode 100644 drivers/staging/tegra/video/host/bus_client.h
 create mode 100644 drivers/staging/tegra/video/host/chip_support.c
 create mode 100644 drivers/staging/tegra/video/host/chip_support.h
 create mode 100644 drivers/staging/tegra/video/host/debug.c
 create mode 100644 drivers/staging/tegra/video/host/debug.h
 create mode 100644 drivers/staging/tegra/video/host/dev.c
 create mode 100644 drivers/staging/tegra/video/host/dev.h
 create mode 100644 drivers/staging/tegra/video/host/gr2d/Makefile
 create mode 100644 drivers/staging/tegra/video/host/gr2d/gr2d.c
 create mode 100644 drivers/staging/tegra/video/host/gr3d/Makefile
 create mode 100644 drivers/staging/tegra/video/host/gr3d/gr3d.c
 create mode 100644 drivers/staging/tegra/video/host/gr3d/gr3d.h
 create mode 100644 drivers/staging/tegra/video/host/gr3d/gr3d_t20.c
 create mode 100644 drivers/staging/tegra/video/host/gr3d/gr3d_t20.h
 create mode 100644 drivers/staging/tegra/video/host/gr3d/gr3d_t30.c
 create mode 100644 drivers/staging/tegra/video/host/gr3d/gr3d_t30.h
 create mode 100644 drivers/staging/tegra/video/host/gr3d/scale3d.c
 create mode 100644 drivers/staging/tegra/video/host/gr3d/scale3d.h
 create mode 100644 drivers/staging/tegra/video/host/host1x/Makefile
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x.c
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x.h
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x01_hardware.h
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x_cdma.c
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x_cdma.h
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x_channel.c
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x_debug.c
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x_hwctx.h
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x_intr.c
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x_syncpt.c
 create mode 100644 drivers/staging/tegra/video/host/host1x/host1x_syncpt.h
 create mode 100644 drivers/staging/tegra/video/host/host1x/hw_host1x01_channel.h
 create mode 100644 drivers/staging/tegra/video/host/host1x/hw_host1x01_sync.h
 create mode 100644 drivers/staging/tegra/video/host/host1x/hw_host1x01_uclass.h
 create mode 100644 drivers/staging/tegra/video/host/isp/Makefile
 create mode 100644 drivers/staging/tegra/video/host/isp/isp.c
 create mode 100644 drivers/staging/tegra/video/host/mpe/Makefile
 create mode 100644 drivers/staging/tegra/video/host/mpe/mpe.c
 create mode 100644 drivers/staging/tegra/video/host/mpe/mpe.h
 create mode 100644 drivers/staging/tegra/video/host/nvhost_acm.c
 create mode 100644 drivers/staging/tegra/video/host/nvhost_acm.h
 create mode 100644 drivers/staging/tegra/video/host/nvhost_cdma.c
 create mode 100644 drivers/staging/tegra/video/host/nvhost_cdma.h
 create mode 100644 drivers/staging/tegra/video/host/nvhost_channel.c
 create mode 100644 drivers/staging/tegra/video/host/nvhost_channel.h
 create mode 100644 drivers/staging/tegra/video/host/nvhost_hwctx.h
 create mode 100644 drivers/staging/tegra/video/host/nvhost_intr.c
 create mode 100644 drivers/staging/tegra/video/host/nvhost_intr.h
 create mode 100644 drivers/staging/tegra/video/host/nvhost_job.c
 create mode 100644 drivers/staging/tegra/video/host/nvhost_job.h
 create mode 100644 drivers/staging/tegra/video/host/nvhost_memmgr.c
 create mode 100644 drivers/staging/tegra/video/host/nvhost_memmgr.h
 create mode 100644 drivers/staging/tegra/video/host/nvhost_syncpt.c
 create mode 100644 drivers/staging/tegra/video/host/nvhost_syncpt.h
 create mode 100644 drivers/staging/tegra/video/host/nvmap.c
 create mode 100644 drivers/staging/tegra/video/host/nvmap.h
 create mode 100644 drivers/staging/tegra/video/host/t20/Makefile
 create mode 100644 drivers/staging/tegra/video/host/t20/t20.c
 create mode 100644 drivers/staging/tegra/video/host/t20/t20.h
 create mode 100644 drivers/staging/tegra/video/host/t30/Makefile
 create mode 100644 drivers/staging/tegra/video/host/t30/t30.c
 create mode 100644 drivers/staging/tegra/video/host/t30/t30.h
 create mode 100644 drivers/staging/tegra/video/host/vi/Makefile
 create mode 100644 drivers/staging/tegra/video/host/vi/vi.c
 create mode 100644 drivers/staging/tegra/video/nvmap/Makefile
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap.c
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap.h
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap_common.h
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap_dev.c
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap_handle.c
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap_heap.c
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap_heap.h
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap_ioctl.c
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap_ioctl.h
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap_iommu.c
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap_mru.c
 create mode 100644 drivers/staging/tegra/video/nvmap/nvmap_mru.h

diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 837e5cbd60e9..f76874a00e60 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -205,14 +205,36 @@ static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
 };
 
 static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
+	{ 12000000,    5000000,   10, 24, 1,  4 },
+	{ 12000000,   10000000,   10, 12, 1,  4 },
+	{ 12000000,  161500000,  323, 24, 1,  4 },
+	{ 12000000,  162000000,  162, 12, 1,  4 },
+
 	{ 12000000,  216000000,  216, 12, 1,  4 },
 	{ 13000000,  216000000,  216, 13, 1,  4 },
 	{ 19200000,  216000000,  135, 12, 1,  3 },
 	{ 26000000,  216000000,  216, 26, 1,  4 },
+
+	{ 12000000,  252000000,  252, 12, 1,  4 },
+	{ 13000000,  252000000,  252, 13, 1,  4 },
+	{ 19200000,  252000000,  210, 16, 1,  3 },
+	{ 26000000,  252000000,  252, 26, 1,  4 },
+
+	{ 12000000,  297000000,  297, 12, 1,  4 },
+	{ 13000000,  297000000,  297, 13, 1,  4 },
+	{ 19200000,  297000000,  248, 16, 1,  4 },
+	{ 26000000,  297000000,  297, 26, 1,  4 },
+
+	{ 12000000,  504000000,  504, 12, 1,  8 },
+	{ 13000000,  504000000,  504, 13, 1,  8 },
+	{ 19200000,  504000000,  420, 16, 1,  8 },
+	{ 26000000,  504000000,  504, 26, 1,  8 },
+
 	{ 12000000,  594000000,  594, 12, 1,  8 },
 	{ 13000000,  594000000,  594, 13, 1,  8 },
 	{ 19200000,  594000000,  495, 16, 1,  8 },
 	{ 26000000,  594000000,  594, 26, 1,  8 },
+
 	{ 12000000, 1000000000, 1000, 12, 1, 12 },
 	{ 13000000, 1000000000, 1000, 13, 1, 12 },
 	{ 19200000, 1000000000,  625, 12, 1,  8 },
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 16e8daffac06..5692509f70ec 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -23,6 +23,8 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 
+#include <../drivers/staging/tegra/include/linux/nvhost.h>
+
 const struct of_device_id of_default_bus_match_table[] = {
 	{ .compatible = "simple-bus", },
 	{ .compatible = "simple-mfd", },
@@ -333,6 +335,44 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
 	return NULL;
 }
 
+/*
+ * Assume that all child devices belong to nvhost.
+ */
+static int of_nvhost_bus_create(struct device_node *node,
+				const struct of_dev_auxdata *lookup)
+{
+	const struct of_dev_auxdata *auxdata;
+	struct device_node *child;
+	const char *bus_id = NULL;
+	void *platform_data = NULL;
+	int rc;
+
+	if (!of_device_is_available(node))
+		return -ENODEV;
+
+	auxdata = of_dev_lookup(lookup, node);
+	if (auxdata) {
+		bus_id = auxdata->name;
+		platform_data = auxdata->platform_data;
+	}
+
+	pr_debug("   nvhost create host: %s\n", node->name);
+	rc = of_nvhost_device_create(node, bus_id, platform_data);
+	if (rc)
+		return rc;
+
+	for_each_child_of_node(node, child) {
+		pr_debug("   nvhost create child: %s\n", child->full_name);
+		rc = of_nvhost_bus_create(child, lookup);
+		if (rc) {
+			of_node_put(child);
+			break;
+		}
+	}
+
+	return rc;
+}
+
 /**
  * of_platform_bus_create() - Create a device for a node and its children.
  * @bus: device node of the bus to instantiate
@@ -378,6 +418,11 @@ static int of_platform_bus_create(struct device_node *bus,
 		return 0;
 	}
 
+	if (of_device_is_compatible(bus, "nvhost-bus")) {
+		of_nvhost_bus_create(bus, tegra20_auxdata_lookup);
+		return 0;
+	}
+
 	dev = of_platform_device_create_pdata(bus, bus_id, platform_data, parent);
 	if (!dev || !of_match_node(matches, bus))
 		return 0;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 46e38e702dd8..b329c99680fe 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,6 +24,8 @@ menuconfig STAGING
 
 if STAGING
 
+source "drivers/staging/tegra/Kconfig"
+
 source "drivers/staging/a500/Kconfig"
 
 source "drivers/staging/slicoss/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 4656175ffce2..c985cd625324 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -1,5 +1,7 @@
 # Makefile for staging directory
 
+obj-$(CONFIG_TEGRA_DOWNSTREAM)	+= tegra/
+
 obj-$(CONFIG_A500)		+= a500/
 
 obj-y				+= media/
diff --git a/drivers/staging/tegra/Kconfig b/drivers/staging/tegra/Kconfig
new file mode 100644
index 000000000000..23e7d5aa3f02
--- /dev/null
+++ b/drivers/staging/tegra/Kconfig
@@ -0,0 +1,12 @@
+config TEGRA_DOWNSTREAM
+	bool "Downstream NVIDIA Tegra drivers"
+	depends on ARCH_TEGRA && !TEGRA20_MC
+	default n
+	help
+	  Say Y to build downstream NVIDIA Tegra 2/3 drivers.
+
+if TEGRA_DOWNSTREAM
+
+source "drivers/staging/tegra/video/Kconfig"
+
+endif # TEGRA_DOWNSTREAM
diff --git a/drivers/staging/tegra/Makefile b/drivers/staging/tegra/Makefile
new file mode 100644
index 000000000000..26b035281347
--- /dev/null
+++ b/drivers/staging/tegra/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Idrivers/staging/tegra/include
+
+obj-$(CONFIG_TEGRA_DOWNSTREAM) += video/
+obj-$(CONFIG_TEGRA_DOWNSTREAM) += csi.o
+obj-$(CONFIG_TEGRA_DOWNSTREAM) += clk.o
+obj-$(CONFIG_TEGRA_DOWNSTREAM) += pageattr.o
+obj-$(CONFIG_TEGRA_DOWNSTREAM) += powergate.o
diff --git a/drivers/staging/tegra/clk.c b/drivers/staging/tegra/clk.c
new file mode 100644
index 000000000000..b7745340f38d
--- /dev/null
+++ b/drivers/staging/tegra/clk.c
@@ -0,0 +1,8 @@
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+int tegra_is_clk_enabled(struct clk *c)
+{
+	return __clk_get_enable_count(c);
+}
+EXPORT_SYMBOL(tegra_is_clk_enabled);
diff --git a/drivers/staging/tegra/csi.c b/drivers/staging/tegra/csi.c
new file mode 100644
index 000000000000..6e18c2f97163
--- /dev/null
+++ b/drivers/staging/tegra/csi.c
@@ -0,0 +1,85 @@
+/*
+ * arch/arm/mach-tegra/csi.c
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <mach/csi.h>
+
+#include <../arch/arm/mach-tegra/iomap.h>
+
+#define TEGRA_VI_BASE	0x54080000
+
+static struct clk *vi_clk;
+static struct clk *csi_clk;
+
+int tegra_vi_csi_writel(u32 val, u32 offset)
+{
+	if (vi_clk == NULL) {
+		vi_clk = clk_get_sys("tegra_camera", "vi");
+		if (IS_ERR_OR_NULL(vi_clk)) {
+			pr_err("vi: can't get vi clock\n");
+			return -EINVAL;
+		}
+	}
+	clk_prepare_enable(vi_clk);
+
+	if (csi_clk == NULL) {
+		csi_clk = clk_get_sys("tegra_camera", "csi");
+		if (IS_ERR_OR_NULL(csi_clk)) {
+			pr_err("csi: can't get csi clock\n");
+			return -EINVAL;
+		}
+	}
+	clk_prepare_enable(csi_clk);
+
+	writel(val, IO_ADDRESS(TEGRA_VI_BASE) + offset * 4);
+
+	clk_disable_unprepare(csi_clk);
+	clk_disable_unprepare(vi_clk);
+	return 0;
+}
+
+int tegra_vi_csi_readl(u32 offset, u32 *val)
+{
+	if (vi_clk == NULL) {
+		vi_clk = clk_get_sys("tegra_camera", "vi");
+		if (IS_ERR_OR_NULL(vi_clk)) {
+			pr_err("vi: can't get vi clock\n");
+			return -EINVAL;
+		}
+	}
+	clk_prepare_enable(vi_clk);
+
+	if (csi_clk == NULL) {
+		csi_clk = clk_get_sys("tegra_camera", "csi");
+		if (IS_ERR_OR_NULL(csi_clk)) {
+			pr_err("csi: can't get csi clock\n");
+			return -EINVAL;
+		}
+	}
+	clk_prepare_enable(csi_clk);
+
+	*val = readl(IO_ADDRESS(TEGRA_VI_BASE) + offset * 4);
+
+	clk_disable_unprepare(csi_clk);
+	clk_disable_unprepare(vi_clk);
+
+	return 0;
+}
diff --git a/drivers/staging/tegra/include/linux/nvhost.h b/drivers/staging/tegra/include/linux/nvhost.h
new file mode 100644
index 000000000000..0e4c3843f3b3
--- /dev/null
+++ b/drivers/staging/tegra/include/linux/nvhost.h
@@ -0,0 +1,255 @@
+/*
+ * include/linux/nvhost.h
+ *
+ * Tegra graphics host driver
+ *
+ * Copyright (c) 2009-2012, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __LINUX_NVHOST_H
+#define __LINUX_NVHOST_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/of_device.h>
+
+struct nvhost_master;
+
+struct nvhost_device_power_attr;
+
+#define NVHOST_MODULE_MAX_CLOCKS		3
+#define NVHOST_MODULE_MAX_POWERGATE_IDS		2
+#define NVHOST_MODULE_NO_POWERGATE_IDS		.powergate_ids = {-1, -1}
+#define NVHOST_DEFAULT_CLOCKGATE_DELAY		.clockgate_delay = 25
+#define NVHOST_NAME_SIZE			24
+#define NVSYNCPT_INVALID			(-1)
+
+/* FIXME:
+ * Sync point ids are now split across 2 files.
+ * One is this file and the other is
+ * drivers/video/tegra/host/host1x/host1x_syncpt.h
+ * So if someone decides to add a new sync point in the future,
+ * please check both header files.
+ */
+#define NVSYNCPT_DISP0_A		(8)
+#define NVSYNCPT_DISP1_A		(9)
+#define NVSYNCPT_AVP_0			(10)
+#define NVSYNCPT_DISP0_B		(20)
+#define NVSYNCPT_DISP1_B		(21)
+#define NVSYNCPT_DISP0_C		(24)
+#define NVSYNCPT_DISP1_C		(25)
+#define NVSYNCPT_VBLANK0		(26)
+#define NVSYNCPT_VBLANK1		(27)
+#define NVSYNCPT_DSI			(31)
+
+enum nvhost_power_sysfs_attributes {
+	NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY = 0,
+	NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY,
+	NVHOST_POWER_SYSFS_ATTRIB_REFCOUNT,
+	NVHOST_POWER_SYSFS_ATTRIB_MAX
+};
+
+struct nvhost_device_id {
+	char name[NVHOST_NAME_SIZE];
+	unsigned long version;
+};
+
+struct nvhost_clock {
+	char *name;
+	long default_rate;
+};
+
+enum nvhost_device_powerstate_t {
+	NVHOST_POWER_STATE_DEINIT,
+	NVHOST_POWER_STATE_RUNNING,
+	NVHOST_POWER_STATE_CLOCKGATED,
+	NVHOST_POWER_STATE_POWERGATED
+};
+
+struct nvhost_device {
+	const char	*name;		/* device name */
+	int		version;	/* ip version number of device */
+	struct device	dev;		/* Linux device struct */
+	int		id;		/* Separates clients of same hw */
+	int		index;		/* Hardware channel number */
+	u32		num_resources;	/* Number of resources following */
+	struct resource	*resource;	/* Resources (IOMEM in particular) */
+	struct resource	*reg_mem;
+	void __iomem	*aperture;	/* Iomem mapped to kernel */
+
+	u32		syncpts;	/* Bitfield of sync points used */
+	u32		waitbases;	/* Bit field of wait bases */
+	u32		modulemutexes;	/* Bit field of module mutexes */
+	u32		moduleid;	/* Module id for user space API */
+
+	u32		class;		/* Device class */
+	bool		exclusive;	/* True if only one user at a time */
+	bool		keepalive;	/* Do not power gate when opened */
+	bool		waitbasesync;	/* Force sync of wait bases */
+	bool		powerup_reset;	/* Do a reset after power un-gating */
+	bool		serialize;	/* Serialize submits in the channel */
+
+	int		powergate_ids[NVHOST_MODULE_MAX_POWERGATE_IDS];
+	bool		can_powergate;	/* True if module can be power gated */
+	int		clockgate_delay;/* Delay before clock gated */
+	int		powergate_delay;/* Delay before power gated */
+	struct nvhost_clock clocks[NVHOST_MODULE_MAX_CLOCKS];/* Clock names */
+
+	struct delayed_work powerstate_down;/* Power state management */
+	int		num_clks;	/* Number of clocks opened for dev */
+	struct clk	*clk[NVHOST_MODULE_MAX_CLOCKS];
+	struct mutex	lock;		/* Power management lock */
+	int		powerstate;	/* Current power state */
+	int		refcount;	/* Number of tasks active */
+	wait_queue_head_t idle_wq;	/* Work queue for idle */
+	struct list_head client_list;	/* List of clients and rate requests */
+
+	struct nvhost_channel *channel;	/* Channel assigned for the module */
+	struct kobject *power_kobj;	/* kobject to hold power sysfs entries */
+	struct nvhost_device_power_attr *power_attrib;	/* sysfs attributes */
+
+	bool is_dynamic;
+
+	struct reset_control *rst;
+};
+
+struct nvhost_device_power_attr {
+	struct nvhost_device *ndev;
+	struct kobj_attribute power_attr[NVHOST_POWER_SYSFS_ATTRIB_MAX];
+};
+
+/* Register devices to nvhost bus */
+extern int nvhost_add_devices(struct nvhost_device **, int num);
+
+/* Register device to nvhost bus */
+extern int nvhost_device_register(struct nvhost_device *);
+
+/* Deregister device from nvhost bus */
+extern void nvhost_device_unregister(struct nvhost_device *);
+
+struct nvhost_driver {
+	int (*probe)(struct nvhost_device *, struct nvhost_device_id *);
+	int (*remove)(struct nvhost_device *);
+	void (*shutdown)(struct nvhost_device *);
+	int (*suspend)(struct nvhost_device *, pm_message_t state);
+	int (*resume)(struct nvhost_device *);
+	struct device_driver driver;
+
+	struct nvhost_device_id *id_table;
+
+	/* Finalize power on. Can be used for context restore. */
+	void (*finalize_poweron)(struct nvhost_device *dev);
+
+	/* Device is busy. */
+	void (*busy)(struct nvhost_device *);
+
+	/* Device is idle. */
+	void (*idle)(struct nvhost_device *);
+
+	/* Device is going to be suspended */
+	void (*suspend_ndev)(struct nvhost_device *);
+
+	/* Device is initialized */
+	void (*init)(struct nvhost_device *dev);
+
+	/* Device is de-initialized. */
+	void (*deinit)(struct nvhost_device *dev);
+
+	/* Preparing for power off. Used for context save. */
+	int (*prepare_poweroff)(struct nvhost_device *dev);
+
+	/* Allocates a context handler for the device */
+	struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt,
+			u32 waitbase, struct nvhost_channel *ch);
+
+	/* Clock gating callbacks */
+	int (*prepare_clockoff)(struct nvhost_device *dev);
+	void (*finalize_clockon)(struct nvhost_device *dev);
+};
+
+extern int nvhost_driver_register(struct nvhost_driver *);
+extern void nvhost_driver_unregister(struct nvhost_driver *);
+extern struct resource *nvhost_get_resource(struct nvhost_device *,
+		unsigned int, unsigned int);
+extern int nvhost_get_irq(struct nvhost_device *, unsigned int);
+extern struct resource *nvhost_get_resource_byname(struct nvhost_device *,
+		unsigned int, const char *);
+extern int nvhost_get_irq_byname(struct nvhost_device *, const char *);
+
+#define to_nvhost_device(x)	container_of((x), struct nvhost_device, dev)
+#define to_nvhost_driver(drv)	(container_of((drv), struct nvhost_driver, \
+				 driver))
+
+#define nvhost_get_drvdata(_dev)	dev_get_drvdata(&(_dev)->dev)
+#define nvhost_set_drvdata(_dev, data)	dev_set_drvdata(&(_dev)->dev, (data))
+
+int nvhost_bus_add_host(struct nvhost_master *host);
+int nvhost_bus_init(void);
+
+static inline struct nvhost_device *nvhost_get_parent(struct nvhost_device *_dev)
+{
+	return _dev->dev.parent ? to_nvhost_device(_dev->dev.parent) : NULL;
+}
+
+/* public host1x power management APIs */
+bool nvhost_module_powered_ext(struct nvhost_device *dev);
+void nvhost_module_busy_ext(struct nvhost_device *dev);
+void nvhost_module_idle_ext(struct nvhost_device *dev);
+
+/* public host1x sync-point management APIs */
+u32 nvhost_syncpt_incr_max_ext(struct nvhost_device *dev, u32 id, u32 incrs);
+void nvhost_syncpt_cpu_incr_ext(struct nvhost_device *dev, u32 id);
+u32 nvhost_syncpt_read_ext(struct nvhost_device *dev, u32 id);
+int nvhost_syncpt_wait_timeout_ext(struct nvhost_device *dev, u32 id, u32 thresh,
+	u32 timeout, u32 *value);
+
+void nvhost_scale3d_set_throughput_hint(int hint);
+
+int of_nvhost_device_create(struct device_node *np, const char *bus_id,
+			    void *aux_dev);
+
+extern struct nvhost_device tegra_host1x01_t20_device;
+extern struct nvhost_device tegra_display01_t20_device;
+extern struct nvhost_device tegra_gr3d01_t20_device;
+extern struct nvhost_device tegra_gr2d01_t20_device;
+extern struct nvhost_device tegra_isp01_t20_device;
+extern struct nvhost_device tegra_vi01_t20_device;
+extern struct nvhost_device tegra_mpe01_t20_device;
+extern struct nvhost_device tegra_dsi01_t20_device;
+
+#define NVHOST_T20_OF_DEV_AUXDATA \
+	OF_DEV_AUXDATA("nvidia,tegra20-host1x", 0x50000000, "host1x", \
+		       &tegra_host1x01_t20_device), \
+	OF_DEV_AUXDATA("nvidia,tegra20-display", 0, "display", \
+		       &tegra_display01_t20_device), \
+	OF_DEV_AUXDATA("nvidia,tegra20-gr3d", 0x54180000, "gr3d", \
+		       &tegra_gr3d01_t20_device), \
+	OF_DEV_AUXDATA("nvidia,tegra20-gr2d", 0x54140000, "gr2d", \
+		       &tegra_gr2d01_t20_device), \
+	OF_DEV_AUXDATA("nvidia,tegra20-isp", 0x54100000, "isp", \
+		       &tegra_isp01_t20_device), \
+	OF_DEV_AUXDATA("nvidia,tegra20-vi", 0x54080000, "vi", \
+		       &tegra_vi01_t20_device), \
+	OF_DEV_AUXDATA("nvidia,tegra20-mpe", 0x54040000, "mpe", \
+		       &tegra_mpe01_t20_device), \
+	OF_DEV_AUXDATA("nvidia,tegra20-dsi", 0x54300000, "dsi", \
+		       &tegra_dsi01_t20_device), \
+	OF_DEV_AUXDATA("nvidia,tegra20-dc", 0x54200000, "tegradc.0", NULL), \
+	OF_DEV_AUXDATA("nvidia,tegra20-dc", 0x54240000, "tegradc.1", NULL) \
+
+extern struct of_dev_auxdata tegra20_auxdata_lookup[];
+#endif
diff --git a/drivers/staging/tegra/include/linux/nvhost_ioctl.h b/drivers/staging/tegra/include/linux/nvhost_ioctl.h
new file mode 100644
index 000000000000..3f6eabf522f1
--- /dev/null
+++ b/drivers/staging/tegra/include/linux/nvhost_ioctl.h
@@ -0,0 +1,213 @@
+/*
+ * include/linux/nvhost_ioctl.h
+ *
+ * Tegra graphics host driver
+ *
+ * Copyright (c) 2009-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __LINUX_NVHOST_IOCTL_H
+#define __LINUX_NVHOST_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#define NVHOST_INVALID_SYNCPOINT 0xFFFFFFFF
+#define NVHOST_NO_TIMEOUT (-1)
+#define NVHOST_NO_CONTEXT 0x0
+#define NVHOST_IOCTL_MAGIC 'H'
+#define NVHOST_PRIORITY_LOW 50
+#define NVHOST_PRIORITY_MEDIUM 100
+#define NVHOST_PRIORITY_HIGH 150
+
+/* version 0 header (used with write() submit interface) */
+struct nvhost_submit_hdr {
+	__u32 syncpt_id;
+	__u32 syncpt_incrs;
+	__u32 num_cmdbufs;
+	__u32 num_relocs;
+};
+
+#define NVHOST_SUBMIT_VERSION_V0		0x0
+#define NVHOST_SUBMIT_VERSION_V1		0x1
+#define NVHOST_SUBMIT_VERSION_V2		0x2
+#define NVHOST_SUBMIT_VERSION_MAX_SUPPORTED	NVHOST_SUBMIT_VERSION_V2
+
+/* version 1 header (used with ioctl() submit interface) */
+struct nvhost_submit_hdr_ext {
+	__u32 syncpt_id;	/* version 0 fields */
+	__u32 syncpt_incrs;
+	__u32 num_cmdbufs;
+	__u32 num_relocs;
+	__u32 submit_version;	/* version 1 fields */
+	__u32 num_waitchks;
+	__u32 waitchk_mask;
+	__u32 pad[5];		/* future expansion */
+};
+
+struct nvhost_cmdbuf {
+	__u32 mem;
+	__u32 offset;
+	__u32 words;
+};
+
+struct nvhost_reloc {
+	__u32 cmdbuf_mem;
+	__u32 cmdbuf_offset;
+	__u32 target;
+	__u32 target_offset;
+};
+
+struct nvhost_reloc_shift {
+	__u32 shift;
+};
+
+struct nvhost_waitchk {
+	__u32 mem;
+	__u32 offset;
+	__u32 syncpt_id;
+	__u32 thresh;
+};
+
+struct nvhost_get_param_args {
+	__u32 value;
+};
+
+struct nvhost_set_nvmap_fd_args {
+	__u32 fd;
+};
+
+struct nvhost_read_3d_reg_args {
+	__u32 offset;
+	__u32 value;
+};
+
+struct nvhost_clk_rate_args {
+	__u64 rate;
+};
+
+struct nvhost_set_timeout_args {
+	__u32 timeout;
+};
+
+struct nvhost_set_priority_args {
+	__u32 priority;
+};
+
+#define NVHOST_IOCTL_CHANNEL_FLUSH		\
+	_IOR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS	\
+	_IOR(NVHOST_IOCTL_MAGIC, 2, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_WAITBASES	\
+	_IOR(NVHOST_IOCTL_MAGIC, 3, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES	\
+	_IOR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD	\
+	_IOW(NVHOST_IOCTL_MAGIC, 5, struct nvhost_set_nvmap_fd_args)
+#define NVHOST_IOCTL_CHANNEL_NULL_KICKOFF	\
+	_IOR(NVHOST_IOCTL_MAGIC, 6, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_SUBMIT_EXT		\
+	_IOW(NVHOST_IOCTL_MAGIC, 7, struct nvhost_submit_hdr_ext)
+#define NVHOST_IOCTL_CHANNEL_READ_3D_REG \
+	_IOWR(NVHOST_IOCTL_MAGIC, 8, struct nvhost_read_3d_reg_args)
+#define NVHOST_IOCTL_CHANNEL_GET_CLK_RATE		\
+	_IOR(NVHOST_IOCTL_MAGIC, 9, struct nvhost_clk_rate_args)
+#define NVHOST_IOCTL_CHANNEL_SET_CLK_RATE		\
+	_IOW(NVHOST_IOCTL_MAGIC, 10, struct nvhost_clk_rate_args)
+#define NVHOST_IOCTL_CHANNEL_SET_TIMEOUT	\
+	_IOW(NVHOST_IOCTL_MAGIC, 11, struct nvhost_set_timeout_args)
+#define NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT	\
+	_IOR(NVHOST_IOCTL_MAGIC, 12, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_SET_PRIORITY	\
+	_IOW(NVHOST_IOCTL_MAGIC, 13, struct nvhost_set_priority_args)
+#define NVHOST_IOCTL_CHANNEL_LAST		\
+	_IOC_NR(NVHOST_IOCTL_CHANNEL_SET_PRIORITY)
+#define NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE sizeof(struct nvhost_submit_hdr_ext)
+
+struct nvhost_ctrl_syncpt_read_args {
+	__u32 id;
+	__u32 value;
+};
+
+struct nvhost_ctrl_syncpt_incr_args {
+	__u32 id;
+};
+
+struct nvhost_ctrl_syncpt_wait_args {
+	__u32 id;
+	__u32 thresh;
+	__s32 timeout;
+};
+
+struct nvhost_ctrl_syncpt_waitex_args {
+	__u32 id;
+	__u32 thresh;
+	__s32 timeout;
+	__u32 value;
+};
+
+struct nvhost_ctrl_module_mutex_args {
+	__u32 id;
+	__u32 lock;
+};
+
+enum nvhost_module_id {
+	NVHOST_MODULE_NONE = -1,
+	NVHOST_MODULE_DISPLAY_A = 0,
+	NVHOST_MODULE_DISPLAY_B,
+	NVHOST_MODULE_VI,
+	NVHOST_MODULE_ISP,
+	NVHOST_MODULE_MPE,
+};
+
+struct nvhost_ctrl_module_regrdwr_args {
+	__u32 id;
+	__u32 num_offsets;
+	__u32 block_size;
+	__u32 *offsets;
+	__u32 *values;
+	__u32 write;
+};
+
+#define NVHOST_IOCTL_CTRL_SYNCPT_READ		\
+	_IOWR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_ctrl_syncpt_read_args)
+#define NVHOST_IOCTL_CTRL_SYNCPT_INCR		\
+	_IOW(NVHOST_IOCTL_MAGIC, 2, struct nvhost_ctrl_syncpt_incr_args)
+#define NVHOST_IOCTL_CTRL_SYNCPT_WAIT		\
+	_IOW(NVHOST_IOCTL_MAGIC, 3, struct nvhost_ctrl_syncpt_wait_args)
+
+#define NVHOST_IOCTL_CTRL_MODULE_MUTEX		\
+	_IOWR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_ctrl_module_mutex_args)
+#define NVHOST_IOCTL_CTRL_MODULE_REGRDWR	\
+	_IOWR(NVHOST_IOCTL_MAGIC, 5, struct nvhost_ctrl_module_regrdwr_args)
+
+#define NVHOST_IOCTL_CTRL_SYNCPT_WAITEX		\
+	_IOWR(NVHOST_IOCTL_MAGIC, 6, struct nvhost_ctrl_syncpt_waitex_args)
+
+#define NVHOST_IOCTL_CTRL_GET_VERSION	\
+	_IOR(NVHOST_IOCTL_MAGIC, 7, struct nvhost_get_param_args)
+
+#define NVHOST_IOCTL_CTRL_LAST			\
+	_IOC_NR(NVHOST_IOCTL_CTRL_GET_VERSION)
+#define NVHOST_IOCTL_CTRL_MAX_ARG_SIZE	\
+	sizeof(struct nvhost_ctrl_module_regrdwr_args)
+
+#endif
diff --git a/drivers/staging/tegra/include/linux/nvmap.h b/drivers/staging/tegra/include/linux/nvmap.h
new file mode 100644
index 000000000000..7d8b248787bb
--- /dev/null
+++ b/drivers/staging/tegra/include/linux/nvmap.h
@@ -0,0 +1,144 @@
+/*
+ * include/linux/nvmap.h
+ *
+ * structure declarations for nvmem and nvmap user-space ioctls
+ *
+ * Copyright (c) 2009-2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/ioctl.h>
+#include <linux/file.h>
+#include <linux/rbtree.h>
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#ifndef _LINUX_NVMAP_H
+#define _LINUX_NVMAP_H
+
+#define NVMAP_HEAP_SYSMEM  (1ul<<31)
+#define NVMAP_HEAP_IOVMM   (1ul<<30)
+
+/* common carveout heaps */
+#define NVMAP_HEAP_CARVEOUT_IRAM    (1ul<<29)
+#define NVMAP_HEAP_CARVEOUT_VPR     (1ul<<28)
+#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
+
+#define NVMAP_HEAP_CARVEOUT_MASK    (NVMAP_HEAP_IOVMM - 1)
+
+/* allocation flags */
+#define NVMAP_HANDLE_UNCACHEABLE     (0x0ul << 0)
+#define NVMAP_HANDLE_WRITE_COMBINE   (0x1ul << 0)
+#define NVMAP_HANDLE_INNER_CACHEABLE (0x2ul << 0)
+#define NVMAP_HANDLE_CACHEABLE       (0x3ul << 0)
+#define NVMAP_HANDLE_CACHE_FLAG      (0x3ul << 0)
+
+#define NVMAP_HANDLE_SECURE          (0x1ul << 2)
+#define NVMAP_HANDLE_ZEROED_PAGES    (0x1ul << 3)
+
+#if defined(__KERNEL__)
+
+#if defined(CONFIG_TEGRA_NVMAP)
+struct nvmap_handle;
+struct nvmap_client;
+struct nvmap_device;
+
+#define nvmap_ref_to_handle(_ref) (*(struct nvmap_handle **)(_ref))
+/* Convert User space handle to Kernel. */
+#define nvmap_convert_handle_u2k(h) (h)
+
+/* handle_ref objects are client-local references to an nvmap_handle;
+ * they are distinct objects so that handles can be unpinned and
+ * unreferenced the correct number of times when a client abnormally
+ * terminates */
+struct nvmap_handle_ref {
+	struct nvmap_handle *handle;
+	struct rb_node	node;
+	atomic_t	dupes;	/* number of times to free on file close */
+	atomic_t	pin;	/* number of times to unpin on free */
+};
+
+#elif defined(CONFIG_ION_TEGRA)
+/* For Ion Mem Manager support through nvmap_* API's. */
+#include "../../../../../drivers/gpu/ion/ion_priv.h"
+#define nvmap_client ion_client
+#define nvmap_device ion_device
+#define nvmap_handle ion_handle
+#define nvmap_handle_ref ion_handle
+#define nvmap_ref_to_handle(_ref) (struct ion_handle *)_ref
+/* Convert User space handle to Kernel. */
+#define nvmap_convert_handle_u2k(h) ({ \
+	if ((u32)h >= TASK_SIZE) { \
+		pr_err("Invalid user space handle."); \
+		BUG(); \
+	} \
+	(*((u32 *)h)); })
+
+#endif /* CONFIG_ION_TEGRA */
+
+#define nvmap_id_to_handle(_id) ((struct nvmap_handle *)(_id))
+
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+					 const char *name);
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+				     size_t align, unsigned int flags,
+				     unsigned int heap_mask);
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r);
+
+void *nvmap_mmap(struct nvmap_handle_ref *r);
+
+void nvmap_munmap(struct nvmap_handle_ref *r, void *addr);
+
+struct nvmap_client *nvmap_client_get_file(int fd);
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client);
+
+void nvmap_client_put(struct nvmap_client *c);
+
+phys_addr_t nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r);
+
+phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id);
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r);
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+			 struct nvmap_handle **h, int nr);
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+						   unsigned long id);
+
+struct nvmap_platform_carveout {
+	const char *name;
+	unsigned int usage_mask;
+	phys_addr_t base;
+	size_t size;
+	size_t buddy_size;
+};
+
+struct nvmap_platform_data {
+	struct nvmap_platform_carveout *carveouts;
+	unsigned int nr_carveouts;
+};
+
+extern struct nvmap_device *nvmap_dev;
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_NVMAP_H */
diff --git a/drivers/staging/tegra/include/linux/tegra_overlay.h b/drivers/staging/tegra/include/linux/tegra_overlay.h
new file mode 100644
index 000000000000..2a6025afdad7
--- /dev/null
+++ b/drivers/staging/tegra/include/linux/tegra_overlay.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 NVIDIA Corporation
+ * Author: Dan Willemsen <dwillemsen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_OVERLAY_H
+#define __LINUX_TEGRA_OVERLAY_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <video/tegrafb.h>
+
+#define TEGRA_FB_WIN_BLEND_NONE		0
+#define TEGRA_FB_WIN_BLEND_PREMULT	1
+#define TEGRA_FB_WIN_BLEND_COVERAGE	2
+
+#define TEGRA_FB_WIN_FLAG_INVERT_H	(1 << 0)
+#define TEGRA_FB_WIN_FLAG_INVERT_V	(1 << 1)
+#define TEGRA_FB_WIN_FLAG_TILED		(1 << 2)
+
+/* set index to -1 to ignore window data */
+struct tegra_overlay_windowattr {
+	__s32	index;
+	__u32	buff_id;
+	__u32	blend;
+	__u32	offset;
+	__u32	offset_u;
+	__u32	offset_v;
+	__u32	stride;
+	__u32	stride_uv;
+	__u32	pixformat;
+	__u32	x;
+	__u32	y;
+	__u32	w;
+	__u32	h;
+	__u32	out_x;
+	__u32	out_y;
+	__u32	out_w;
+	__u32	out_h;
+	__u32	z;
+	__u32	pre_syncpt_id;
+	__u32	pre_syncpt_val;
+	__u32	hfilter;
+	__u32	vfilter;
+	__u32	do_not_use__tiled; /* compatibility */
+	__u32	flags;
+};
+
+#define TEGRA_OVERLAY_FLIP_FLAG_BLEND_REORDER (1 << 0)
+#define TEGRA_FB_FLIP_N_WINDOWS			3
+
+struct tegra_overlay_flip_args {
+	struct tegra_overlay_windowattr win[TEGRA_FB_FLIP_N_WINDOWS];
+	__u32 post_syncpt_id;
+	__u32 post_syncpt_val;
+	__u32 flags;
+};
+
+#define TEGRA_OVERLAY_IOCTL_MAGIC		'O'
+
+#define TEGRA_OVERLAY_IOCTL_OPEN_WINDOW		_IOWR(TEGRA_OVERLAY_IOCTL_MAGIC, 0x40, __u32)
+#define TEGRA_OVERLAY_IOCTL_CLOSE_WINDOW	_IOW(TEGRA_OVERLAY_IOCTL_MAGIC, 0x41, __u32)
+#define TEGRA_OVERLAY_IOCTL_FLIP		_IOW(TEGRA_OVERLAY_IOCTL_MAGIC, 0x42, struct tegra_overlay_flip_args)
+#define TEGRA_OVERLAY_IOCTL_SET_NVMAP_FD	_IOW(TEGRA_OVERLAY_IOCTL_MAGIC, 0x43, __u32)
+
+#define TEGRA_OVERLAY_IOCTL_MIN_NR		_IOC_NR(TEGRA_OVERLAY_IOCTL_OPEN_WINDOW)
+#define TEGRA_OVERLAY_IOCTL_MAX_NR		_IOC_NR(TEGRA_OVERLAY_IOCTL_SET_NVMAP_FD)
+
+#endif
diff --git a/drivers/staging/tegra/include/mach/clk.h b/drivers/staging/tegra/include/mach/clk.h
new file mode 100644
index 000000000000..5028445819d6
--- /dev/null
+++ b/drivers/staging/tegra/include/mach/clk.h
@@ -0,0 +1,47 @@
+/*
+ * arch/arm/mach-tegra/include/mach/clk.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ *	Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_CLK_H
+#define __MACH_CLK_H
+
+struct clk;
+
+enum tegra_clk_ex_param {
+	TEGRA_CLK_VI_INP_SEL,
+	TEGRA_CLK_DTV_INVERT,
+	TEGRA_CLK_NAND_PAD_DIV2_ENB,
+	TEGRA_CLK_PLLD_CSI_OUT_ENB,
+	TEGRA_CLK_PLLD_DSI_OUT_ENB,
+	TEGRA_CLK_PLLD_MIPI_MUX_SEL,
+};
+
+#define tegra_periph_reset_assert(...) do {} while (0)
+#define tegra_periph_reset_deassert(...) do {} while (0)
+#define tegra_clk_cfg_ex(...) do {} while (0)
+
+int tegra_is_clk_enabled(struct clk *clk);
+
+static inline int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
+{
+	int ret = clk_set_rate(c, rate);
+	BUG_ON(ret != 0);
+	return ret;
+}
+
+#endif
diff --git a/drivers/staging/tegra/include/mach/csi.h b/drivers/staging/tegra/include/mach/csi.h
new file mode 100644
index 000000000000..575de6fb497e
--- /dev/null
+++ b/drivers/staging/tegra/include/mach/csi.h
@@ -0,0 +1,40 @@
+/*
+ * arch/arm/mach-tegra/include/mach/csi.h
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_CSI_H
+#define __MACH_TEGRA_CSI_H
+
+#define CSI_CILA_MIPI_CAL_CONFIG_0 0x22a
+#define  MIPI_CAL_TERMOSA(x)		(((x) & 0x1f) << 0)
+
+#define CSI_CILB_MIPI_CAL_CONFIG_0 0x22b
+#define  MIPI_CAL_TERMOSB(x)		(((x) & 0x1f) << 0)
+
+#define CSI_CIL_PAD_CONFIG 0x229
+#define  PAD_CIL_PDVREG(x)		(((x) & 0x01) << 1)
+
+#define CSI_DSI_MIPI_CAL_CONFIG	0x234
+#define  MIPI_CAL_HSPDOSD(x)		(((x) & 0x1f) << 16)
+#define  MIPI_CAL_HSPUOSD(x)		(((x) & 0x1f) << 8)
+
+#define CSI_MIPIBIAS_PAD_CONFIG	0x235
+#define  PAD_DRIV_DN_REF(x)		(((x) & 0x7) << 16)
+#define  PAD_DRIV_UP_REF(x)		(((x) & 0x7) << 8)
+
+int tegra_vi_csi_readl(u32 offset, u32 *val);
+int tegra_vi_csi_writel(u32 value, u32 offset);
+
+#endif
diff --git a/drivers/staging/tegra/include/mach/fuse.h b/drivers/staging/tegra/include/mach/fuse.h
new file mode 100644
index 000000000000..02a588ed5477
--- /dev/null
+++ b/drivers/staging/tegra/include/mach/fuse.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ *	Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <soc/tegra/fuse.h>
+
+#define tegra_chip_id	tegra_get_chip_id()
diff --git a/drivers/staging/tegra/include/mach/hdmi-audio.h b/drivers/staging/tegra/include/mach/hdmi-audio.h
new file mode 100644
index 000000000000..35555504037a
--- /dev/null
+++ b/drivers/staging/tegra/include/mach/hdmi-audio.h
@@ -0,0 +1,47 @@
+/*
+ * arch/arm/mach-tegra/include/mach/hdmi-audio.h
+ *
+ * Copyright (c) 2008-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_HDMI_AUDIO_H
+#define __MACH_TEGRA_HDMI_AUDIO_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+enum {
+	AUDIO_FREQ_32K = 32000,
+	AUDIO_FREQ_44_1K = 44100,
+	AUDIO_FREQ_48K = 48000,
+	AUDIO_FREQ_88_2K = 88200,
+	AUDIO_FREQ_96K = 96000,
+	AUDIO_FREQ_176_4K = 176400,
+	AUDIO_FREQ_192K = 192000,
+};
+
+enum {
+	AUTO = 0,
+	SPDIF,
+	HDA,
+};
+
+int tegra_hdmi_setup_audio_freq_source(unsigned audio_freq, unsigned audio_source);
+int tegra_hdmi_setup_hda_presence(void);
+int tegra_hdmi_audio_null_sample_inject(bool on);
+
+#endif /* __MACH_TEGRA_HDMI_AUDIO_H */
diff --git a/drivers/staging/tegra/include/mach/iovmm.h b/drivers/staging/tegra/include/mach/iovmm.h
new file mode 100644
index 000000000000..7459410ef0e8
--- /dev/null
+++ b/drivers/staging/tegra/include/mach/iovmm.h
@@ -0,0 +1,352 @@
+/*
+ * arch/arm/mach-tegra/include/mach/iovmm.h
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#ifndef _MACH_TEGRA_IOVMM_H_
+#define _MACH_TEGRA_IOVMM_H_
+
+typedef u32 tegra_iovmm_addr_t;
+
+struct tegra_iovmm_device_ops;
+
+/*
+ * each I/O virtual memory manager unit should register a device with
+ * the iovmm system
+ */
+struct tegra_iovmm_device {
+	struct tegra_iovmm_device_ops	*ops;
+	const char			*name;
+	struct list_head		list;
+	int				pgsize_bits;
+};
+
+/*
+ * tegra_iovmm_domain serves a purpose analogous to mm_struct as defined in
+ * <linux/mm_types.h> - it defines a virtual address space within which
+ * tegra_iovmm_areas can be created.
+ */
+struct tegra_iovmm_domain {
+	atomic_t		clients;
+	atomic_t		locks;
+	spinlock_t		block_lock;  /* RB-tree for iovmm_area blocks */
+	unsigned long		flags;
+	wait_queue_head_t	delay_lock;  /* when lock_client fails */
+	struct rw_semaphore	map_lock;
+	struct rb_root		all_blocks;  /* ordered by address */
+	struct rb_root		free_blocks; /* ordered by size */
+	struct tegra_iovmm_device *dev;
+};
+
+/*
+ * tegra_iovmm_client is analogous to an individual task in the task group
+ * which owns an mm_struct.
+ */
+
+struct iovmm_share_group;
+
+#if !defined(CONFIG_IOMMU_API)
+
+struct tegra_iovmm_client {
+	const char			*name;
+	unsigned long			flags;
+	struct iovmm_share_group	*group;
+	struct tegra_iovmm_domain	*domain;
+	struct miscdevice		*misc_dev;
+	struct list_head		list;
+};
+
+/*
+ * tegra_iovmm_area serves a purpose analogous to vm_area_struct as defined
+ * in <linux/mm_types.h> - it defines a virtual memory area which can be
+ * mapped to physical memory by a client-provided mapping function.
+ */
+
+struct tegra_iovmm_area {
+	struct tegra_iovmm_domain	*domain;
+	tegra_iovmm_addr_t		iovm_start;
+	size_t				iovm_length;
+	pgprot_t			pgprot;
+	struct tegra_iovmm_area_ops	*ops;
+};
+
+#else	/* CONFIG_IOMMU_API */
+
+/*
+ * To replace IOVMM with IOMMU backend
+ */
+
+struct tegra_iovmm_client {
+	struct device *dev;
+};
+
+struct tegra_iovmm_area {
+	dma_addr_t		iovm_start;
+	size_t			iovm_length;
+	pgprot_t		pgprot;
+	struct device		*dev;
+};
+
+#endif /* CONFIG_IOMMU_API */
+
+struct tegra_iovmm_device_ops {
+	/* maps a VMA using the page residency functions provided by the VMA */
+	int (*map)(struct tegra_iovmm_domain *domain,
+		struct tegra_iovmm_area *io_vma);
+	/*
+	 * marks all PTEs in a VMA as invalid; decommits the virtual address
+	 * space (potentially freeing PDEs when decommit is true)
+	 */
+	void (*unmap)(struct tegra_iovmm_domain *domain,
+		struct tegra_iovmm_area *io_vma, bool decommit);
+	void (*map_pfn)(struct tegra_iovmm_domain *domain,
+		struct tegra_iovmm_area *io_vma,
+		unsigned long offs, unsigned long pfn);
+	/*
+	 * ensures that a domain is resident in the hardware's mapping region
+	 * so that it may be used by a client
+	 */
+	int (*lock_domain)(struct tegra_iovmm_domain *domain,
+		struct tegra_iovmm_client *client);
+	void (*unlock_domain)(struct tegra_iovmm_domain *domain,
+		struct tegra_iovmm_client *client);
+	/*
+	 * allocates a vmm_domain for the specified client; may return the same
+	 * domain for multiple clients
+	 */
+	struct tegra_iovmm_domain *(*alloc_domain)(
+		struct tegra_iovmm_device *dev,
+		struct tegra_iovmm_client *client);
+	void (*free_domain)(struct tegra_iovmm_domain *domain,
+		struct tegra_iovmm_client *client);
+	int (*suspend)(struct tegra_iovmm_device *dev);
+	void (*resume)(struct tegra_iovmm_device *dev);
+};
+
+struct tegra_iovmm_area_ops {
+	/*
+	 * ensures that the page of data starting at the specified offset
+	 * from the start of the iovma is resident and pinned for use by
+	 * DMA, returns the system pfn, or an invalid pfn if the
+	 * operation fails.
+	 */
+	unsigned long (*lock_makeresident)(struct tegra_iovmm_area *area,
+		tegra_iovmm_addr_t offs);
+	/* called when the page is unmapped from the I/O VMA */
+	void (*release)(struct tegra_iovmm_area *area, tegra_iovmm_addr_t offs);
+};
+
+#ifdef CONFIG_TEGRA_IOVMM
+/*
+ * called by clients to allocate an I/O VMM client mapping context which
+ * will be shared by all clients in the same share_group
+ */
+struct tegra_iovmm_client *__tegra_iovmm_alloc_client(const char *name,
+	const char *share_group, struct miscdevice *misc_dev);
+
+static inline struct tegra_iovmm_client *tegra_iovmm_alloc_client(
+	struct device *dev, const char *share_group,
+	struct miscdevice *misc_dev)
+{
+	return __tegra_iovmm_alloc_client(dev_name(dev), share_group, misc_dev);
+}
+
+size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client);
+
+void tegra_iovmm_free_client(struct tegra_iovmm_client *client);
+
+/*
+ * called by clients to ensure that their mapping context is resident
+ * before performing any DMA operations addressing I/O VMM regions.
+ * client_lock may return -EINTR.
+ */
+int tegra_iovmm_client_lock(struct tegra_iovmm_client *client);
+int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client);
+
+/* called by clients after DMA operations are complete */
+void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client);
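+
+/*
+ * Hypothetical usage (a sketch; local names are illustrative, the calls are
+ * the ones declared above) - pin the mapping context around a DMA operation:
+ *
+ *	err = tegra_iovmm_client_lock(client);
+ *	if (err)
+ *		return err;
+ *	... program and run DMA against I/O VM addresses ...
+ *	tegra_iovmm_client_unlock(client);
+ */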
+
+/*
+ * called by clients to allocate a new iovmm_area and reserve I/O virtual
+ * address space for it. if ops is NULL, clients should subsequently call
+ * tegra_iovmm_vm_map_pages and/or tegra_iovmm_vm_insert_pfn to explicitly
+ * map the I/O virtual address to an OS-allocated page or physical address,
+ * respectively. VM operations may be called before this call returns
+ */
+struct tegra_iovmm_area *tegra_iovmm_create_vm(
+	struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
+	size_t size, size_t align, pgprot_t pgprot, unsigned long iovm_start);
+
+/*
+ * called by clients to "zap" an iovmm_area, and replace all mappings
+ * in it with invalid ones, without freeing the virtual address range
+ */
+void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm);
+
+/*
+ * after zapping a demand-loaded iovmm_area, the client should unzap it
+ * to allow the VMM device to remap the page range.
+ */
+void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm);
+
+/* called by clients to return an iovmm_area to the free pool for the domain */
+void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm);
+
+/* returns size of largest free iovm block */
+size_t tegra_iovmm_get_max_free(struct tegra_iovmm_client *client);
+
+/*
+ * called by client software to map the page-aligned I/O address vaddr to
+ * a specific physical address pfn. I/O VMA should have been created with
+ * a NULL tegra_iovmm_area_ops structure.
+ */
+void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *area,
+	tegra_iovmm_addr_t vaddr, unsigned long pfn);
+
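+/*
+ * Example (illustrative only, assuming a page-sized area and a pfn the
+ * caller already holds): reserving a region and backing its first page
+ * when no area ops were supplied:
+ *
+ *	area = tegra_iovmm_create_vm(client, NULL, PAGE_SIZE, PAGE_SIZE,
+ *				     pgprot_noncached(PAGE_KERNEL), 0);
+ *	if (area)
+ *		tegra_iovmm_vm_insert_pfn(area, area->iovm_start, pfn);
+ */
+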
+/*
+ * called by clients to return the iovmm_area containing addr, or NULL if
+ * addr has not been allocated. caller should call tegra_iovmm_area_put when
+ * finished using the returned pointer
+ */
+struct tegra_iovmm_area *tegra_iovmm_find_area_get(
+	struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr);
+
+struct tegra_iovmm_area *tegra_iovmm_area_get(struct tegra_iovmm_area *vm);
+void tegra_iovmm_area_put(struct tegra_iovmm_area *vm);
+
+/* called by drivers to initialize a tegra_iovmm_domain structure */
+int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
+	struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
+	tegra_iovmm_addr_t end);
+
+/* called by drivers to register an I/O VMM device with the system */
+int tegra_iovmm_register(struct tegra_iovmm_device *dev);
+
+/* called by drivers to remove an I/O VMM device from the system */
+int tegra_iovmm_unregister(struct tegra_iovmm_device *dev);
+
+#else /* CONFIG_TEGRA_IOVMM */
+
+static inline struct tegra_iovmm_client *tegra_iovmm_alloc_client(
+	struct device *dev, const char *share_group,
+	struct miscdevice *misc_dev)
+{
+	return NULL;
+}
+
+static inline size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client)
+{
+	return 0;
+}
+
+static inline void tegra_iovmm_free_client(struct tegra_iovmm_client *client)
+{
+}
+
+static inline int tegra_iovmm_client_lock(struct tegra_iovmm_client *client)
+{
+	return 0;
+}
+
+static inline int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client)
+{
+	return 0;
+}
+
+static inline void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client)
+{
+}
+
+static inline struct tegra_iovmm_area *tegra_iovmm_create_vm(
+	struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
+	size_t size, size_t align, pgprot_t pgprot, unsigned long iovm_start)
+{
+	return NULL;
+}
+
+static inline void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm)
+{
+}
+
+static inline void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm)
+{
+}
+
+static inline void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm)
+{
+}
+
+static inline size_t tegra_iovmm_get_max_free(struct tegra_iovmm_client *client)
+{
+	return 0;
+}
+
+static inline void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *area,
+	tegra_iovmm_addr_t vaddr, unsigned long pfn)
+{
+}
+
+static inline struct tegra_iovmm_area *tegra_iovmm_find_area_get(
+	struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr)
+{
+	return NULL;
+}
+
+static inline struct tegra_iovmm_area *tegra_iovmm_area_get(
+	struct tegra_iovmm_area *vm)
+{
+	return NULL;
+}
+
+static inline void tegra_iovmm_area_put(struct tegra_iovmm_area *vm)
+{
+}
+
+static inline int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
+	struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
+	tegra_iovmm_addr_t end)
+{
+	return 0;
+}
+
+static inline int tegra_iovmm_register(struct tegra_iovmm_device *dev)
+{
+	return 0;
+}
+
+static inline int tegra_iovmm_unregister(struct tegra_iovmm_device *dev)
+{
+	return 0;
+}
+
+static inline int tegra_iovmm_suspend(void)
+{
+	return 0;
+}
+
+static inline void tegra_iovmm_resume(void)
+{
+}
+
+#endif /* CONFIG_TEGRA_IOVMM */
+#endif /* _MACH_TEGRA_IOVMM_H_*/
diff --git a/drivers/staging/tegra/include/mach/kfuse.h b/drivers/staging/tegra/include/mach/kfuse.h
new file mode 100644
index 000000000000..bb849a0c05aa
--- /dev/null
+++ b/drivers/staging/tegra/include/mach/kfuse.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-tegra/kfuse.h
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_KFUSE_H
+#define _MACH_TEGRA_KFUSE_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+/* there are 144 32-bit values in total */
+#define KFUSE_DATA_SZ (144 * 4)
+
+/* stub - always fails; no kfuse hardware driver is carried in this tree */
+static inline int tegra_kfuse_read(void *dest, size_t len)
+{
+	return -EINVAL;
+}
+
+#endif /* _MACH_TEGRA_KFUSE_H */
diff --git a/drivers/staging/tegra/include/mach/latency_allowance.h b/drivers/staging/tegra/include/mach/latency_allowance.h
new file mode 100644
index 000000000000..37ea791ce882
--- /dev/null
+++ b/drivers/staging/tegra/include/mach/latency_allowance.h
@@ -0,0 +1,109 @@
+/*
+ * arch/arm/mach-tegra/include/mach/latency_allowance.h
+ *
+ * Copyright (C) 2011-2012 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_LATENCY_ALLOWANCE_H_
+#define _MACH_TEGRA_LATENCY_ALLOWANCE_H_
+
+enum tegra_la_id {
+	TEGRA_LA_AFIR = 0,
+	TEGRA_LA_AFIW,
+	TEGRA_LA_AVPC_ARM7R,
+	TEGRA_LA_AVPC_ARM7W,
+	TEGRA_LA_DISPLAY_0A,
+	TEGRA_LA_DISPLAY_0B,
+	TEGRA_LA_DISPLAY_0C,
+	TEGRA_LA_DISPLAY_1B,
+	TEGRA_LA_DISPLAY_HC,
+	TEGRA_LA_DISPLAY_0AB,
+	TEGRA_LA_DISPLAY_0BB,
+	TEGRA_LA_DISPLAY_0CB,
+	TEGRA_LA_DISPLAY_1BB,
+	TEGRA_LA_DISPLAY_HCB,
+	TEGRA_LA_EPPUP,
+	TEGRA_LA_EPPU,
+	TEGRA_LA_EPPV,
+	TEGRA_LA_EPPY,
+	TEGRA_LA_G2PR,
+	TEGRA_LA_G2SR,
+	TEGRA_LA_G2DR,
+	TEGRA_LA_G2DW,
+	TEGRA_LA_HOST1X_DMAR,
+	TEGRA_LA_HOST1XR,
+	TEGRA_LA_HOST1XW,
+	TEGRA_LA_HDAR,
+	TEGRA_LA_HDAW,
+	TEGRA_LA_ISPW,
+	TEGRA_LA_MPCORER,
+	TEGRA_LA_MPCOREW,
+	TEGRA_LA_MPCORE_LPR,
+	TEGRA_LA_MPCORE_LPW,
+	TEGRA_LA_MPE_UNIFBR,
+	TEGRA_LA_MPE_IPRED,
+	TEGRA_LA_MPE_AMEMRD,
+	TEGRA_LA_MPE_CSRD,
+	TEGRA_LA_MPE_UNIFBW,
+	TEGRA_LA_MPE_CSWR,
+	TEGRA_LA_FDCDRD,
+	TEGRA_LA_IDXSRD,
+	TEGRA_LA_TEXSRD,
+	TEGRA_LA_FDCDWR,
+	TEGRA_LA_FDCDRD2,
+	TEGRA_LA_IDXSRD2,
+	TEGRA_LA_TEXSRD2,
+	TEGRA_LA_FDCDWR2,
+	TEGRA_LA_PPCS_AHBDMAR,
+	TEGRA_LA_PPCS_AHBSLVR,
+	TEGRA_LA_PPCS_AHBDMAW,
+	TEGRA_LA_PPCS_AHBSLVW,
+	TEGRA_LA_PTCR,
+	TEGRA_LA_SATAR,
+	TEGRA_LA_SATAW,
+	TEGRA_LA_VDE_BSEVR,
+	TEGRA_LA_VDE_MBER,
+	TEGRA_LA_VDE_MCER,
+	TEGRA_LA_VDE_TPER,
+	TEGRA_LA_VDE_BSEVW,
+	TEGRA_LA_VDE_DBGW,
+	TEGRA_LA_VDE_MBEW,
+	TEGRA_LA_VDE_TPMW,
+	TEGRA_LA_VI_RUV,
+	TEGRA_LA_VI_WSB,
+	TEGRA_LA_VI_WU,
+	TEGRA_LA_VI_WV,
+	TEGRA_LA_VI_WY,
+	TEGRA_LA_MAX_ID
+};
+
+static inline int tegra_set_latency_allowance(enum tegra_la_id id,
+						int bandwidth_in_mbps)
+{
+	return 0;
+}
+
+static inline int tegra_enable_latency_scaling(enum tegra_la_id id,
+						unsigned int threshold_low,
+						unsigned int threshold_mid,
+						unsigned int threshold_high)
+{
+	return 0;
+}
+
+static inline void tegra_disable_latency_scaling(enum tegra_la_id id)
+{
+}
+
+#endif /* _MACH_TEGRA_LATENCY_ALLOWANCE_H_ */
diff --git a/drivers/staging/tegra/include/mach/mc.h b/drivers/staging/tegra/include/mach/mc.h
new file mode 100644
index 000000000000..f8b77234d2ec
--- /dev/null
+++ b/drivers/staging/tegra/include/mach/mc.h
@@ -0,0 +1,101 @@
+/*
+ * arch/arm/mach-tegra/include/mach/mc.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ *	Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_MC_H
+#define __MACH_TEGRA_MC_H
+
+#define TEGRA_MC_FPRI_CTRL_AVPC		0x17c
+#define TEGRA_MC_FPRI_CTRL_DC		0x180
+#define TEGRA_MC_FPRI_CTRL_DCB		0x184
+#define TEGRA_MC_FPRI_CTRL_EPP		0x188
+#define TEGRA_MC_FPRI_CTRL_G2		0x18c
+#define TEGRA_MC_FPRI_CTRL_HC		0x190
+#define TEGRA_MC_FPRI_CTRL_ISP		0x194
+#define TEGRA_MC_FPRI_CTRL_MPCORE	0x198
+#define TEGRA_MC_FPRI_CTRL_MPEA		0x19c
+#define TEGRA_MC_FPRI_CTRL_MPEB		0x1a0
+#define TEGRA_MC_FPRI_CTRL_MPEC		0x1a4
+#define TEGRA_MC_FPRI_CTRL_NV		0x1a8
+#define TEGRA_MC_FPRI_CTRL_PPCS		0x1ac
+#define TEGRA_MC_FPRI_CTRL_VDE		0x1b0
+#define TEGRA_MC_FPRI_CTRL_VI		0x1b4
+
+#define TEGRA_MC_CLIENT_AVPCARM7R	((TEGRA_MC_FPRI_CTRL_AVPC << 8) | 0)
+#define TEGRA_MC_CLIENT_AVPCARM7W	((TEGRA_MC_FPRI_CTRL_AVPC << 8) | 2)
+#define TEGRA_MC_CLIENT_DISPLAY0A	((TEGRA_MC_FPRI_CTRL_DC << 8) | 0)
+#define TEGRA_MC_CLIENT_DISPLAY0B	((TEGRA_MC_FPRI_CTRL_DC << 8) | 2)
+#define TEGRA_MC_CLIENT_DISPLAY0C	((TEGRA_MC_FPRI_CTRL_DC << 8) | 4)
+#define TEGRA_MC_CLIENT_DISPLAY1B	((TEGRA_MC_FPRI_CTRL_DC << 8) | 6)
+#define TEGRA_MC_CLIENT_DISPLAYHC	((TEGRA_MC_FPRI_CTRL_DC << 8) | 8)
+#define TEGRA_MC_CLIENT_DISPLAY0AB	((TEGRA_MC_FPRI_CTRL_DCB << 8) | 0)
+#define TEGRA_MC_CLIENT_DISPLAY0BB	((TEGRA_MC_FPRI_CTRL_DCB << 8) | 2)
+#define TEGRA_MC_CLIENT_DISPLAY0CB	((TEGRA_MC_FPRI_CTRL_DCB << 8) | 4)
+#define TEGRA_MC_CLIENT_DISPLAY1BB	((TEGRA_MC_FPRI_CTRL_DCB << 8) | 6)
+#define TEGRA_MC_CLIENT_DISPLAYHCB	((TEGRA_MC_FPRI_CTRL_DCB << 8) | 8)
+#define TEGRA_MC_CLIENT_EPPUP		((TEGRA_MC_FPRI_CTRL_EPP << 8) | 0)
+#define TEGRA_MC_CLIENT_EPPU		((TEGRA_MC_FPRI_CTRL_EPP << 8) | 2)
+#define TEGRA_MC_CLIENT_EPPV		((TEGRA_MC_FPRI_CTRL_EPP << 8) | 4)
+#define TEGRA_MC_CLIENT_EPPY		((TEGRA_MC_FPRI_CTRL_EPP << 8) | 6)
+#define TEGRA_MC_CLIENT_G2PR		((TEGRA_MC_FPRI_CTRL_G2 << 8) | 0)
+#define TEGRA_MC_CLIENT_G2SR		((TEGRA_MC_FPRI_CTRL_G2 << 8) | 2)
+#define TEGRA_MC_CLIENT_G2DR		((TEGRA_MC_FPRI_CTRL_G2 << 8) | 4)
+#define TEGRA_MC_CLIENT_G2DW		((TEGRA_MC_FPRI_CTRL_G2 << 8) | 6)
+#define TEGRA_MC_CLIENT_HOST1XDMAR	((TEGRA_MC_FPRI_CTRL_HC << 8) | 0)
+#define TEGRA_MC_CLIENT_HOST1XR		((TEGRA_MC_FPRI_CTRL_HC << 8) | 2)
+#define TEGRA_MC_CLIENT_HOST1XW		((TEGRA_MC_FPRI_CTRL_HC << 8) | 4)
+#define TEGRA_MC_CLIENT_ISPW		((TEGRA_MC_FPRI_CTRL_ISP << 8) | 0)
+#define TEGRA_MC_CLIENT_MPCORER		((TEGRA_MC_FPRI_CTRL_MPCORE << 8) | 0)
+#define TEGRA_MC_CLIENT_MPCOREW		((TEGRA_MC_FPRI_CTRL_MPCORE << 8) | 2)
+#define TEGRA_MC_CLIENT_MPEAMEMRD	((TEGRA_MC_FPRI_CTRL_MPEA << 8) | 0)
+#define TEGRA_MC_CLIENT_MPEUNIFBR	((TEGRA_MC_FPRI_CTRL_MPEB << 8) | 0)
+#define TEGRA_MC_CLIENT_MPE_IPRED	((TEGRA_MC_FPRI_CTRL_MPEB << 8) | 2)
+#define TEGRA_MC_CLIENT_MPEUNIFBW	((TEGRA_MC_FPRI_CTRL_MPEB << 8) | 4)
+#define TEGRA_MC_CLIENT_MPECSRD		((TEGRA_MC_FPRI_CTRL_MPEC << 8) | 0)
+#define TEGRA_MC_CLIENT_MPECSWR		((TEGRA_MC_FPRI_CTRL_MPEC << 8) | 2)
+#define TEGRA_MC_CLIENT_FDCDRD		((TEGRA_MC_FPRI_CTRL_NV << 8) | 0)
+#define TEGRA_MC_CLIENT_IDXSRD		((TEGRA_MC_FPRI_CTRL_NV << 8) | 2)
+#define TEGRA_MC_CLIENT_TEXSRD		((TEGRA_MC_FPRI_CTRL_NV << 8) | 4)
+#define TEGRA_MC_CLIENT_FDCDWR		((TEGRA_MC_FPRI_CTRL_NV << 8) | 6)
+#define TEGRA_MC_CLIENT_PPCSAHBDMAR	((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 0)
+#define TEGRA_MC_CLIENT_PPCSAHBSLVR	((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 2)
+#define TEGRA_MC_CLIENT_PPCSAHBDMAW	((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 4)
+#define TEGRA_MC_CLIENT_PPCSAHBSLVW	((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 6)
+#define TEGRA_MC_CLIENT_VDEBSEVR	((TEGRA_MC_FPRI_CTRL_VDE << 8) | 0)
+#define TEGRA_MC_CLIENT_VDEMBER		((TEGRA_MC_FPRI_CTRL_VDE << 8) | 2)
+#define TEGRA_MC_CLIENT_VDEMCER		((TEGRA_MC_FPRI_CTRL_VDE << 8) | 4)
+#define TEGRA_MC_CLIENT_VDETPER		((TEGRA_MC_FPRI_CTRL_VDE << 8) | 6)
+#define TEGRA_MC_CLIENT_VDEBSEVW	((TEGRA_MC_FPRI_CTRL_VDE << 8) | 8)
+#define TEGRA_MC_CLIENT_VDEMBEW		((TEGRA_MC_FPRI_CTRL_VDE << 8) | 10)
+#define TEGRA_MC_CLIENT_VDETPMW		((TEGRA_MC_FPRI_CTRL_VDE << 8) | 12)
+#define TEGRA_MC_CLIENT_VIRUV		((TEGRA_MC_FPRI_CTRL_VI << 8) | 0)
+#define TEGRA_MC_CLIENT_VIWSB		((TEGRA_MC_FPRI_CTRL_VI << 8) | 2)
+#define TEGRA_MC_CLIENT_VIWU		((TEGRA_MC_FPRI_CTRL_VI << 8) | 4)
+#define TEGRA_MC_CLIENT_VIWV		((TEGRA_MC_FPRI_CTRL_VI << 8) | 6)
+#define TEGRA_MC_CLIENT_VIWY		((TEGRA_MC_FPRI_CTRL_VI << 8) | 8)
+
+#define TEGRA_MC_PRIO_LOWEST		0
+#define TEGRA_MC_PRIO_LOW		1
+#define TEGRA_MC_PRIO_MED		2
+#define TEGRA_MC_PRIO_HIGH		3
+#define TEGRA_MC_PRIO_MASK		3
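+
+/*
+ * The client IDs above pack the FPRI_CTRL register offset into bits [15:8]
+ * and the position of that client's two-bit priority field into bits [7:0]
+ * (hence the low byte stepping by 2).  Illustrative use, derived from the
+ * encoding rather than taken from the driver sources:
+ *
+ *	tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0A, TEGRA_MC_PRIO_HIGH);
+ */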
+
+void tegra_mc_set_priority(unsigned long client, unsigned long prio);
+int tegra_mc_get_tiled_memory_bandwidth_multiplier(void);
+
+#endif
diff --git a/drivers/staging/tegra/include/mach/powergate.h b/drivers/staging/tegra/include/mach/powergate.h
new file mode 100644
index 000000000000..5d13b9248f5e
--- /dev/null
+++ b/drivers/staging/tegra/include/mach/powergate.h
@@ -0,0 +1,32 @@
+/*
+ * arch/arm/mach-tegra/include/mach/powergate.h
+ *
+ * Copyright (c) 2010 Google, Inc
+ *
+ * Author:
+ *	Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_DOWNSTREAM_POWERGATE_H_
+#define _MACH_TEGRA_DOWNSTREAM_POWERGATE_H_
+
+#include <soc/tegra/pmc.h>
+
+#define tegra_chip_id	tegra_get_chip_id()
+
+int tegra_powergate_mc_disable(int id);
+int tegra_powergate_mc_enable(int id);
+int tegra_powergate_mc_flush(int id);
+int tegra_powergate_mc_flush_done(int id);
+
+#endif /* _MACH_TEGRA_DOWNSTREAM_POWERGATE_H_ */
diff --git a/drivers/staging/tegra/include/media/tegra_camera.h b/drivers/staging/tegra/include/media/tegra_camera.h
new file mode 100644
index 000000000000..ea2e5bf451ba
--- /dev/null
+++ b/drivers/staging/tegra/include/media/tegra_camera.h
@@ -0,0 +1,60 @@
+/*
+ * include/linux/tegra_camera.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef TEGRA_CAMERA_H
+#define TEGRA_CAMERA_H
+
+/* this is to enable VI pattern generator (Null Sensor) */
+#define TEGRA_CAMERA_ENABLE_PD2VI_CLK 0x1
+
+enum {
+	TEGRA_CAMERA_MODULE_ISP = 0,
+	TEGRA_CAMERA_MODULE_VI,
+	TEGRA_CAMERA_MODULE_CSI,
+};
+
+enum {
+	TEGRA_CAMERA_VI_CLK,
+	TEGRA_CAMERA_VI_SENSOR_CLK,
+};
+
+struct tegra_camera_clk_info {
+	uint id;
+	uint clk_id;
+	unsigned long rate;
+	/* to inform if any special bits need to be enabled/disabled */
+	uint flag;
+};
+
+enum StereoCameraMode {
+	Main = 0x0,		/* Sets the default camera to Main */
+	StereoCameraMode_Left = 0x01,	/* the left camera is on. */
+	StereoCameraMode_Right = 0x02,	/* the right camera is on. */
+	StereoCameraMode_Stereo = 0x03,	/* both cameras are on. */
+	StereoCameraMode_Force32 = 0x7FFFFFFF
+};
+
+int tegra_camera_clk_set_rate(struct tegra_camera_clk_info *info);
+void tegra_camera_clk_enable(void);
+void tegra_camera_clk_disable(void);
+bool tegra_camera_probed(void);
+void tegra_camera_gpio_set(bool enable);
+
+#define TEGRA_CAMERA_IOCTL_ENABLE		_IOWR('i', 1, uint)
+#define TEGRA_CAMERA_IOCTL_DISABLE		_IOWR('i', 2, uint)
+#define TEGRA_CAMERA_IOCTL_CLK_SET_RATE		\
+	_IOWR('i', 3, struct tegra_camera_clk_info)
+#define TEGRA_CAMERA_IOCTL_RESET		_IOWR('i', 4, uint)
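+
+/*
+ * Illustrative userspace call (a sketch; file descriptor setup, the chosen
+ * rate, and error handling are illustrative assumptions):
+ *
+ *	struct tegra_camera_clk_info info = {
+ *		.id = TEGRA_CAMERA_MODULE_VI,
+ *		.clk_id = TEGRA_CAMERA_VI_CLK,
+ *		.rate = 150000000,
+ *	};
+ *	ioctl(fd, TEGRA_CAMERA_IOCTL_CLK_SET_RATE, &info);
+ */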
+
+#endif
diff --git a/drivers/staging/tegra/include/trace/events/nvhost.h b/drivers/staging/tegra/include/trace/events/nvhost.h
new file mode 100644
index 000000000000..6506af44e576
--- /dev/null
+++ b/drivers/staging/tegra/include/trace/events/nvhost.h
@@ -0,0 +1,568 @@
+/*
+ * include/trace/events/nvhost.h
+ *
+ * Nvhost event logging to ftrace.
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvhost
+
+#if !defined(_TRACE_NVHOST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVHOST_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(nvhost,
+	TP_PROTO(const char *name),
+	TP_ARGS(name),
+	TP_STRUCT__entry(__field(const char *, name)),
+	TP_fast_assign(__entry->name = name;),
+	TP_printk("name=%s", __entry->name)
+);
+
+DEFINE_EVENT(nvhost, nvhost_channel_open,
+	TP_PROTO(const char *name),
+	TP_ARGS(name)
+);
+
+DEFINE_EVENT(nvhost, nvhost_channel_release,
+	TP_PROTO(const char *name),
+	TP_ARGS(name)
+);
+
+DEFINE_EVENT(nvhost, nvhost_ioctl_channel_flush,
+	TP_PROTO(const char *name),
+	TP_ARGS(name)
+);
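+
+/*
+ * Each DEFINE_EVENT above generates a trace_<event>() helper; e.g. the
+ * channel code would emit, at open time:
+ *
+ *	trace_nvhost_channel_open(dev_name(&ch->dev));
+ *
+ * The argument shown is illustrative - any string with a stable lifetime
+ * works, since the event records only the pointer.
+ */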
+
+TRACE_EVENT(nvhost_channel_write_submit,
+	TP_PROTO(const char *name, ssize_t count, u32 cmdbufs, u32 relocs,
+			u32 syncpt_id, u32 syncpt_incrs),
+
+	TP_ARGS(name, count, cmdbufs, relocs, syncpt_id, syncpt_incrs),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(ssize_t, count)
+		__field(u32, cmdbufs)
+		__field(u32, relocs)
+		__field(u32, syncpt_id)
+		__field(u32, syncpt_incrs)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->count = count;
+		__entry->cmdbufs = cmdbufs;
+		__entry->relocs = relocs;
+		__entry->syncpt_id = syncpt_id;
+		__entry->syncpt_incrs = syncpt_incrs;
+	),
+
+	TP_printk("name=%s, count=%zd, cmdbufs=%u, relocs=%u, syncpt_id=%u, syncpt_incrs=%u",
+	  __entry->name, __entry->count, __entry->cmdbufs, __entry->relocs,
+	  __entry->syncpt_id, __entry->syncpt_incrs)
+);
+
+TRACE_EVENT(nvhost_ioctl_channel_submit,
+	TP_PROTO(const char *name, u32 version, u32 cmdbufs, u32 relocs,
+		 u32 waitchks, u32 syncpt_id, u32 syncpt_incrs),
+
+	TP_ARGS(name, version, cmdbufs, relocs, waitchks,
+			syncpt_id, syncpt_incrs),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(u32, version)
+		__field(u32, cmdbufs)
+		__field(u32, relocs)
+		__field(u32, waitchks)
+		__field(u32, syncpt_id)
+		__field(u32, syncpt_incrs)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->version = version;
+		__entry->cmdbufs = cmdbufs;
+		__entry->relocs = relocs;
+		__entry->waitchks = waitchks;
+		__entry->syncpt_id = syncpt_id;
+		__entry->syncpt_incrs = syncpt_incrs;
+	),
+
+	TP_printk("name=%s, version=%u, cmdbufs=%u, relocs=%u, waitchks=%u, syncpt_id=%u, syncpt_incrs=%u",
+	  __entry->name, __entry->version, __entry->cmdbufs, __entry->relocs,
+	  __entry->waitchks, __entry->syncpt_id, __entry->syncpt_incrs)
+);
+
+TRACE_EVENT(nvhost_channel_write_cmdbuf,
+	TP_PROTO(const char *name, u32 mem_id,
+			u32 words, u32 offset),
+
+	TP_ARGS(name, mem_id, words, offset),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(u32, mem_id)
+		__field(u32, words)
+		__field(u32, offset)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->mem_id = mem_id;
+		__entry->words = words;
+		__entry->offset = offset;
+	),
+
+	TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d",
+	  __entry->name, __entry->mem_id,
+	  __entry->words, __entry->offset)
+);
+
+TRACE_EVENT(nvhost_cdma_end,
+	TP_PROTO(const char *name, int prio,
+		int hi_count, int med_count, int low_count),
+
+	TP_ARGS(name, prio, hi_count, med_count, low_count),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, prio)
+		__field(int, hi_count)
+		__field(int, med_count)
+		__field(int, low_count)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->prio = prio;
+		__entry->hi_count = hi_count;
+		__entry->med_count = med_count;
+		__entry->low_count = low_count;
+	),
+
+	TP_printk("name=%s, prio=%d, hi=%d, med=%d, low=%d",
+		__entry->name, __entry->prio,
+		__entry->hi_count, __entry->med_count, __entry->low_count)
+);
+
+TRACE_EVENT(nvhost_cdma_flush,
+	TP_PROTO(const char *name, int timeout),
+
+	TP_ARGS(name, timeout),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, timeout)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->timeout = timeout;
+	),
+
+	TP_printk("name=%s, timeout=%d",
+		__entry->name, __entry->timeout)
+);
+
+TRACE_EVENT(nvhost_cdma_push,
+	TP_PROTO(const char *name, u32 op1, u32 op2),
+
+	TP_ARGS(name, op1, op2),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(u32, op1)
+		__field(u32, op2)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->op1 = op1;
+		__entry->op2 = op2;
+	),
+
+	TP_printk("name=%s, op1=%08x, op2=%08x",
+		__entry->name, __entry->op1, __entry->op2)
+);
+
+TRACE_EVENT(nvhost_cdma_push_gather,
+	TP_PROTO(const char *name, u32 mem_id,
+			u32 words, u32 offset, void *cmdbuf),
+
+	TP_ARGS(name, mem_id, words, offset, cmdbuf),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(u32, mem_id)
+		__field(u32, words)
+		__field(u32, offset)
+		__field(bool, cmdbuf)
+		__dynamic_array(u32, cmdbuf, words)
+	),
+
+	TP_fast_assign(
+		if (cmdbuf) {
+			memcpy(__get_dynamic_array(cmdbuf), cmdbuf + offset,
+					words * sizeof(u32));
+		}
+		__entry->cmdbuf = cmdbuf;
+		__entry->name = name;
+		__entry->mem_id = mem_id;
+		__entry->words = words;
+		__entry->offset = offset;
+	),
+
+	TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]",
+	  __entry->name, __entry->mem_id,
+	  __entry->words, __entry->offset,
+	  __print_hex(__get_dynamic_array(cmdbuf),
+		  __entry->cmdbuf ? __entry->words * 4 : 0))
+);
+
+TRACE_EVENT(nvhost_channel_write_reloc,
+	TP_PROTO(const char *name, u32 cmdbuf_mem, u32 cmdbuf_offset,
+		u32 target, u32 target_offset),
+
+	TP_ARGS(name, cmdbuf_mem, cmdbuf_offset, target, target_offset),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(u32, cmdbuf_mem)
+		__field(u32, cmdbuf_offset)
+		__field(u32, target)
+		__field(u32, target_offset)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->cmdbuf_mem = cmdbuf_mem;
+		__entry->cmdbuf_offset = cmdbuf_offset;
+		__entry->target = target;
+		__entry->target_offset = target_offset;
+	),
+
+	TP_printk("name=%s, cmdbuf_mem=%08x, cmdbuf_offset=%04x, target=%08x, target_offset=%04x",
+	  __entry->name, __entry->cmdbuf_mem, __entry->cmdbuf_offset,
+	  __entry->target, __entry->target_offset)
+);
+
+TRACE_EVENT(nvhost_channel_write_waitchks,
+	TP_PROTO(const char *name, u32 waitchks, u32 waitmask),
+
+	TP_ARGS(name, waitchks, waitmask),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(u32, waitchks)
+		__field(u32, waitmask)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->waitchks = waitchks;
+		__entry->waitmask = waitmask;
+	),
+
+	TP_printk("name=%s, waitchks=%u, waitmask=%08x",
+	  __entry->name, __entry->waitchks, __entry->waitmask)
+);
+
+TRACE_EVENT(nvhost_channel_context_save,
+	TP_PROTO(const char *name, void *ctx),
+
+	TP_ARGS(name, ctx),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(void *, ctx)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->ctx = ctx;
+	),
+
+	TP_printk("name=%s, ctx=%p",
+	  __entry->name, __entry->ctx)
+);
+
+TRACE_EVENT(nvhost_channel_context_restore,
+	TP_PROTO(const char *name, void *ctx),
+
+	TP_ARGS(name, ctx),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(void *, ctx)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->ctx = ctx;
+	),
+
+	TP_printk("name=%s, ctx=%p",
+	  __entry->name, __entry->ctx)
+);
+
+TRACE_EVENT(nvhost_ctrlopen,
+	TP_PROTO(const char *name),
+	TP_ARGS(name),
+	TP_STRUCT__entry(
+		__field(const char *, name)
+	),
+	TP_fast_assign(
+		__entry->name = name;
+	),
+	TP_printk("name=%s", __entry->name)
+);
+
+TRACE_EVENT(nvhost_ctrlrelease,
+	TP_PROTO(const char *name),
+	TP_ARGS(name),
+	TP_STRUCT__entry(
+		__field(const char *, name)
+	),
+	TP_fast_assign(
+		__entry->name = name;
+	),
+	TP_printk("name=%s", __entry->name)
+);
+
+TRACE_EVENT(nvhost_ioctl_ctrl_module_mutex,
+	TP_PROTO(u32 lock, u32 id),
+
+	TP_ARGS(lock, id),
+
+	TP_STRUCT__entry(
+		__field(u32, lock)
+		__field(u32, id)
+	),
+
+	TP_fast_assign(
+		__entry->lock = lock;
+		__entry->id = id;
+	),
+
+	TP_printk("lock=%u, id=%u",
+		__entry->lock, __entry->id)
+);
+
+TRACE_EVENT(nvhost_ioctl_ctrl_syncpt_incr,
+	TP_PROTO(u32 id),
+
+	TP_ARGS(id),
+
+	TP_STRUCT__entry(
+		__field(u32, id)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+	),
+
+	TP_printk("id=%u", __entry->id)
+);
+
+TRACE_EVENT(nvhost_ioctl_ctrl_syncpt_read,
+	TP_PROTO(u32 id, u32 value),
+
+	TP_ARGS(id, value),
+
+	TP_STRUCT__entry(
+		__field(u32, id)
+		__field(u32, value)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->value = value;
+	),
+
+	TP_printk("id=%u, value=%u", __entry->id, __entry->value)
+);
+
+TRACE_EVENT(nvhost_ioctl_ctrl_syncpt_wait,
+	TP_PROTO(u32 id, u32 threshold, s32 timeout, u32 value, int err),
+
+	TP_ARGS(id, threshold, timeout, value, err),
+
+	TP_STRUCT__entry(
+		__field(u32, id)
+		__field(u32, threshold)
+		__field(s32, timeout)
+		__field(u32, value)
+		__field(int, err)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->threshold = threshold;
+		__entry->timeout = timeout;
+		__entry->value = value;
+		__entry->err = err;
+	),
+
+	TP_printk("id=%u, threshold=%u, timeout=%d, value=%u, err=%d",
+	  __entry->id, __entry->threshold, __entry->timeout,
+	  __entry->value, __entry->err)
+);
+
+TRACE_EVENT(nvhost_ioctl_ctrl_module_regrdwr,
+	TP_PROTO(u32 id, u32 num_offsets, bool write),
+
+	TP_ARGS(id, num_offsets, write),
+
+	TP_STRUCT__entry(
+		__field(u32, id)
+		__field(u32, num_offsets)
+		__field(bool, write)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->num_offsets = num_offsets;
+		__entry->write = write;
+	),
+
+	TP_printk("id=%u, num_offsets=%u, write=%d",
+	  __entry->id, __entry->num_offsets, __entry->write)
+);
+
+TRACE_EVENT(nvhost_channel_submitted,
+	TP_PROTO(const char *name, u32 syncpt_base, u32 syncpt_max),
+
+	TP_ARGS(name, syncpt_base, syncpt_max),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(u32, syncpt_base)
+		__field(u32, syncpt_max)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->syncpt_base = syncpt_base;
+		__entry->syncpt_max = syncpt_max;
+	),
+
+	TP_printk("name=%s, syncpt_base=%d, syncpt_max=%d",
+		__entry->name, __entry->syncpt_base, __entry->syncpt_max)
+);
+
+TRACE_EVENT(nvhost_channel_submit_complete,
+	TP_PROTO(const char *name, int count, u32 thresh,
+		int hi_count, int med_count, int low_count),
+
+	TP_ARGS(name, count, thresh, hi_count, med_count, low_count),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, count)
+		__field(u32, thresh)
+		__field(int, hi_count)
+		__field(int, med_count)
+		__field(int, low_count)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->count = count;
+		__entry->thresh = thresh;
+		__entry->hi_count = hi_count;
+		__entry->med_count = med_count;
+		__entry->low_count = low_count;
+	),
+
+	TP_printk("name=%s, count=%d, thresh=%d, hi=%d, med=%d, low=%d",
+		__entry->name, __entry->count, __entry->thresh,
+		__entry->hi_count, __entry->med_count, __entry->low_count)
+);
+
+TRACE_EVENT(nvhost_wait_cdma,
+	TP_PROTO(const char *name, u32 eventid),
+
+	TP_ARGS(name, eventid),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(u32, eventid)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->eventid = eventid;
+	),
+
+	TP_printk("name=%s, event=%d", __entry->name, __entry->eventid)
+);
+
+TRACE_EVENT(nvhost_syncpt_update_min,
+	TP_PROTO(u32 id, u32 val),
+
+	TP_ARGS(id, val),
+
+	TP_STRUCT__entry(
+		__field(u32, id)
+		__field(u32, val)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->val = val;
+	),
+
+	TP_printk("id=%d, val=%d", __entry->id, __entry->val)
+);
+
+TRACE_EVENT(nvhost_syncpt_wait_check,
+	TP_PROTO(u32 mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min),
+
+	TP_ARGS(mem_id, offset, syncpt_id, thresh, min),
+
+	TP_STRUCT__entry(
+		__field(u32, mem_id)
+		__field(u32, offset)
+		__field(u32, syncpt_id)
+		__field(u32, thresh)
+		__field(u32, min)
+	),
+
+	TP_fast_assign(
+		__entry->mem_id = mem_id;
+		__entry->offset = offset;
+		__entry->syncpt_id = syncpt_id;
+		__entry->thresh = thresh;
+		__entry->min = min;
+	),
+
+	TP_printk("mem_id=%08x, offset=%05x, id=%d, thresh=%d, current=%d",
+		__entry->mem_id, __entry->offset,
+		__entry->syncpt_id, __entry->thresh,
+		__entry->min)
+);
+
+#endif /*  _TRACE_NVHOST_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/staging/tegra/include/trace/events/nvmap.h b/drivers/staging/tegra/include/trace/events/nvmap.h
new file mode 100644
index 000000000000..e8e09cef1972
--- /dev/null
+++ b/drivers/staging/tegra/include/trace/events/nvmap.h
@@ -0,0 +1,303 @@
+/*
+ * include/trace/events/nvmap.h
+ *
+ * NvMap event logging to ftrace.
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvmap
+
+#if !defined(_TRACE_NVMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVMAP_H
+
+#include <linux/nvmap.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(nvmap,
+	TP_PROTO(struct nvmap_client *client),
+	TP_ARGS(client),
+	TP_STRUCT__entry(
+		__field(struct nvmap_client *, client)
+	),
+	TP_fast_assign(
+		__entry->client = client;
+	),
+	TP_printk("client=%p, name=%s",
+		__entry->client, __entry->client->name)
+);
+
+DEFINE_EVENT(nvmap, nvmap_open,
+	TP_PROTO(struct nvmap_client *client),
+	TP_ARGS(client)
+);
+
+DEFINE_EVENT(nvmap, nvmap_release,
+	TP_PROTO(struct nvmap_client *client),
+	TP_ARGS(client)
+);
+
+TRACE_EVENT(nvmap_create_handle,
+	TP_PROTO(struct nvmap_client *client,
+		 struct nvmap_handle *h,
+		 u32 size,
+		 struct nvmap_handle_ref *ref
+	),
+
+	TP_ARGS(client, h, size, ref),
+
+	TP_STRUCT__entry(
+		__field(struct nvmap_client *, client)
+		__field(struct nvmap_handle *, h)
+		__field(u32, size)
+		__field(struct nvmap_handle_ref *, ref)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+		__entry->h = h;
+		__entry->size = size;
+		__entry->ref = ref;
+	),
+
+	TP_printk("client=%p, name=%s, handle=%p, size=%d, ref=%p",
+		__entry->client, __entry->client->name,
+		__entry->h, __entry->size, __entry->ref)
+);
+
+TRACE_EVENT(nvmap_alloc_handle_id,
+	TP_PROTO(struct nvmap_client *client,
+		 unsigned long handle_id,
+		 u32 heap_mask,
+		 u32 align,
+		 u32 flags
+	),
+
+	TP_ARGS(client, handle_id, heap_mask, align, flags),
+
+	TP_STRUCT__entry(
+		__field(struct nvmap_client *, client)
+		__field(unsigned long, handle_id)
+		__field(u32, heap_mask)
+		__field(u32, align)
+		__field(u32, flags)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+		__entry->handle_id = handle_id;
+		__entry->heap_mask = heap_mask;
+		__entry->align = align;
+		__entry->flags = flags;
+	),
+
+	TP_printk("client=%p, id=0x%lx, heap_mask=0x%x, align=%d, flags=0x%x",
+		__entry->client, __entry->handle_id, __entry->heap_mask,
+		__entry->align, __entry->flags)
+);
+
+TRACE_EVENT(nvmap_free_handle_id,
+	TP_PROTO(struct nvmap_client *client,
+		 unsigned long handle_id
+	),
+
+	TP_ARGS(client, handle_id),
+
+	TP_STRUCT__entry(
+		__field(struct nvmap_client *, client)
+		__field(unsigned long, handle_id)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+		__entry->handle_id = handle_id;
+	),
+
+	TP_printk("client=%p, id=0x%lx",
+		__entry->client, __entry->handle_id)
+);
+
+TRACE_EVENT(nvmap_duplicate_handle_id,
+	TP_PROTO(struct nvmap_client *client,
+		 unsigned long handle_id,
+		 struct nvmap_handle_ref *ref
+	),
+
+	TP_ARGS(client, handle_id, ref),
+
+	TP_STRUCT__entry(
+		__field(struct nvmap_client *, client)
+		__field(unsigned long, handle_id)
+		__field(struct nvmap_handle_ref *, ref)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+		__entry->handle_id = handle_id;
+		__entry->ref = ref;
+	),
+
+	TP_printk("client=%p, id=0x%lx, ref=%p",
+		__entry->client, __entry->handle_id, __entry->ref)
+);
+
+TRACE_EVENT(cache_maint,
+	TP_PROTO(struct nvmap_client *client,
+		 struct nvmap_handle *h,
+		 unsigned long start,
+		 unsigned long end,
+		 u32 op
+	),
+
+	TP_ARGS(client, h, start, end, op),
+
+	TP_STRUCT__entry(
+		__field(struct nvmap_client *, client)
+		__field(struct nvmap_handle *, h)
+		__field(unsigned long, start)
+		__field(unsigned long, end)
+		__field(u32, op)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+		__entry->h = h;
+		__entry->start = start;
+		__entry->end = end;
+		__entry->op = op;
+	),
+
+	TP_printk("client=%p, h=%p, start=0x%lx, end=0x%lx, op=%d",
+		__entry->client, __entry->h, __entry->start,
+		__entry->end, __entry->op)
+);
+
+TRACE_EVENT(nvmap_map_into_caller_ptr,
+	TP_PROTO(struct nvmap_client *client,
+		 struct nvmap_handle *h,
+		 u32 offset,
+		 u32 length,
+		 u32 flags
+	),
+
+	TP_ARGS(client, h, offset, length, flags),
+
+	TP_STRUCT__entry(
+		__field(struct nvmap_client *, client)
+		__field(struct nvmap_handle *, h)
+		__field(u32, offset)
+		__field(u32, length)
+		__field(u32, flags)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+		__entry->h = h;
+		__entry->offset = offset;
+		__entry->length = length;
+		__entry->flags = flags;
+	),
+
+	TP_printk("client=%p, h=%p, offset=%d, length=%d, flags=0x%x",
+		__entry->client, __entry->h, __entry->offset,
+		__entry->length, __entry->flags)
+);
+
+TRACE_EVENT(nvmap_ioctl_rw_handle,
+	TP_PROTO(struct nvmap_client *client,
+		 struct nvmap_handle *h,
+		 u32 is_read,
+		 u32 offset,
+		 unsigned long addr,
+		 u32 mem_stride,
+		 u32 user_stride,
+		 u32 elem_size,
+		 u32 count
+	),
+
+	TP_ARGS(client, h, is_read, offset, addr, mem_stride,
+		user_stride, elem_size, count),
+
+	TP_STRUCT__entry(
+		__field(struct nvmap_client *, client)
+		__field(struct nvmap_handle *, h)
+		__field(u32, is_read)
+		__field(u32, offset)
+		__field(unsigned long, addr)
+		__field(u32, mem_stride)
+		__field(u32, user_stride)
+		__field(u32, elem_size)
+		__field(u32, count)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+		__entry->h = h;
+		__entry->is_read = is_read;
+		__entry->offset = offset;
+		__entry->addr = addr;
+		__entry->mem_stride = mem_stride;
+		__entry->user_stride = user_stride;
+		__entry->elem_size = elem_size;
+		__entry->count = count;
+	),
+
+	TP_printk("client=%p, h=%p, is_read=%d, offset=%d, addr=0x%lx, "
+		"mem_stride=%d, user_stride=%d, elem_size=%d, count=%d",
+		__entry->client, __entry->h, __entry->is_read, __entry->offset,
+		__entry->addr, __entry->mem_stride, __entry->user_stride,
+		__entry->elem_size, __entry->count)
+);
+
+TRACE_EVENT(nvmap_ioctl_pinop,
+	TP_PROTO(struct nvmap_client *client,
+		 u32 is_pin,
+		 u32 count,
+		 unsigned long *ids
+	),
+
+	TP_ARGS(client, is_pin, count, ids),
+
+	TP_STRUCT__entry(
+		__field(struct nvmap_client *, client)
+		__field(u32, is_pin)
+		__field(u32, count)
+		__field(unsigned long *, ids)
+		__dynamic_array(unsigned long, ids, count)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+		__entry->is_pin = is_pin;
+		__entry->count = count;
+		__entry->ids = ids;
+		memcpy(__get_dynamic_array(ids), ids,
+		    sizeof(unsigned long) * count);
+	),
+
+	TP_printk("client=%p, is_pin=%d, count=%d, ids=[%s]",
+		__entry->client, __entry->is_pin, __entry->count,
+		__print_hex(__get_dynamic_array(ids), __entry->ids ?
+			    sizeof(unsigned long) * __entry->count : 0))
+);
+
+#endif /* _TRACE_NVMAP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/staging/tegra/include/video/nvhdcp.h b/drivers/staging/tegra/include/video/nvhdcp.h
new file mode 100644
index 000000000000..f282ff8caa99
--- /dev/null
+++ b/drivers/staging/tegra/include/video/nvhdcp.h
@@ -0,0 +1,91 @@
+/*
+ * include/video/nvhdcp.h
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_NVHDCP_H_
+#define _LINUX_NVHDCP_H_
+
+#include <linux/fb.h>
+#include <linux/types.h>
+#include <asm/ioctl.h>
+
+/* maximum receivers and repeaters connected at a time */
+#define TEGRA_NVHDCP_MAX_DEVS	127
+
+/* values for value_flags */
+#define TEGRA_NVHDCP_FLAG_AN			0x0001
+#define TEGRA_NVHDCP_FLAG_AKSV			0x0002
+#define TEGRA_NVHDCP_FLAG_BKSV			0x0004
+#define TEGRA_NVHDCP_FLAG_BSTATUS		0x0008 /* repeater status */
+#define TEGRA_NVHDCP_FLAG_CN			0x0010 /* c_n */
+#define TEGRA_NVHDCP_FLAG_CKSV			0x0020 /* c_ksv */
+#define TEGRA_NVHDCP_FLAG_DKSV			0x0040 /* d_ksv */
+#define TEGRA_NVHDCP_FLAG_KP			0x0080 /* k_prime */
+#define TEGRA_NVHDCP_FLAG_S			0x0100 /* hdcp_status */
+#define TEGRA_NVHDCP_FLAG_CS			0x0200 /* connection state */
+#define TEGRA_NVHDCP_FLAG_V			0x0400
+#define TEGRA_NVHDCP_FLAG_MP			0x0800
+#define TEGRA_NVHDCP_FLAG_BKSVLIST		0x1000
+
+/* values for packet_results */
+#define TEGRA_NVHDCP_RESULT_SUCCESS		0
+#define TEGRA_NVHDCP_RESULT_UNSUCCESSFUL	1
+#define TEGRA_NVHDCP_RESULT_PENDING		0x103
+#define TEGRA_NVHDCP_RESULT_LINK_FAILED		0xc0000013
+/* TODO: replace with -EINVAL */
+#define TEGRA_NVHDCP_RESULT_INVALID_PARAMETER	0xc000000d
+#define TEGRA_NVHDCP_RESULT_INVALID_PARAMETER_MIX	0xc0000030
+/* TODO: replace with -ENOMEM */
+#define TEGRA_NVHDCP_RESULT_NO_MEMORY		0xc0000017
+
+struct tegra_nvhdcp_packet {
+	__u32	value_flags;		// (IN/OUT)
+	__u32	packet_results;		// (OUT)
+
+	__u64	c_n;			// (IN) upstream exchange number
+	__u64	c_ksv;			// (IN)
+
+	__u32	b_status;	// (OUT) link/repeater status
+	__u64	hdcp_status;	// (OUT) READ_S
+	__u64	cs;		// (OUT) Connection State
+
+	__u64	k_prime;	// (OUT)
+	__u64	a_n;		// (OUT)
+	__u64	a_ksv;		// (OUT)
+	__u64	b_ksv;		// (OUT)
+	__u64	d_ksv;		// (OUT)
+	__u8	v_prime[20];	// (OUT) 160-bit
+	__u64	m_prime;	// (OUT)
+
+	// (OUT) Valid KSVs in the bKsvList. Maximum is 127 devices
+	__u32	num_bksv_list;
+
+	// (OUT) Up to 127 receivers & repeaters
+	__u64	bksv_list[TEGRA_NVHDCP_MAX_DEVS];
+};
+
+/* parameters to TEGRAIO_NVHDCP_SET_POLICY */
+#define TEGRA_NVHDCP_POLICY_ON_DEMAND	0
+#define TEGRA_NVHDCP_POLICY_ALWAYS_ON	1
+
+/* ioctls */
+#define TEGRAIO_NVHDCP_ON		_IO('F', 0x70)
+#define TEGRAIO_NVHDCP_OFF		_IO('F', 0x71)
+#define TEGRAIO_NVHDCP_SET_POLICY	_IOW('F', 0x72, __u32)
+#define TEGRAIO_NVHDCP_READ_M		_IOWR('F', 0x73, struct tegra_nvhdcp_packet)
+#define TEGRAIO_NVHDCP_READ_S		_IOWR('F', 0x74, struct tegra_nvhdcp_packet)
+#define TEGRAIO_NVHDCP_RENEGOTIATE	_IO('F', 0x75)
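+
+/*
+ * Illustrative sequence (a sketch, not from the original sources): enable
+ * HDCP and then poll link status via READ_S:
+ *
+ *	struct tegra_nvhdcp_packet pkt = { .value_flags = TEGRA_NVHDCP_FLAG_S };
+ *
+ *	ioctl(fd, TEGRAIO_NVHDCP_ON);
+ *	ioctl(fd, TEGRAIO_NVHDCP_READ_S, &pkt);
+ *	if (pkt.packet_results != TEGRA_NVHDCP_RESULT_SUCCESS)
+ *		ioctl(fd, TEGRAIO_NVHDCP_RENEGOTIATE);
+ */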
+
+#endif
diff --git a/drivers/staging/tegra/include/video/tegra_dc_ext.h b/drivers/staging/tegra/include/video/tegra_dc_ext.h
new file mode 100644
index 000000000000..8f9d0423f516
--- /dev/null
+++ b/drivers/staging/tegra/include/video/tegra_dc_ext.h
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2011, NVIDIA Corporation
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ * Some code based on fbdev extensions written by:
+ *	Erik Gilling <konkers@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_DC_EXT_H
+#define __TEGRA_DC_EXT_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#if defined(__KERNEL__)
+# include <linux/time.h>
+#else
+# include <time.h>
+# include <unistd.h>
+#endif
+
+#define TEGRA_DC_EXT_FMT_P1		0
+#define TEGRA_DC_EXT_FMT_P2		1
+#define TEGRA_DC_EXT_FMT_P4		2
+#define TEGRA_DC_EXT_FMT_P8		3
+#define TEGRA_DC_EXT_FMT_B4G4R4A4	4
+#define TEGRA_DC_EXT_FMT_B5G5R5A	5
+#define TEGRA_DC_EXT_FMT_B5G6R5		6
+#define TEGRA_DC_EXT_FMT_AB5G5R5	7
+#define TEGRA_DC_EXT_FMT_B8G8R8A8	12
+#define TEGRA_DC_EXT_FMT_R8G8B8A8	13
+#define TEGRA_DC_EXT_FMT_B6x2G6x2R6x2A8	14
+#define TEGRA_DC_EXT_FMT_R6x2G6x2B6x2A8	15
+#define TEGRA_DC_EXT_FMT_YCbCr422	16
+#define TEGRA_DC_EXT_FMT_YUV422		17
+#define TEGRA_DC_EXT_FMT_YCbCr420P	18
+#define TEGRA_DC_EXT_FMT_YUV420P	19
+#define TEGRA_DC_EXT_FMT_YCbCr422P	20
+#define TEGRA_DC_EXT_FMT_YUV422P	21
+#define TEGRA_DC_EXT_FMT_YCbCr422R	22
+#define TEGRA_DC_EXT_FMT_YUV422R	23
+#define TEGRA_DC_EXT_FMT_YCbCr422RA	24
+#define TEGRA_DC_EXT_FMT_YUV422RA	25
+
+#define TEGRA_DC_EXT_BLEND_NONE		0
+#define TEGRA_DC_EXT_BLEND_PREMULT	1
+#define TEGRA_DC_EXT_BLEND_COVERAGE	2
+
+#define TEGRA_DC_EXT_FLIP_FLAG_INVERT_H	(1 << 0)
+#define TEGRA_DC_EXT_FLIP_FLAG_INVERT_V	(1 << 1)
+#define TEGRA_DC_EXT_FLIP_FLAG_TILED	(1 << 2)
+#define TEGRA_DC_EXT_FLIP_FLAG_CURSOR	(1 << 3)
+#define TEGRA_DC_EXT_FLIP_FLAG_GLOBAL_ALPHA	(1 << 4)
+
+struct tegra_dc_ext_flip_windowattr {
+	__s32	index;
+	__u32	buff_id;
+	__u32	blend;
+	__u32	offset;
+	__u32	offset_u;
+	__u32	offset_v;
+	__u32	stride;
+	__u32	stride_uv;
+	__u32	pixformat;
+	/*
+	 * x, y, w, h are fixed-point: 20 bits of integer (MSB) and 12 bits of
+	 * fractional (LSB)
+	 */
+	__u32	x;
+	__u32	y;
+	__u32	w;
+	__u32	h;
+	__u32	out_x;
+	__u32	out_y;
+	__u32	out_w;
+	__u32	out_h;
+	__u32	z;
+	__u32	swap_interval;
+	struct timespec timestamp;
+	__u32	pre_syncpt_id;
+	__u32	pre_syncpt_val;
+	/* These two are optional; if zero, U and V are taken from buff_id */
+	__u32	buff_id_u;
+	__u32	buff_id_v;
+	__u32	flags;
+	__u8	global_alpha; /* requires TEGRA_DC_EXT_FLIP_FLAG_GLOBAL_ALPHA */
+	/* Leave some wiggle room for future expansion */
+	__u8	pad1[3];
+	__u32   pad2[4];
+};
+
+#define TEGRA_DC_EXT_FLIP_N_WINDOWS	3
+
+struct tegra_dc_ext_flip {
+	struct tegra_dc_ext_flip_windowattr win[TEGRA_DC_EXT_FLIP_N_WINDOWS];
+	__u32	post_syncpt_id;
+	__u32	post_syncpt_val;
+};
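+
+/*
+ * Illustrative flip submission (a sketch; fd and nvmap buffer setup are
+ * elided, and the unused-window convention of passing a negative index is
+ * an assumption based on the signed index field):
+ *
+ *	struct tegra_dc_ext_flip flip = { 0 };
+ *
+ *	flip.win[0].index = 0;
+ *	flip.win[0].buff_id = buf_id;
+ *	flip.win[1].index = -1;
+ *	flip.win[2].index = -1;
+ *	ioctl(fd, TEGRA_DC_EXT_FLIP, &flip);
+ */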
+
+/*
+ * Cursor image format:
+ * - Tegra hardware supports two colors: foreground and background, specified
+ *   by the client in RGB8.
+ * - The image should be specified as two 1bpp bitmaps immediately following
+ *   each other in memory.  Each pixel in the final cursor will be constructed
+ *   from the bitmaps with the following logic:
+ *		bitmap1 bitmap0
+ *		(mask)  (color)
+ *		  1	   0	transparent
+ *		  1	   1	inverted
+ *		  0	   0	background color
+ *		  0	   1	foreground color
+ * - Exactly one of the SIZE flags must be specified.
+ */
+#define TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32	1
+#define TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64	2
+struct tegra_dc_ext_cursor_image {
+	struct {
+		__u8	r;
+		__u8	g;
+		__u8	b;
+	} foreground, background;
+	__u32	buff_id;
+	__u32	flags;
+};
+
+/* Possible flags for struct tegra_dc_ext_cursor's flags field */
+#define TEGRA_DC_EXT_CURSOR_FLAGS_VISIBLE	1
+
+struct tegra_dc_ext_cursor {
+	__s16 x;
+	__s16 y;
+	__u32 flags;
+};
+
+/*
+ * Color conversion is performed as follows:
+ *
+ * r = sat(kyrgb * sat(y + yof) + kur * u + kvr * v)
+ * g = sat(kyrgb * sat(y + yof) + kug * u + kvg * v)
+ * b = sat(kyrgb * sat(y + yof) + kub * u + kvb * v)
+ *
+ * Coefficients should be specified as fixed-point values; the exact format
+ * varies for each coefficient.
+ * The format for each coefficient is listed below with the syntax:
+ * - A "s." prefix means that the coefficient has a sign bit (twos complement).
+ * - The first number is the number of bits in the integer component (not
+ *   including the optional sign bit).
+ * - The second number is the number of bits in the fractional component.
+ *
+ * All three fields should be tightly packed, justified to the LSB of the
+ * 16-bit value.  For example, the "s.2.8" value should be packed as:
+ * (MSB) 5 bits of 0, 1 bit of sign, 2 bits of integer, 8 bits of frac (LSB)
+ */
+struct tegra_dc_ext_csc {
+	__u32 win_index;
+	__u16 yof;	/* s.7.0 */
+	__u16 kyrgb;	/*   2.8 */
+	__u16 kur;	/* s.2.8 */
+	__u16 kvr;	/* s.2.8 */
+	__u16 kug;	/* s.1.8 */
+	__u16 kvg;	/* s.1.8 */
+	__u16 kub;	/* s.2.8 */
+	__u16 kvb;	/* s.2.8 */
+};
+
+/*
+ * RGB Lookup table
+ *
+ * In true-color and YUV modes this is used for post-CSC RGB->RGB lookup, i.e.
+ * gamma-correction. In palette-indexed RGB modes, this table designates the
+ * mode's color palette.
+ *
+ * To convert 8-bit per channel RGB values to 16-bit, duplicate the 8 bits
+ * in low and high byte, e.g. r=r|(r<<8)
+ *
+ * To just update flags, set len to 0.
+ *
+ * Current Tegra DC hardware supports 8-bit per channel to 8-bit per channel,
+ * and each hardware window (overlay) uses its own lookup table.
+ *
+ */
+struct tegra_dc_ext_lut {
+	__u32  win_index; /* window index to set lut for */
+	__u32  flags;     /* Flag bitmask, see TEGRA_DC_EXT_LUT_FLAGS_* */
+	__u32  start;     /* start index to update lut from */
+	__u32  len;       /* number of valid lut entries */
+	__u16 *r;         /* array of 16-bit red values, 0 to reset */
+	__u16 *g;         /* array of 16-bit green values, 0 to reset */
+	__u16 *b;         /* array of 16-bit blue values, 0 to reset */
+};
+
+/* tegra_dc_ext_lut.flags - override global fb device lookup table.
+ * Default behaviour is double-lookup.
+ */
+#define TEGRA_DC_EXT_LUT_FLAGS_FBOVERRIDE 0x01
+
+#define TEGRA_DC_EXT_FLAGS_ENABLED	1
+struct tegra_dc_ext_status {
+	__u32 flags;
+	/* Leave some wiggle room for future expansion */
+	__u32 pad[3];
+};
+
+struct tegra_dc_ext_feature {
+	__u32 length;
+	__u32 *entries;
+};
+
+#define TEGRA_DC_EXT_SET_NVMAP_FD \
+	_IOW('D', 0x00, __s32)
+
+#define TEGRA_DC_EXT_GET_WINDOW \
+	_IOW('D', 0x01, __u32)
+#define TEGRA_DC_EXT_PUT_WINDOW \
+	_IOW('D', 0x02, __u32)
+
+#define TEGRA_DC_EXT_FLIP \
+	_IOWR('D', 0x03, struct tegra_dc_ext_flip)
+
+#define TEGRA_DC_EXT_GET_CURSOR \
+	_IO('D', 0x04)
+#define TEGRA_DC_EXT_PUT_CURSOR \
+	_IO('D', 0x05)
+#define TEGRA_DC_EXT_SET_CURSOR_IMAGE \
+	_IOW('D', 0x06, struct tegra_dc_ext_cursor_image)
+#define TEGRA_DC_EXT_SET_CURSOR \
+	_IOW('D', 0x07, struct tegra_dc_ext_cursor)
+
+#define TEGRA_DC_EXT_SET_CSC \
+	_IOW('D', 0x08, struct tegra_dc_ext_csc)
+
+#define TEGRA_DC_EXT_GET_STATUS \
+	_IOR('D', 0x09, struct tegra_dc_ext_status)
+
+/*
+ * Returns the auto-incrementing vblank syncpoint for the head associated
+ * with this device node.  Note: this shares ioctl number 0x09 with
+ * TEGRA_DC_EXT_GET_STATUS above; the two encoded commands differ only in
+ * the argument size.
+ */
+#define TEGRA_DC_EXT_GET_VBLANK_SYNCPT \
+	_IOR('D', 0x09, __u32)
+
+#define TEGRA_DC_EXT_SET_LUT \
+	_IOW('D', 0x0A, struct tegra_dc_ext_lut)
+
+#define TEGRA_DC_EXT_GET_FEATURES \
+	_IOW('D', 0x0B, struct tegra_dc_ext_feature)
+
+enum tegra_dc_ext_control_output_type {
+	TEGRA_DC_EXT_DSI,
+	TEGRA_DC_EXT_LVDS,
+	TEGRA_DC_EXT_VGA,
+	TEGRA_DC_EXT_HDMI,
+	TEGRA_DC_EXT_DVI,
+};
+
+/*
+ * Get the properties for a given output.
+ *
+ * handle (in): Which output to query
+ * type (out): Describes the type of the output
+ * connected (out): Non-zero iff the output is currently connected
+ * associated_head (out): The head number that the output is currently
+ *      bound to.  -1 iff the output is not associated with any head.
+ * head_mask (out): Bitmask of which heads the output may be bound to (some
+ *      outputs are permanently bound to a single head).
+ */
+struct tegra_dc_ext_control_output_properties {
+	__u32 handle;
+	enum tegra_dc_ext_control_output_type type;
+	__u32 connected;
+	__s32 associated_head;
+	__u32 head_mask;
+};
+
+/*
+ * This allows userspace to query the raw EDID data for the specified output
+ * handle.
+ *
+ * Here, the size parameter is both an input and an output:
+ * 1. Userspace passes in the size of the buffer allocated for data.
+ * 2. If size is too small, the call fails with the error EFBIG; otherwise, the
+ *    raw EDID data is written to the buffer pointed to by data.  In both
+ *    cases, size will be filled in with the size of the data.
+ */
+struct tegra_dc_ext_control_output_edid {
+	__u32 handle;
+	__u32 size;
+	void *data;
+};
+
+struct tegra_dc_ext_event {
+	__u32	type;
+	ssize_t	data_size;
+	char	data[0];
+};
+
+#define TEGRA_DC_EXT_EVENT_HOTPLUG	0x1
+struct tegra_dc_ext_control_event_hotplug {
+	__u32 handle;
+};
+
+#define TEGRA_DC_EXT_CAPABILITIES_CURSOR_MODE	(1 << 0)
+struct tegra_dc_ext_control_capabilities {
+	__u32 caps;
+	/* Leave some wiggle room for future expansion */
+	__u32 pad[3];
+};
+
+#define TEGRA_DC_EXT_CONTROL_GET_NUM_OUTPUTS \
+	_IOR('C', 0x00, __u32)
+#define TEGRA_DC_EXT_CONTROL_GET_OUTPUT_PROPERTIES \
+	_IOWR('C', 0x01, struct tegra_dc_ext_control_output_properties)
+#define TEGRA_DC_EXT_CONTROL_GET_OUTPUT_EDID \
+	_IOWR('C', 0x02, struct tegra_dc_ext_control_output_edid)
+#define TEGRA_DC_EXT_CONTROL_SET_EVENT_MASK \
+	_IOW('C', 0x03, __u32)
+#define TEGRA_DC_EXT_CONTROL_GET_CAPABILITIES \
+	_IOR('C', 0x04, struct tegra_dc_ext_control_capabilities)
+
+#endif /* __TEGRA_DC_EXT_H */
diff --git a/drivers/staging/tegra/include/video/tegrafb.h b/drivers/staging/tegra/include/video/tegrafb.h
new file mode 100644
index 000000000000..919661b1a8e0
--- /dev/null
+++ b/drivers/staging/tegra/include/video/tegrafb.h
@@ -0,0 +1,32 @@
+/*
+ * include/video/tegrafb.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_TEGRAFB_H_
+#define _LINUX_TEGRAFB_H_
+
+#include <linux/fb.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct tegra_fb_modedb {
+	struct fb_var_screeninfo *modedb;
+	__u32 modedb_len;
+};
+
+#define FBIO_TEGRA_GET_MODEDB	_IOWR('F', 0x42, struct tegra_fb_modedb)
+
+#endif
diff --git a/drivers/staging/tegra/pageattr.c b/drivers/staging/tegra/pageattr.c
new file mode 100644
index 000000000000..1d7743dee8fc
--- /dev/null
+++ b/drivers/staging/tegra/pageattr.c
@@ -0,0 +1,66 @@
+#include <linux/percpu.h>
+
+#include <asm/cacheflush.h>
+
+#define FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD 8
+extern void __flush_dcache_page(struct address_space *, struct page *);
+
+static inline void __flush_cache_all(void *info)
+{
+	flush_cache_all();
+}
+
+inline void inner_flush_cache_all(void)
+{
+	on_each_cpu(__flush_cache_all, NULL, 1);
+}
+
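+/* Stub of the change_page_attr (CPA) page-accounting hook; there is
+ * nothing to track here. */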
+void update_page_count(int level, unsigned long pages)
+{
+}
+
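+/*
+ * Note: for batches of FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD or more pages
+ * it is cheaper to flush the whole inner cache once by set/way than page by
+ * page by MVA; past the threshold only the per-page outer-cache flush
+ * remains in the loop below.
+ */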
+static void flush_cache(struct page **pages, int numpages)
+{
+	unsigned int i;
+	bool flush_inner = true;
+	unsigned long base;
+
+	if (numpages >= FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD) {
+		inner_flush_cache_all();
+		flush_inner = false;
+	}
+
+	for (i = 0; i < numpages; i++) {
+		if (flush_inner)
+			__flush_dcache_page(page_mapping(pages[i]), pages[i]);
+		base = page_to_phys(pages[i]);
+		outer_flush_range(base, base + PAGE_SIZE);
+	}
+}
+
+int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+	flush_cache(pages, addrinarray);
+	return 0;
+}
+EXPORT_SYMBOL(set_pages_array_uc);
+
+int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+	flush_cache(pages, addrinarray);
+	return 0;
+}
+EXPORT_SYMBOL(set_pages_array_wc);
+
+int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+	return 0;
+}
+EXPORT_SYMBOL(set_pages_array_wb);
+
+int set_pages_array_iwb(struct page **pages, int addrinarray)
+{
+	flush_cache(pages, addrinarray);
+	return 0;
+}
+EXPORT_SYMBOL(set_pages_array_iwb);
diff --git a/drivers/staging/tegra/powergate.c b/drivers/staging/tegra/powergate.c
new file mode 100644
index 000000000000..492bd8f2e06b
--- /dev/null
+++ b/drivers/staging/tegra/powergate.c
@@ -0,0 +1,218 @@
+/*
+ * drivers/powergate/tegra-powergate.c
+ *
+ * Copyright (c) 2010 Google, Inc
+ *
+ * Author:
+ *	Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <mach/fuse.h>
+#include <mach/powergate.h>
+
+#include <../arch/arm/mach-tegra/iomap.h>
+
+#define TEGRA_MC_BASE		0x7000F000
+
+#define MC_CLIENT_CTRL		0x100
+#define MC_CLIENT_HOTRESETN	0x104
+#define MC_CLIENT_ORRC_BASE	0x140
+
+static DEFINE_SPINLOCK(tegra_powergate_lock);
+
+enum mc_client {
+	MC_CLIENT_AVPC		= 0,
+	MC_CLIENT_DC		= 1,
+	MC_CLIENT_DCB		= 2,
+	MC_CLIENT_EPP		= 3,
+	MC_CLIENT_G2		= 4,
+	MC_CLIENT_HC		= 5,
+	MC_CLIENT_ISP		= 6,
+	MC_CLIENT_MPCORE	= 7,
+	MC_CLIENT_MPEA		= 8,
+	MC_CLIENT_MPEB		= 9,
+	MC_CLIENT_MPEC		= 10,
+	MC_CLIENT_NV		= 11,
+	MC_CLIENT_PPCS		= 12,
+	MC_CLIENT_VDE		= 13,
+	MC_CLIENT_VI		= 14,
+	MC_CLIENT_LAST		= -1,
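+	/* AFI (PCIe) is aliased to the list terminator, which leaves the
+	 * PCIe partition's hot-reset client list below effectively empty. */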
+	MC_CLIENT_AFI		= MC_CLIENT_LAST,
+};
+
+#define MAX_HOTRESET_CLIENT_NUM		4
+#define MAX_CLK_EN_NUM			4
+struct powergate_partition {
+	enum mc_client hot_reset_clients[MAX_HOTRESET_CLIENT_NUM];
+};
+
+static struct powergate_partition powergate_partition_info[] = {
+	[TEGRA_POWERGATE_CPU]	= { {MC_CLIENT_LAST}, },
+	[TEGRA_POWERGATE_L2]	= { {MC_CLIENT_LAST}, },
+	[TEGRA_POWERGATE_3D]	= { {MC_CLIENT_NV, MC_CLIENT_LAST}, },
+	[TEGRA_POWERGATE_PCIE]	= { {MC_CLIENT_AFI, MC_CLIENT_LAST}, },
+	[TEGRA_POWERGATE_VDEC]	= { {MC_CLIENT_VDE, MC_CLIENT_LAST}, },
+	[TEGRA_POWERGATE_MPE]	= { {MC_CLIENT_MPEA, MC_CLIENT_MPEB,
+				     MC_CLIENT_MPEC, MC_CLIENT_LAST}, },
+	[TEGRA_POWERGATE_VENC]	= { {MC_CLIENT_ISP, MC_CLIENT_VI, MC_CLIENT_LAST}, },
+};
+
+static void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
+
+static u32 mc_read(unsigned long reg)
+{
+	return readl(mc + reg);
+}
+
+static void mc_write(u32 val, unsigned long reg)
+{
+	writel(val, mc + reg);
+}
+
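+/*
+ * Note: the four helpers below implement the MC hot-reset handshake around
+ * power-gating a partition. The expected call order (inferred from typical
+ * Tegra power-gating sequences, not spelled out in this file) is:
+ * power-down: tegra_powergate_mc_disable() -> tegra_powergate_mc_flush() ->
+ * gate the partition; power-up: ungate the partition ->
+ * tegra_powergate_mc_flush_done() -> tegra_powergate_mc_enable().
+ */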
+int tegra_powergate_mc_disable(int id)
+{
+	u32 idx, clt_ctrl, orrc_reg;
+	enum mc_client mcClientBit;
+	unsigned long flags;
+
+	if (id < 0 || id >= ARRAY_SIZE(powergate_partition_info))
+		return -EINVAL;
+
+	for (idx = 0; idx < MAX_HOTRESET_CLIENT_NUM; idx++) {
+		mcClientBit =
+			powergate_partition_info[id].hot_reset_clients[idx];
+		if (mcClientBit == MC_CLIENT_LAST)
+			break;
+
+		spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+		/* clear client enable bit */
+		clt_ctrl = mc_read(MC_CLIENT_CTRL);
+		clt_ctrl &= ~(1 << mcClientBit);
+		mc_write(clt_ctrl, MC_CLIENT_CTRL);
+
+		/* read back to flush write */
+		clt_ctrl = mc_read(MC_CLIENT_CTRL);
+
+		spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+
+		/* wait for outstanding requests to reach 0 */
+		orrc_reg = MC_CLIENT_ORRC_BASE + (mcClientBit * 4);
+		while (mc_read(orrc_reg) != 0)
+			udelay(10);
+	}
+
+	return 0;
+}
+
+int tegra_powergate_mc_flush(int id)
+{
+	u32 idx, hot_rstn;
+	enum mc_client mcClientBit;
+	unsigned long flags;
+
+	if (id < 0 || id >= ARRAY_SIZE(powergate_partition_info))
+		return -EINVAL;
+
+	for (idx = 0; idx < MAX_HOTRESET_CLIENT_NUM; idx++) {
+		mcClientBit =
+			powergate_partition_info[id].hot_reset_clients[idx];
+		if (mcClientBit == MC_CLIENT_LAST)
+			break;
+
+		spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+		/* assert hotreset (client module is currently in reset) */
+		hot_rstn = mc_read(MC_CLIENT_HOTRESETN);
+		hot_rstn &= ~(1 << mcClientBit);
+		mc_write(hot_rstn, MC_CLIENT_HOTRESETN);
+
+		/* read back to flush write */
+		hot_rstn = mc_read(MC_CLIENT_HOTRESETN);
+
+		spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+	}
+
+	return 0;
+}
+
+int tegra_powergate_mc_flush_done(int id)
+{
+	u32 idx, hot_rstn;
+	enum mc_client mcClientBit;
+	unsigned long flags;
+
+	if (id < 0 || id >= ARRAY_SIZE(powergate_partition_info))
+		return -EINVAL;
+
+	for (idx = 0; idx < MAX_HOTRESET_CLIENT_NUM; idx++) {
+		mcClientBit =
+			powergate_partition_info[id].hot_reset_clients[idx];
+		if (mcClientBit == MC_CLIENT_LAST)
+			break;
+
+		spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+		/* deassert hotreset */
+		hot_rstn = mc_read(MC_CLIENT_HOTRESETN);
+		hot_rstn |= (1 << mcClientBit);
+		mc_write(hot_rstn, MC_CLIENT_HOTRESETN);
+
+		/* read back to flush write */
+		hot_rstn = mc_read(MC_CLIENT_HOTRESETN);
+
+		spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+	}
+
+	return 0;
+}
+
+int tegra_powergate_mc_enable(int id)
+{
+	u32 idx, clt_ctrl;
+	enum mc_client mcClientBit;
+	unsigned long flags;
+
+	if (id < 0 || id >= ARRAY_SIZE(powergate_partition_info))
+		return -EINVAL;
+
+	for (idx = 0; idx < MAX_HOTRESET_CLIENT_NUM; idx++) {
+		mcClientBit =
+			powergate_partition_info[id].hot_reset_clients[idx];
+		if (mcClientBit == MC_CLIENT_LAST)
+			break;
+
+		spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+		/* enable client */
+		clt_ctrl = mc_read(MC_CLIENT_CTRL);
+		clt_ctrl |= (1 << mcClientBit);
+		mc_write(clt_ctrl, MC_CLIENT_CTRL);
+
+		/* read back to flush write */
+		clt_ctrl = mc_read(MC_CLIENT_CTRL);
+
+		spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+	}
+
+	return 0;
+}
diff --git a/drivers/staging/tegra/video/Kconfig b/drivers/staging/tegra/video/Kconfig
new file mode 100644
index 000000000000..bf6503e7d462
--- /dev/null
+++ b/drivers/staging/tegra/video/Kconfig
@@ -0,0 +1,201 @@
+if ARCH_TEGRA
+
+comment "NVIDIA Tegra Display Driver options"
+
+config TEGRA_GRHOST
+	tristate "Tegra graphics host driver"
+	depends on !TEGRA_HOST1X
+	help
+	  Driver for the Tegra graphics host hardware.
+
+config TEGRA_GRHOST_USE_NVMAP
+	bool "Use nvmap as graphics memory manager"
+	default y
+	help
+	  Use nvmap as the graphics memory manager. This is the only
+	  choice at the moment.
+
+config TEGRA_GRHOST_DEFAULT_TIMEOUT
+	depends on TEGRA_GRHOST
+	int "Default timeout for submits"
+	default 30000
+	help
+	  Default timeout for jobs in milliseconds. Set to zero for no timeout.
+
+config TEGRA_DC
+	tristate "Tegra Display Contoller"
+	depends on ARCH_TEGRA && TEGRA_GRHOST
+	select FB_MODE_HELPERS
+	select I2C
+	select VIDEOMODE_HELPERS
+	help
+	  Tegra display controller support.
+
+config TEGRA_OVERLAY
+	tristate "Tegra Overlay Device Node"
+	depends on TEGRA_DC && !TEGRA_DC_EXTENSIONS
+	help
+	  Device node for multi-client overlay support.
+
+config FB_TEGRA
+	tristate "Tegra Framebuffer driver"
+	depends on TEGRA_DC && FB = y
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	default FB
+	help
+	  Framebuffer device support for the Tegra display controller.
+
+config TEGRA_DC_EXTENSIONS
+	bool "Tegra Display Controller Extensions"
+	depends on TEGRA_DC
+	default y
+	help
+	  This exposes support for extended capabilities of the Tegra display
+	  controller to userspace drivers.
+
+config TEGRA_NVMAP
+	bool "Tegra GPU memory management driver (nvmap)"
+	depends on !TEGRA_HOST1X
+	default y
+	help
+	  Say Y here to include the memory management driver for the Tegra
+	  GPU, multimedia and display subsystems.
+
+# config NVMAP_RECLAIM_UNPINNED_VM
+# 	bool "Virtualize IOVMM memory in nvmap"
+# 	depends on TEGRA_NVMAP && (TEGRA_IOVMM || IOMMU_API)
+# 	default y
+# 	help
+# 	  Say Y here to enable nvmap to reclaim I/O virtual memory after
+# 	  it has been unpinned, and re-use it for other handles. This can
+# 	  allow a larger virtual I/O VM space than would normally be
+# 	  supported by the hardware, at a slight cost in performance.
+
+config NVMAP_ALLOW_SYSMEM
+	bool "Allow physical system memory to be used by nvmap"
+	depends on TEGRA_NVMAP
+	default n
+	help
+	  Say Y here to allow nvmap to use physical system memory (i.e.,
+	  shared with the operating system but not translated through
+	  an IOVMM device) for allocations.
+
+config NVMAP_HIGHMEM_ONLY
+	bool "Use only HIGHMEM for nvmap"
+	depends on TEGRA_NVMAP && (NVMAP_ALLOW_SYSMEM || TEGRA_IOVMM || IOMMU_API) && HIGHMEM
+	default n
+	help
+	  Say Y here to restrict nvmap system memory allocations (both
+	  physical system memory and IOVMM) to just HIGHMEM pages.
+
+config NVMAP_CARVEOUT_KILLER
+	bool "Reclaim nvmap carveout by killing processes"
+	depends on TEGRA_NVMAP
+	default n
+	help
+	  Say Y here to allow the system to reclaim carveout space by killing
+	  processes. The largest consumers with the lowest priority are
+	  killed first.
+
+config NVMAP_CARVEOUT_COMPACTOR
+	bool "Compact carveout when it gets fragmented"
+	depends on TEGRA_NVMAP
+	default y
+	help
+	  When a carveout allocation attempt fails, the compactor defragments
+	  the heap and retries the failed allocation.
+	  Say Y here to let nvmap keep carveout fragmentation under control.
+
+config NVMAP_PAGE_POOLS
+	bool "Use page pools to reduce allocation overhead"
+	depends on TEGRA_NVMAP
+	default y
+	help
+	  Say Y here to reduce the allocation overhead, which is significant
+	  for uncached, write-combined and inner-cacheable memories, as it
+	  involves changing page attributes and flushing the cache for every
+	  page on each allocation. Allocation time is reduced by allocating
+	  the pages ahead of time and keeping them aside. The reserved pages
+	  are released when the system is low on memory and acquired back
+	  when memory is freed.
+
+config NVMAP_PAGE_POOLS_INIT_FILLUP
+	bool "Fill up page pools during page pools init"
+	depends on NVMAP_PAGE_POOLS
+	default y
+	help
+	  Say Y here to fill up the page pools at page pool init time.
+	  This gives faster allocations right from the earliest allocation
+	  requests, but filling the pools during init increases boot time.
+	  If the increase in boot time is not acceptable, keep this option
+	  disabled.
+
+config NVMAP_PAGE_POOL_SIZE
+	hex
+	default 0x0
+
+#config NVMAP_CACHE_MAINT_BY_SET_WAYS
+#	bool "Enable cache maintenance by set/ways"
+#	depends on TEGRA_NVMAP
+#	help
+#	 Say Y here to reduce cache maintenance overhead by MVA.
+#	 This helps in reducing cache maintenance overhead in the systems,
+#	 where inner cache includes only L1. For the systems, where inner cache
+#	 includes L1 and L2, keep this option disabled.
+
+#config NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
+#	bool "Enable outer cache maintenance by set/ways"
+#	depends on TEGRA_NVMAP
+#	help
+#	  Say Y here if you want to optimize cache maintenance for ranges
+#	  bigger than size of outer cache. This option has no effect on
+#	  system without outer cache.
+
+config NVMAP_VPR
+	bool "Enable VPR Heap."
+	depends on TEGRA_NVMAP
+	default n
+	help
+	  Say Y here to enable the Video Protection Region (VPR) heap.
+	  If unsure, say N.
+
+config NVMAP_FORCE_ZEROED_USER_PAGES
+	bool "Only alloc zeroed pages for user space"
+	depends on TEGRA_NVMAP
+	help
+	  Say Y here to force zeroing of pages allocated for user space. This
+	  avoids leaking kernel secure data to user space. This can add
+	  significant overhead to allocation operations depending on the
+	  allocation size requested.
+
+config TEGRA_DSI
+	bool "Enable DSI panel."
+	default n
+	help
+	  Say Y here to enable the DSI panel.
+
+#config NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+#	bool "Convert carveout to IOVMM"
+#	depends on TEGRA_NVMAP &&  (TEGRA_IOVMM_SMMU || IOMMU_API)
+#	default y
+#	help
+#	  Say Y here to force to convert carveout memory requests to
+#	  I/O virtual memory requests.
+
+config TEGRA_NVHDCP
+	bool "Support NVHDCP content protection on HDMI"
+	default n
+	help
+	  Say Y here to support the NVHDCP upstream and downstream protocols.
+	  This requires a correctly fused chip to negotiate keys.
+
+config TEGRA_HDMI_74MHZ_LIMIT
+	bool "Support only up to 74.25 MHz HDMI pixel frequency"
+	default n
+	help
+	  Say Y here to make the kernel report only low-bandwidth modes.
+	  Useful only for boards which can't deliver 148.50 MHz.
+
+endif
diff --git a/drivers/staging/tegra/video/Makefile b/drivers/staging/tegra/video/Makefile
new file mode 100644
index 000000000000..77d3b06c260a
--- /dev/null
+++ b/drivers/staging/tegra/video/Makefile
@@ -0,0 +1,10 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+# subdir-ccflags-y := -Werror
+EXTRA_CFLAGS += -Idrivers/video/tegra/host
+obj-$(CONFIG_TEGRA_GRHOST) += host/
+obj-$(CONFIG_TEGRA_DC) += dc/
+obj-$(CONFIG_FB_TEGRA) += fb.o
+obj-$(CONFIG_TEGRA_NVMAP) += nvmap/
diff --git a/drivers/staging/tegra/video/dc/Makefile b/drivers/staging/tegra/video/dc/Makefile
new file mode 100644
index 000000000000..dbddc1508f9c
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/Makefile
@@ -0,0 +1,14 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+obj-y += dc.o bandwidth.o mode.o clock.o lut.o csc.o window.o
+obj-y += rgb.o
+obj-y += hdmi.o
+obj-$(CONFIG_TEGRA_NVHDCP) += nvhdcp.o
+obj-y += edid.o
+obj-y += nvsd.o
+obj-y += dsi.o
+obj-y += dc_sysfs.o
+obj-y += dc_config.o
+obj-$(CONFIG_TEGRA_OVERLAY) += overlay.o
+obj-$(CONFIG_TEGRA_DC_EXTENSIONS) += ext/
diff --git a/drivers/staging/tegra/video/dc/bandwidth.c b/drivers/staging/tegra/video/dc/bandwidth.c
new file mode 100644
index 000000000000..1c9d12516b06
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/bandwidth.c
@@ -0,0 +1,284 @@
+/*
+ * drivers/video/tegra/dc/bandwidth.c
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+
+#include <mach/clk.h>
+#include <mach/mc.h>
+#include <linux/nvhost.h>
+#include <mach/latency_allowance.h>
+
+#include "dc.h"
+#include "fb.h"
+#include "dc_reg.h"
+#include "dc_config.h"
+#include "dc_priv.h"
+
+static int use_dynamic_emc = 1;
+
+module_param_named(use_dynamic_emc, use_dynamic_emc, int, S_IRUGO | S_IWUSR);
+
+/* uses the larger of w->bandwidth or w->new_bandwidth */
+static void tegra_dc_set_latency_allowance(struct tegra_dc *dc,
+	struct tegra_dc_win *w)
+{
+	/* windows A, B, C for first and second display */
+	static const enum tegra_la_id la_id_tab[2][3] = {
+		/* first display */
+		{ TEGRA_LA_DISPLAY_0A, TEGRA_LA_DISPLAY_0B,
+			TEGRA_LA_DISPLAY_0C },
+		/* second display */
+		{ TEGRA_LA_DISPLAY_0AB, TEGRA_LA_DISPLAY_0BB,
+			TEGRA_LA_DISPLAY_0CB },
+	};
+	/* window B V-filter tap for first and second display. */
+	static const enum tegra_la_id vfilter_tab[2] = {
+		TEGRA_LA_DISPLAY_1B, TEGRA_LA_DISPLAY_1BB,
+	};
+	unsigned long bw;
+
+	BUG_ON(dc->ndev->id >= ARRAY_SIZE(la_id_tab));
+	BUG_ON(dc->ndev->id >= ARRAY_SIZE(vfilter_tab));
+	BUG_ON(w->idx >= ARRAY_SIZE(*la_id_tab));
+
+	bw = max(w->bandwidth, w->new_bandwidth);
+
+	/* tegra_dc_get_bandwidth() treats V filter windows as double
+	 * bandwidth, but LA has a separate client for V filter */
+	if (w->idx == 1 && win_use_v_filter(dc, w))
+		bw /= 2;
+
+	/* our bandwidth is in kbytes/sec, but LA takes MBps.
+	 * round up bandwidth to next 1MBps */
+	bw = bw / 1000 + 1;
+
+	tegra_set_latency_allowance(la_id_tab[dc->ndev->id][w->idx], bw);
+	/* if window B, also set the 1B client for the 2-tap V filter. */
+	if (w->idx == 1)
+		tegra_set_latency_allowance(vfilter_tab[dc->ndev->id], bw);
+}
+
+static unsigned int tegra_dc_windows_is_overlapped(struct tegra_dc_win *a,
+						   struct tegra_dc_win *b)
+{
+	if (!WIN_IS_ENABLED(a) || !WIN_IS_ENABLED(b))
+		return 0;
+
+	/* because memory access to load the fifo can overlap, only care
+	 * if windows overlap vertically */
+	return ((a->out_y + a->out_h > b->out_y) && (a->out_y <= b->out_y)) ||
+		((b->out_y + b->out_h > a->out_y) && (b->out_y <= a->out_y));
+}
+
+static unsigned long tegra_dc_find_max_bandwidth(struct tegra_dc_win *wins[],
+						 int n)
+{
+	unsigned i;
+	unsigned j;
+	unsigned overlap_count;
+	unsigned max_bw = 0;
+
+	WARN_ONCE(n > 3, "Code assumes at most 3 windows, bandwidth is likely "
+			 "inaccurate.\n");
+
+	/* If we had a large number of windows, we would compute adjacency
+	 * graph representing 2 window overlaps, find all cliques in the graph,
+	 * assign bandwidth to each clique, and then select the clique with
+	 * maximum bandwidth. But because we have at most 3 windows,
+	 * implementing a proper Bron-Kerbosch algorithm would be overkill;
+	 * brute force will suffice.
+	 *
+	 * Thus: find the maximum bandwidth for either a single window or a
+	 * pair of windows, and count the number of overlapping window pairs.
+	 * If there are three pairs, all 3 windows overlap.
+	 */
+
+	overlap_count = 0;
+	for (i = 0; i < n; i++) {
+		unsigned int bw1;
+
+		if (wins[i] == NULL)
+			continue;
+		bw1 = wins[i]->new_bandwidth;
+		if (bw1 > max_bw)
+			/* Single window */
+			max_bw = bw1;
+
+		for (j = i + 1; j < n; j++) {
+			if (wins[j] == NULL)
+				continue;
+			if (tegra_dc_windows_is_overlapped(wins[i], wins[j])) {
+				unsigned int bw2 = wins[j]->new_bandwidth;
+				if (bw1 + bw2 > max_bw)
+					/* Window pair overlaps */
+					max_bw = bw1 + bw2;
+				overlap_count++;
+			}
+		}
+	}
+
+	if (overlap_count == 3)
+		/* All three windows overlap */
+		max_bw = wins[0]->new_bandwidth + wins[1]->new_bandwidth +
+			 wins[2]->new_bandwidth;
+
+	return max_bw;
+}
+
+/*
+ * Calculate peak EMC bandwidth for each enabled window =
+ * pixel_clock * win_bpp * (use_v_filter ? 2 : 1) * H_scale_factor *
+ * (windows_tiling ? 2 : 1)
+ *
+ * note:
+ * (*) We use 2 tap V filter, so need double BW if use V filter
+ * (*) Tiling mode on T30 and DDR3 requires double BW
+ *
+ * return:
+ * bandwidth in kBps
+ */
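+/*
+ * Worked example (illustrative): a full-screen 1080p@60 ARGB8888 window
+ * (pclk = 148.5 MHz, 32 bpp, no V filter, no scaling, linear) needs
+ * 148500 * 32 / 8 = 594000 kBps, i.e. ~594 MBps; the Tegra2 efficiency
+ * correction below raises that to 594000 * 42 / 25 = 997920 kBps.
+ */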
+static unsigned long tegra_dc_calc_win_bandwidth(struct tegra_dc *dc,
+	struct tegra_dc_win *w)
+{
+	unsigned long ret;
+	int tiled_windows_bw_multiplier;
+	unsigned long bpp;
+
+	if (!WIN_IS_ENABLED(w))
+		return 0;
+
+	if (dfixed_trunc(w->w) == 0 || dfixed_trunc(w->h) == 0 ||
+	    w->out_w == 0 || w->out_h == 0)
+		return 0;
+
+	tiled_windows_bw_multiplier = 1;
+// 		tegra_mc_get_tiled_memory_bandwidth_multiplier();
+
+	/* all of tegra's YUV formats (420 and 422) fetch 2 bytes per pixel,
+	 * but the size reported by tegra_dc_fmt_bpp for the planar version
+	 * is of the luma plane's size only. */
+	bpp = tegra_dc_is_yuv_planar(w->fmt) ?
+		2 * tegra_dc_fmt_bpp(w->fmt) : tegra_dc_fmt_bpp(w->fmt);
+	ret = dc->mode.pclk / 1000UL * bpp / 8 * (
+		win_use_v_filter(dc, w) ? 2 : 1) *
+		dfixed_trunc(w->w) / w->out_w * (WIN_IS_TILED(w) ?
+		tiled_windows_bw_multiplier : 1);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+	/*
+	 * Assuming 60% efficiency: i.e. if we calculate we need 70MBps, we
+	 * will request 117MBps from EMC.
+	 */
+	ret = ret + (17 * ret / 25);
+#endif
+	return ret;
+}
+
+static unsigned long tegra_dc_get_bandwidth(
+	struct tegra_dc_win *windows[], int n)
+{
+	int i;
+
+	BUG_ON(n > DC_N_WINDOWS);
+
+	/* emc rate and latency allowance both need to know per window
+	 * bandwidths */
+	for (i = 0; i < n; i++) {
+		struct tegra_dc_win *w = windows[i];
+
+		if (w)
+			w->new_bandwidth =
+				tegra_dc_calc_win_bandwidth(w->dc, w);
+	}
+
+	return tegra_dc_find_max_bandwidth(windows, n);
+}
+
+/* to save power, call when display memory clients would be idle */
+void tegra_dc_clear_bandwidth(struct tegra_dc *dc)
+{
+	trace_printk("%s:%s rate=%d\n", dc->ndev->name, __func__,
+		dc->emc_clk_rate);
+// 	if (tegra_is_clk_enabled(dc->emc_clk))
+// 		clk_disable_unprepare(dc->emc_clk);
+// 	dc->emc_clk_rate = 0;
+}
+
+/* use the larger of dc->emc_clk_rate or dc->new_emc_clk_rate, and copies
+ * dc->new_emc_clk_rate into dc->emc_clk_rate.
+ * calling this function both before and after a flip is sufficient to select
+ * the best possible frequency and latency allowance.
+ * set use_new to true to force dc->new_emc_clk_rate programming.
+ */
+void tegra_dc_program_bandwidth(struct tegra_dc *dc, bool use_new)
+{
+// 	unsigned i;
+// 
+// 	if (use_new || dc->emc_clk_rate != dc->new_emc_clk_rate) {
+// 		/* going from 0 to non-zero */
+// 		if (!dc->emc_clk_rate && !tegra_is_clk_enabled(dc->emc_clk))
+// 			clk_prepare_enable(dc->emc_clk);
+// 
+// 		clk_set_rate(dc->emc_clk,
+// 			max(dc->emc_clk_rate, dc->new_emc_clk_rate));
+// 		dc->emc_clk_rate = dc->new_emc_clk_rate;
+// 
+// 		/* going from non-zero to 0 */
+// 		if (!dc->new_emc_clk_rate && tegra_is_clk_enabled(dc->emc_clk))
+// 			clk_disable_unprepare(dc->emc_clk);
+// 	}
+// 
+// 	for (i = 0; i < DC_N_WINDOWS; i++) {
+// 		struct tegra_dc_win *w = &dc->windows[i];
+// 
+// 		if ((use_new || w->bandwidth != w->new_bandwidth) &&
+// 			w->new_bandwidth != 0)
+// 			tegra_dc_set_latency_allowance(dc, w);
+// 		w->bandwidth = w->new_bandwidth;
+// 		trace_printk("%s:win%u bandwidth=%d\n", dc->ndev->name, w->idx,
+// 			w->bandwidth);
+// 	}
+}
+
+int tegra_dc_set_dynamic_emc(struct tegra_dc_win *windows[], int n)
+{
+	unsigned long new_rate;
+	struct tegra_dc *dc;
+
+	if (!use_dynamic_emc)
+		return 0;
+
+	dc = windows[0]->dc;
+
+	if (tegra_dc_has_multiple_dc())
+		new_rate = ULONG_MAX;
+	else {
+		/* calculate the new rate based on this POST */
+		new_rate = tegra_dc_get_bandwidth(windows, n);
+		if (WARN_ONCE(new_rate > (ULONG_MAX / 1000), "bandwidth maxed out\n"))
+			new_rate = ULONG_MAX;
+		else
+			new_rate = max(EMC_BW_TO_FREQ(new_rate * 1000),
+				       dc->pdata->min_emc_clk_rate);
+	}
+
+	trace_printk("%s:new_emc_clk_rate=%ld\n", dc->ndev->name, new_rate);
+	dc->new_emc_clk_rate = new_rate;
+
+	return 0;
+}
diff --git a/drivers/staging/tegra/video/dc/clock.c b/drivers/staging/tegra/video/dc/clock.c
new file mode 100644
index 000000000000..3570a611909c
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/clock.c
@@ -0,0 +1,144 @@
+/*
+ * drivers/video/tegra/dc/clock.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+
+#include <mach/clk.h>
+
+#include "dc.h"
+#include "dc_reg.h"
+#include "dc_priv.h"
+
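+/*
+ * The pixel clock divider effectively works in half steps, hence the
+ * rate * 2 arithmetic below. Worked example (illustrative): with a 216 MHz
+ * parent and a requested pclk of 70 MHz, div = round(432 / 70) = 6, giving
+ * 216 * 2 / 6 = 72 MHz.
+ */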
+unsigned long tegra_dc_pclk_round_rate(struct tegra_dc *dc, int pclk)
+{
+	unsigned long rate;
+	unsigned long div;
+
+	rate = tegra_dc_clk_get_rate(dc);
+
+	div = DIV_ROUND_CLOSEST(rate * 2, pclk);
+
+	if (div < 2)
+		return 0;
+
+	return rate * 2 / div;
+}
+
+static unsigned long tegra_dc_pclk_predict_rate(struct clk *parent, int pclk)
+{
+	unsigned long rate;
+	unsigned long div;
+
+	rate = clk_get_rate(parent);
+
+	div = DIV_ROUND_CLOSEST(rate * 2, pclk);
+
+	if (div < 2)
+		return 0;
+
+	return rate * 2 / div;
+}
+
+void tegra_dc_setup_clk(struct tegra_dc *dc, struct clk *clk)
+{
+	struct clk *parent_clk = clk_get(&dc->ndev->dev, "parent");
+	unsigned long rate;
+	int pclk;
+
+	BUG_ON(IS_ERR(parent_clk));
+
+	if (dc->out->type == TEGRA_DC_OUT_RGB) {
+// 		if (dc->out->parent_clk_backup &&
+// 		    (parent_clk == clk_get_sys(NULL, "pll_p"))) {
+// 			rate = tegra_dc_pclk_predict_rate(
+// 				parent_clk, dc->mode.pclk);
+// 			/* use pll_d as last resort */
+// 			if (rate < (dc->mode.pclk / 100 * 99) ||
+// 			    rate > (dc->mode.pclk / 100 * 109))
+// 				parent_clk = clk_get_sys(
+// 					NULL, dc->out->parent_clk_backup);
+// 		}
+
+		BUG_ON(clk_set_parent(clk, parent_clk));
+
+// 		if (parent_clk != clk_get_sys(NULL, "pll_p")) {
+// 			struct clk *base_clk = parent_clk;
+// 
+// 			/* Assuming either pll_d or pll_d2 is used */
+// 			rate = dc->mode.pclk * 2;
+// 
+// 			if (rate != clk_get_rate(base_clk))
+// 				clk_set_rate(base_clk, rate);
+// 		}
+	}
+
+	if (dc->out->type == TEGRA_DC_OUT_HDMI) {
+		struct clk *base_clk = clk_get_parent(parent_clk);
+
+		BUG_ON(IS_ERR(base_clk));
+
+		/*
+		 * Provide dynamic frequency rate setting for T20/T30 HDMI.
+		 * The required rate needs to be set up at a 4x multiplier,
+		 * as out0 is 1/2 of the actual PLL output.
+		 */
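+		/* e.g. 1080p (pclk 148.5 MHz) rounds the PLL to 594 MHz */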
+		rate = clk_round_rate(base_clk, dc->mode.pclk * 4);
+// 		pr_info("rate %ld\n", rate);
+		BUG_ON(rate <= 0);
+		BUG_ON(clk_set_rate(base_clk, rate));
+		BUG_ON(clk_set_parent(clk, parent_clk));
+	}
+
+	if (dc->out->type == TEGRA_DC_OUT_DSI) {
+		struct clk *base_clk;
+
+		if (clk == dc->clk) {
+			parent_clk = clk_get_sys(NULL,
+					dc->out->parent_clk ? : "pll_d_out0");
+			base_clk = clk_get_parent(parent_clk);
+			tegra_clk_cfg_ex(base_clk,
+					TEGRA_CLK_PLLD_DSI_OUT_ENB, 1);
+		} else {
+			if (dc->pdata->default_out->dsi->dsi_instance) {
+				parent_clk = clk_get_sys(NULL,
+					dc->out->parent_clk ? : "pll_d2_out0");
+				base_clk = clk_get_parent(parent_clk);
+				tegra_clk_cfg_ex(base_clk,
+						TEGRA_CLK_PLLD_CSI_OUT_ENB, 1);
+			} else {
+				parent_clk = clk_get_sys(NULL,
+					dc->out->parent_clk ? : "pll_d_out0");
+				base_clk = clk_get_parent(parent_clk);
+				tegra_clk_cfg_ex(base_clk,
+						TEGRA_CLK_PLLD_DSI_OUT_ENB, 1);
+			}
+		}
+
+		rate = dc->mode.pclk * dc->shift_clk_div * 2;
+		if (rate != clk_get_rate(base_clk))
+			clk_set_rate(base_clk, rate);
+
+		if (clk_get_parent(clk) != parent_clk)
+			clk_set_parent(clk, parent_clk);
+	}
+
+	pclk = tegra_dc_pclk_round_rate(dc, dc->mode.pclk);
+	tegra_dvfs_set_rate(clk, pclk);
+}
diff --git a/drivers/staging/tegra/video/dc/csc.c b/drivers/staging/tegra/video/dc/csc.c
new file mode 100644
index 000000000000..fee4f29c2986
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/csc.c
@@ -0,0 +1,69 @@
+/*
+ * drivers/video/tegra/dc/csc.c
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+
+#include "dc.h"
+#include "dc_reg.h"
+#include "dc_priv.h"
+
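+/*
+ * The reset values below appear to be limited-range BT.601 YUV->RGB
+ * coefficients in the DC's two's-complement s.8 fixed point: kyrgb 0x012a
+ * ~= 1.164, kvr 0x0198 ~= 1.596, kug 0x39b ~= -0.394, kvg 0x32f ~= -0.816,
+ * kub 0x0204 ~= 2.016; yof 0x00f0 is a -16 luma offset.
+ */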
+void tegra_dc_init_csc_defaults(struct tegra_dc_csc *csc)
+{
+	csc->yof   = 0x00f0;
+	csc->kyrgb = 0x012a;
+	csc->kur   = 0x0000;
+	csc->kvr   = 0x0198;
+	csc->kug   = 0x039b;
+	csc->kvg   = 0x032f;
+	csc->kub   = 0x0204;
+	csc->kvb   = 0x0000;
+}
+
+void tegra_dc_set_csc(struct tegra_dc *dc, struct tegra_dc_csc *csc)
+{
+	tegra_dc_writel(dc, csc->yof,	DC_WIN_CSC_YOF);
+	tegra_dc_writel(dc, csc->kyrgb,	DC_WIN_CSC_KYRGB);
+	tegra_dc_writel(dc, csc->kur,	DC_WIN_CSC_KUR);
+	tegra_dc_writel(dc, csc->kvr,	DC_WIN_CSC_KVR);
+	tegra_dc_writel(dc, csc->kug,	DC_WIN_CSC_KUG);
+	tegra_dc_writel(dc, csc->kvg,	DC_WIN_CSC_KVG);
+	tegra_dc_writel(dc, csc->kub,	DC_WIN_CSC_KUB);
+	tegra_dc_writel(dc, csc->kvb,	DC_WIN_CSC_KVB);
+}
+
+int tegra_dc_update_csc(struct tegra_dc *dc, int win_idx)
+{
+	mutex_lock(&dc->lock);
+
+	if (!dc->enabled) {
+		mutex_unlock(&dc->lock);
+		return -EFAULT;
+	}
+
+	tegra_dc_hold_dc_out(dc);
+	tegra_dc_writel(dc, WINDOW_A_SELECT << win_idx,
+			DC_CMD_DISPLAY_WINDOW_HEADER);
+
+	tegra_dc_set_csc(dc, &dc->windows[win_idx].csc);
+	tegra_dc_release_dc_out(dc);
+
+	mutex_unlock(&dc->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_dc_update_csc);
+
diff --git a/drivers/staging/tegra/video/dc/dc.c b/drivers/staging/tegra/video/dc/dc.c
new file mode 100644
index 000000000000..4855b8706f3a
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/dc.c
@@ -0,0 +1,2417 @@
+/*
+ * drivers/video/tegra/dc/dc.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/backlight.h>
+#include <linux/gpio.h>
+#include <linux/nvhost.h>
+#include <video/tegrafb.h>
+#include <drm/drm_fixed.h>
+#ifdef CONFIG_SWITCH
+#include <linux/switch.h>
+#endif
+#include <linux/i2c.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/reset.h>
+
+#include <video/videomode.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+
+#include <mach/clk.h>
+#include <mach/mc.h>
+#include <mach/latency_allowance.h>
+
+#include "dc.h"
+#include "fb.h"
+#include "dc_reg.h"
+#include "dc_config.h"
+#include "dc_priv.h"
+#include "overlay.h"
+#include "nvsd.h"
+
+#define TEGRA_CRC_LATCHED_DELAY		34
+
+#define DC_COM_PIN_OUTPUT_POLARITY1_INIT_VAL	0x01000000
+#define DC_COM_PIN_OUTPUT_POLARITY3_INIT_VAL	0x0
+
+static struct fb_videomode tegra_dc_hdmi_fallback_mode = {
+	.refresh = 60,
+	.xres = 640,
+	.yres = 480,
+	.pixclock = KHZ2PICOS(25200),
+	.hsync_len = 96,	/* h_sync_width */
+	.vsync_len = 2,		/* v_sync_width */
+	.left_margin = 48,	/* h_back_porch */
+	.upper_margin = 33,	/* v_back_porch */
+	.right_margin = 16,	/* h_front_porch */
+	.lower_margin = 10,	/* v_front_porch */
+	.vmode = 0,
+	.sync = 0,
+};
+
+static struct tegra_dc_mode override_disp_mode[3];
+
+static void _tegra_dc_controller_disable(struct tegra_dc *dc);
+
+struct tegra_dc *tegra_dcs[TEGRA_MAX_DC];
+
+DEFINE_MUTEX(tegra_dc_lock);
+DEFINE_MUTEX(shared_lock);
+
+void tegra_dc_clk_enable(struct tegra_dc *dc)
+{
+	if (!tegra_is_clk_enabled(dc->clk)) {
+		clk_prepare_enable(dc->clk);
+		tegra_dvfs_set_rate(dc->clk, dc->mode.pclk);
+	}
+}
+
+void tegra_dc_clk_disable(struct tegra_dc *dc)
+{
+	if (tegra_is_clk_enabled(dc->clk)) {
+		clk_disable_unprepare(dc->clk);
+// 		tegra_dvfs_set_rate(dc->clk, 0);
+	}
+}
+
+void tegra_dc_hold_dc_out(struct tegra_dc *dc)
+{
+	if (dc->out_ops->hold)
+		dc->out_ops->hold(dc);
+}
+
+void tegra_dc_release_dc_out(struct tegra_dc *dc)
+{
+	if (dc->out_ops->release)
+		dc->out_ops->release(dc);
+}
+
+#define DUMP_REG(a) do {			\
+	snprintf(buff, sizeof(buff), "%-32s\t%03x\t%08lx\n", \
+		 #a, a, tegra_dc_readl(dc, a));		      \
+	print(data, buff);				      \
+	} while (0)
+
+#define print_underflow_info(dc) do {                 \
+	trace_printk("%s:Underflow stats: underflows : %llu, "      \
+			"undeflows_a : %llu, "                          \
+			"underflows_b : %llu, "                         \
+			"underflows_c : %llu\n",                        \
+			dc->ndev->name,                                 \
+			dc->stats.underflows,                           \
+			dc->stats.underflows_a, dc->stats.underflows_b, \
+			dc->stats.underflows_c);                        \
+	} while (0)
+
+static void _dump_regs(struct tegra_dc *dc, void *data,
+		       void (* print)(void *data, const char *str))
+{
+	int i;
+	char buff[256];
+
+	mutex_lock(&dc->lock);
+	tegra_dc_hold_dc_out(dc);
+	tegra_dc_io_start(dc);
+
+	DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
+	DUMP_REG(DC_CMD_DISPLAY_COMMAND);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE);
+	DUMP_REG(DC_CMD_INT_STATUS);
+	DUMP_REG(DC_CMD_INT_MASK);
+	DUMP_REG(DC_CMD_INT_ENABLE);
+	DUMP_REG(DC_CMD_INT_TYPE);
+	DUMP_REG(DC_CMD_INT_POLARITY);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE1);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE2);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE3);
+	DUMP_REG(DC_CMD_STATE_ACCESS);
+	DUMP_REG(DC_CMD_STATE_CONTROL);
+	DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+	DUMP_REG(DC_CMD_REG_ACT_CONTROL);
+
+	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
+	DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
+	DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY);
+	DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY_TIMER);
+	DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
+	DUMP_REG(DC_DISP_REF_TO_SYNC);
+	DUMP_REG(DC_DISP_SYNC_WIDTH);
+	DUMP_REG(DC_DISP_BACK_PORCH);
+	DUMP_REG(DC_DISP_DISP_ACTIVE);
+	DUMP_REG(DC_DISP_FRONT_PORCH);
+	DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
+	DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
+	DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
+	DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
+	DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
+	DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
+	DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
+	DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
+	DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
+	DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
+	DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
+	DUMP_REG(DC_DISP_M0_CONTROL);
+	DUMP_REG(DC_DISP_M1_CONTROL);
+	DUMP_REG(DC_DISP_DI_CONTROL);
+	DUMP_REG(DC_DISP_PP_CONTROL);
+	DUMP_REG(DC_DISP_PP_SELECT_A);
+	DUMP_REG(DC_DISP_PP_SELECT_B);
+	DUMP_REG(DC_DISP_PP_SELECT_C);
+	DUMP_REG(DC_DISP_PP_SELECT_D);
+	DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
+	DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
+	DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
+	DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
+	DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
+	DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
+	DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
+	DUMP_REG(DC_DISP_BORDER_COLOR);
+	DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
+	DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
+	DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
+	DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
+	DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
+	DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
+	DUMP_REG(DC_DISP_CURSOR_START_ADDR);
+	DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
+	DUMP_REG(DC_DISP_CURSOR_POSITION);
+	DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
+	DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
+	DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY0C_HYST);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
+	DUMP_REG(DC_DISP_DAC_CRT_CTRL);
+	DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
+
+	for (i = 0; i < 3; i++) {
+		print(data, "\n");
+		snprintf(buff, sizeof(buff), "WINDOW %c:\n", 'A' + i);
+		print(data, buff);
+
+		tegra_dc_writel(dc, WINDOW_A_SELECT << i,
+				DC_CMD_DISPLAY_WINDOW_HEADER);
+		DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+		DUMP_REG(DC_WIN_WIN_OPTIONS);
+		DUMP_REG(DC_WIN_BYTE_SWAP);
+		DUMP_REG(DC_WIN_BUFFER_CONTROL);
+		DUMP_REG(DC_WIN_COLOR_DEPTH);
+		DUMP_REG(DC_WIN_POSITION);
+		DUMP_REG(DC_WIN_SIZE);
+		DUMP_REG(DC_WIN_PRESCALED_SIZE);
+		DUMP_REG(DC_WIN_H_INITIAL_DDA);
+		DUMP_REG(DC_WIN_V_INITIAL_DDA);
+		DUMP_REG(DC_WIN_DDA_INCREMENT);
+		DUMP_REG(DC_WIN_LINE_STRIDE);
+		DUMP_REG(DC_WIN_BUF_STRIDE);
+		DUMP_REG(DC_WIN_UV_BUF_STRIDE);
+		DUMP_REG(DC_WIN_BLEND_NOKEY);
+		DUMP_REG(DC_WIN_BLEND_1WIN);
+		DUMP_REG(DC_WIN_BLEND_2WIN_X);
+		DUMP_REG(DC_WIN_BLEND_2WIN_Y);
+		DUMP_REG(DC_WIN_BLEND_3WIN_XY);
+		DUMP_REG(DC_WINBUF_START_ADDR);
+		DUMP_REG(DC_WINBUF_START_ADDR_U);
+		DUMP_REG(DC_WINBUF_START_ADDR_V);
+		DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
+		DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
+		DUMP_REG(DC_WINBUF_UFLOW_STATUS);
+		DUMP_REG(DC_WIN_CSC_YOF);
+		DUMP_REG(DC_WIN_CSC_KYRGB);
+		DUMP_REG(DC_WIN_CSC_KUR);
+		DUMP_REG(DC_WIN_CSC_KVR);
+		DUMP_REG(DC_WIN_CSC_KUG);
+		DUMP_REG(DC_WIN_CSC_KVG);
+		DUMP_REG(DC_WIN_CSC_KUB);
+		DUMP_REG(DC_WIN_CSC_KVB);
+	}
+
+	DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
+	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE2);
+	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY2);
+	DUMP_REG(DC_COM_PIN_OUTPUT_DATA2);
+	DUMP_REG(DC_COM_PIN_INPUT_ENABLE2);
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT5);
+	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+	DUMP_REG(DC_DISP_M1_CONTROL);
+	DUMP_REG(DC_COM_PM1_CONTROL);
+	DUMP_REG(DC_COM_PM1_DUTY_CYCLE);
+	DUMP_REG(DC_DISP_SD_CONTROL);
+
+	tegra_dc_io_end(dc);
+	tegra_dc_release_dc_out(dc);
+	mutex_unlock(&dc->lock);
+}
+
+#undef DUMP_REG
+
+#ifdef DEBUG
+static void dump_regs_print(void *data, const char *str)
+{
+	struct tegra_dc *dc = data;
+	dev_dbg(&dc->ndev->dev, "%s", str);
+}
+
+static void dump_regs(struct tegra_dc *dc)
+{
+	_dump_regs(dc, dc, dump_regs_print);
+}
+#else /* !DEBUG */
+
+static void dump_regs(struct tegra_dc *dc) {}
+
+#endif /* DEBUG */
+
+#ifdef CONFIG_DEBUG_FS
+
+static void dbg_regs_print(void *data, const char *str)
+{
+	struct seq_file *s = data;
+
+	seq_printf(s, "%s", str);
+}
+
+static int dbg_dc_show(struct seq_file *s, void *unused)
+{
+	struct tegra_dc *dc = s->private;
+
+	_dump_regs(dc, s, dbg_regs_print);
+
+	return 0;
+}
+
+static int dbg_dc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dbg_dc_show, inode->i_private);
+}
+
+static const struct file_operations regs_fops = {
+	.open		= dbg_dc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int dbg_dc_mode_show(struct seq_file *s, void *unused)
+{
+	struct tegra_dc *dc = s->private;
+	struct tegra_dc_mode *m;
+
+	mutex_lock(&dc->lock);
+	m = &dc->mode;
+	seq_printf(s,
+		"pclk: %d\n"
+		"h_ref_to_sync: %d\n"
+		"v_ref_to_sync: %d\n"
+		"h_sync_width: %d\n"
+		"v_sync_width: %d\n"
+		"h_back_porch: %d\n"
+		"v_back_porch: %d\n"
+		"h_active: %d\n"
+		"v_active: %d\n"
+		"h_front_porch: %d\n"
+		"v_front_porch: %d\n"
+		"stereo_mode: %d\n",
+		m->pclk, m->h_ref_to_sync, m->v_ref_to_sync,
+		m->h_sync_width, m->v_sync_width,
+		m->h_back_porch, m->v_back_porch,
+		m->h_active, m->v_active,
+		m->h_front_porch, m->v_front_porch,
+		m->stereo_mode);
+	mutex_unlock(&dc->lock);
+	return 0;
+}
+
+static int dbg_dc_mode_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dbg_dc_mode_show, inode->i_private);
+}
+
+static const struct file_operations mode_fops = {
+	.open		= dbg_dc_mode_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int dbg_dc_stats_show(struct seq_file *s, void *unused)
+{
+	struct tegra_dc *dc = s->private;
+
+	mutex_lock(&dc->lock);
+	seq_printf(s,
+		"underflows: %llu\n"
+		"underflows_a: %llu\n"
+		"underflows_b: %llu\n"
+		"underflows_c: %llu\n",
+		dc->stats.underflows,
+		dc->stats.underflows_a,
+		dc->stats.underflows_b,
+		dc->stats.underflows_c);
+	mutex_unlock(&dc->lock);
+
+	return 0;
+}
+
+static int dbg_dc_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dbg_dc_stats_show, inode->i_private);
+}
+
+static const struct file_operations stats_fops = {
+	.open		= dbg_dc_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void tegra_dc_remove_debugfs(struct tegra_dc *dc)
+{
+	if (dc->debugdir)
+		debugfs_remove_recursive(dc->debugdir);
+	dc->debugdir = NULL;
+}
+
+static void tegra_dc_create_debugfs(struct tegra_dc *dc)
+{
+	struct dentry *retval;
+
+	dc->debugdir = debugfs_create_dir(dev_name(&dc->ndev->dev), NULL);
+	if (!dc->debugdir)
+		goto remove_out;
+
+	retval = debugfs_create_file("regs", S_IRUGO, dc->debugdir, dc,
+		&regs_fops);
+	if (!retval)
+		goto remove_out;
+
+	retval = debugfs_create_file("mode", S_IRUGO, dc->debugdir, dc,
+		&mode_fops);
+	if (!retval)
+		goto remove_out;
+
+	retval = debugfs_create_file("stats", S_IRUGO, dc->debugdir, dc,
+		&stats_fops);
+	if (!retval)
+		goto remove_out;
+
+	return;
+remove_out:
+	dev_err(&dc->ndev->dev, "could not create debugfs\n");
+	tegra_dc_remove_debugfs(dc);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+static inline void tegra_dc_create_debugfs(struct tegra_dc *dc) { }
+static inline void tegra_dc_remove_debugfs(struct tegra_dc *dc) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int tegra_dc_set(struct tegra_dc *dc, int index)
+{
+	int ret = 0;
+
+	mutex_lock(&tegra_dc_lock);
+	if (index >= TEGRA_MAX_DC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (dc != NULL && tegra_dcs[index] != NULL) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	tegra_dcs[index] = dc;
+
+out:
+	mutex_unlock(&tegra_dc_lock);
+
+	return ret;
+}
+
+unsigned int tegra_dc_has_multiple_dc(void)
+{
+	unsigned int idx;
+	unsigned int cnt = 0;
+	struct tegra_dc *dc;
+
+	mutex_lock(&tegra_dc_lock);
+	for (idx = 0; idx < TEGRA_MAX_DC; idx++)
+		cnt += ((dc = tegra_dcs[idx]) != NULL && dc->enabled) ? 1 : 0;
+	mutex_unlock(&tegra_dc_lock);
+
+	return (cnt > 1);
+}
+
+/* get the stride size of a window.
+ * return: stride size in bytes for window win, or 0 if unavailable. */
+int tegra_dc_get_stride(struct tegra_dc *dc, unsigned win)
+{
+	u32 stride;
+
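+	/* HACK: the enabled check below is commented out, so this function
+	 * currently always returns 0 and the code after it is unreachable. */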
+// 	if (!dc->enabled)
+		return 0;
+	BUG_ON(win >= DC_N_WINDOWS);
+	mutex_lock(&dc->lock);
+	tegra_dc_hold_dc_out(dc);
+	tegra_dc_writel(dc, WINDOW_A_SELECT << win,
+		DC_CMD_DISPLAY_WINDOW_HEADER);
+	stride = tegra_dc_readl(dc, DC_WIN_LINE_STRIDE);
+	tegra_dc_release_dc_out(dc);
+	mutex_unlock(&dc->lock);
+	return GET_LINE_STRIDE(stride);
+}
+EXPORT_SYMBOL(tegra_dc_get_stride);
+
+struct tegra_dc *tegra_dc_get_dc(unsigned idx)
+{
+	if (idx < TEGRA_MAX_DC)
+		return tegra_dcs[idx];
+	else
+		return NULL;
+}
+EXPORT_SYMBOL(tegra_dc_get_dc);
+
+struct tegra_dc_win *tegra_dc_get_window(struct tegra_dc *dc, unsigned win)
+{
+	if (win >= dc->n_windows)
+		return NULL;
+
+	return &dc->windows[win];
+}
+EXPORT_SYMBOL(tegra_dc_get_window);
+
+bool tegra_dc_get_connected(struct tegra_dc *dc)
+{
+	return dc->connected;
+}
+EXPORT_SYMBOL(tegra_dc_get_connected);
+
+bool tegra_dc_hpd(struct tegra_dc *dc)
+{
+	int sense;
+	int level;
+
+	level = gpio_get_value(dc->out->hotplug_gpio);
+
+	sense = dc->out->flags & TEGRA_DC_OUT_HOTPLUG_MASK;
+
+	return (sense == TEGRA_DC_OUT_HOTPLUG_HIGH && level) ||
+		(sense == TEGRA_DC_OUT_HOTPLUG_LOW && !level);
+}
+EXPORT_SYMBOL(tegra_dc_hpd);
+
+static void tegra_dc_set_scaling_filter(struct tegra_dc *dc)
+{
+	unsigned i;
+	unsigned v0 = 128;
+	unsigned v1 = 0;
+	/* linear horizontal and vertical filters */
+	for (i = 0; i < 16; i++) {
+		tegra_dc_writel(dc, (v1 << 16) | (v0 << 8),
+				DC_WIN_H_FILTER_P(i));
+
+		tegra_dc_writel(dc, v0,
+				DC_WIN_V_FILTER_P(i));
+		v0 -= 8;
+		v1 += 8;
+	}
+}
+
+static inline void disable_dc_irq(unsigned int irq)
+{
+	disable_irq(irq);
+}
+
+u32 tegra_dc_get_syncpt_id(const struct tegra_dc *dc, int i)
+{
+	return dc->syncpt[i].id;
+}
+EXPORT_SYMBOL(tegra_dc_get_syncpt_id);
+
+u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc, int i)
+{
+	u32 max;
+
+	mutex_lock(&dc->lock);
+	tegra_dc_hold_dc_out(dc);
+	max = nvhost_syncpt_incr_max_ext(dc->ndev,
+		dc->syncpt[i].id, ((dc->enabled) ? 1 : 0));
+	dc->syncpt[i].max = max;
+	tegra_dc_release_dc_out(dc);
+	mutex_unlock(&dc->lock);
+
+	return max;
+}
+
+void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, int i, u32 val)
+{
+	mutex_lock(&dc->lock);
+	if (dc->enabled) {
+		tegra_dc_hold_dc_out(dc);
+		while (dc->syncpt[i].min < val) {
+			dc->syncpt[i].min++;
+			nvhost_syncpt_cpu_incr_ext(dc->ndev, dc->syncpt[i].id);
+		}
+		tegra_dc_release_dc_out(dc);
+	}
+	mutex_unlock(&dc->lock);
+}
+
+void
+tegra_dc_config_pwm(struct tegra_dc *dc, struct tegra_dc_pwm_params *cfg)
+{
+	unsigned int ctrl;
+	unsigned long out_sel;
+	unsigned long cmd_state;
+
+	mutex_lock(&dc->lock);
+	if (!dc->enabled) {
+		mutex_unlock(&dc->lock);
+		return;
+	}
+
+	tegra_dc_hold_dc_out(dc);
+
+	ctrl = ((cfg->period << PM_PERIOD_SHIFT) |
+		(cfg->clk_div << PM_CLK_DIVIDER_SHIFT) |
+		cfg->clk_select);
+
+	/* The new value should take effect immediately */
+	cmd_state = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS);
+	tegra_dc_writel(dc, (cmd_state | (1 << 2)), DC_CMD_STATE_ACCESS);
+
+	switch (cfg->which_pwm) {
+	case TEGRA_PWM_PM0:
+		/* Select the LM0 on PM0 */
+		out_sel = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_SELECT5);
+		out_sel &= ~(7 << 0);
+		out_sel |= (3 << 0);
+		tegra_dc_writel(dc, out_sel, DC_COM_PIN_OUTPUT_SELECT5);
+		tegra_dc_writel(dc, ctrl, DC_COM_PM0_CONTROL);
+		tegra_dc_writel(dc, cfg->duty_cycle, DC_COM_PM0_DUTY_CYCLE);
+		break;
+	case TEGRA_PWM_PM1:
+		/* Select the LM1 on PM1 */
+		out_sel = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_SELECT5);
+		out_sel &= ~(7 << 4);
+		out_sel |= (3 << 4);
+		tegra_dc_writel(dc, out_sel, DC_COM_PIN_OUTPUT_SELECT5);
+		tegra_dc_writel(dc, ctrl, DC_COM_PM1_CONTROL);
+		tegra_dc_writel(dc, cfg->duty_cycle, DC_COM_PM1_DUTY_CYCLE);
+		break;
+	default:
+		dev_err(&dc->ndev->dev, "Error: Need which_pwm\n");
+		break;
+	}
+	tegra_dc_writel(dc, cmd_state, DC_CMD_STATE_ACCESS);
+	tegra_dc_release_dc_out(dc);
+	mutex_unlock(&dc->lock);
+}
+EXPORT_SYMBOL(tegra_dc_config_pwm);
+
+void tegra_dc_set_out_pin_polars(struct tegra_dc *dc,
+				const struct tegra_dc_out_pin *pins,
+				const unsigned int n_pins)
+{
+	unsigned int i;
+
+	int name;
+	int pol;
+
+	u32 pol1, pol3;
+
+	u32 set1, unset1;
+	u32 set3, unset3;
+
+	set1 = set3 = unset1 = unset3 = 0;
+
+	for (i = 0; i < n_pins; i++) {
+		name = (pins + i)->name;
+		pol  = (pins + i)->pol;
+
+		/* set polarity by name */
+		switch (name) {
+		case TEGRA_DC_OUT_PIN_DATA_ENABLE:
+			if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
+				set3 |= LSPI_OUTPUT_POLARITY_LOW;
+			else
+				unset3 |= LSPI_OUTPUT_POLARITY_LOW;
+			break;
+		case TEGRA_DC_OUT_PIN_H_SYNC:
+			if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
+				set1 |= LHS_OUTPUT_POLARITY_LOW;
+			else
+				unset1 |= LHS_OUTPUT_POLARITY_LOW;
+			break;
+		case TEGRA_DC_OUT_PIN_V_SYNC:
+			if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
+				set1 |= LVS_OUTPUT_POLARITY_LOW;
+			else
+				unset1 |= LVS_OUTPUT_POLARITY_LOW;
+			break;
+		case TEGRA_DC_OUT_PIN_PIXEL_CLOCK:
+			if (pol == TEGRA_DC_OUT_PIN_POL_LOW)
+				set1 |= LSC0_OUTPUT_POLARITY_LOW;
+			else
+				unset1 |= LSC0_OUTPUT_POLARITY_LOW;
+			break;
+		default:
+			printk("Invalid argument in function %s\n",
+			       __FUNCTION__);
+			break;
+		}
+	}
+
+	pol1 = DC_COM_PIN_OUTPUT_POLARITY1_INIT_VAL;
+	pol3 = DC_COM_PIN_OUTPUT_POLARITY3_INIT_VAL;
+
+	pol1 |= set1;
+	pol1 &= ~unset1;
+
+	pol3 |= set3;
+	pol3 &= ~unset3;
+
+	tegra_dc_writel(dc, pol1, DC_COM_PIN_OUTPUT_POLARITY1);
+	tegra_dc_writel(dc, pol3, DC_COM_PIN_OUTPUT_POLARITY3);
+}
+
+static struct tegra_dc_mode *tegra_dc_get_override_mode(struct tegra_dc *dc)
+{
+	if (dc->out->type == TEGRA_DC_OUT_RGB ||
+		dc->out->type == TEGRA_DC_OUT_HDMI ||
+		dc->out->type == TEGRA_DC_OUT_DSI)
+		return override_disp_mode[dc->out->type].pclk ?
+			&override_disp_mode[dc->out->type] : NULL;
+	else
+		return NULL;
+}
+
+static void tegra_dc_set_out(struct tegra_dc *dc, struct tegra_dc_out *out)
+{
+	struct tegra_dc_mode *mode;
+
+	dc->out = out;
+	mode = tegra_dc_get_override_mode(dc);
+
+	if (mode)
+		tegra_dc_set_mode(dc, mode);
+	else if (out->n_modes > 0)
+		tegra_dc_set_mode(dc, &dc->out->modes[0]);
+
+	switch (out->type) {
+	case TEGRA_DC_OUT_RGB:
+		dc->out_ops = &tegra_dc_rgb_ops;
+		break;
+
+	case TEGRA_DC_OUT_HDMI:
+		dc->out_ops = &tegra_dc_hdmi_ops;
+		break;
+
+	case TEGRA_DC_OUT_DSI:
+		dc->out_ops = &tegra_dc_dsi_ops;
+		break;
+
+	default:
+		dc->out_ops = NULL;
+		break;
+	}
+
+	if (dc->out_ops && dc->out_ops->init)
+		dc->out_ops->init(dc);
+}
+
+unsigned tegra_dc_get_out_height(const struct tegra_dc *dc)
+{
+	if (dc->out)
+		return dc->out->height;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(tegra_dc_get_out_height);
+
+unsigned tegra_dc_get_out_width(const struct tegra_dc *dc)
+{
+	if (dc->out)
+		return dc->out->width;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(tegra_dc_get_out_width);
+
+unsigned tegra_dc_get_out_max_pixclock(const struct tegra_dc *dc)
+{
+	if (dc->out && dc->out->max_pixclock)
+		return dc->out->max_pixclock;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(tegra_dc_get_out_max_pixclock);
+
+void tegra_dc_enable_crc(struct tegra_dc *dc)
+{
+	u32 val;
+
+	mutex_lock(&dc->lock);
+	tegra_dc_hold_dc_out(dc);
+	tegra_dc_io_start(dc);
+
+	val = CRC_ALWAYS_ENABLE | CRC_INPUT_DATA_ACTIVE_DATA |
+		CRC_ENABLE_ENABLE;
+	tegra_dc_writel(dc, val, DC_COM_CRC_CONTROL);
+	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+	tegra_dc_release_dc_out(dc);
+	mutex_unlock(&dc->lock);
+}
+
+void tegra_dc_disable_crc(struct tegra_dc *dc)
+{
+	mutex_lock(&dc->lock);
+	tegra_dc_hold_dc_out(dc);
+	tegra_dc_writel(dc, 0x0, DC_COM_CRC_CONTROL);
+	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	tegra_dc_io_end(dc);
+	tegra_dc_release_dc_out(dc);
+	mutex_unlock(&dc->lock);
+}
+
+u32 tegra_dc_read_checksum_latched(struct tegra_dc *dc)
+{
+	int crc = 0;
+
+	if (!dc) {
+		pr_err("%s: dc is NULL\n", __func__);
+		goto crc_error;
+	}
+
+	/* TODO: Replace mdelay with code to sync VBLANK, since
+	 * DC_COM_CRC_CHECKSUM_LATCHED is available after VBLANK */
+	mdelay(TEGRA_CRC_LATCHED_DELAY);
+
+	mutex_lock(&dc->lock);
+	tegra_dc_hold_dc_out(dc);
+	crc = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM_LATCHED);
+	tegra_dc_release_dc_out(dc);
+	mutex_unlock(&dc->lock);
+crc_error:
+	return crc;
+}
+
+static bool tegra_dc_windows_are_dirty(struct tegra_dc *dc)
+{
+	u32 val;
+
+	val = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+	if (val & (WIN_A_ACT_REQ | WIN_B_ACT_REQ | WIN_C_ACT_REQ))
+		return true;
+
+	return false;
+}
+
+static inline void enable_dc_irq(unsigned int irq)
+{
+	enable_irq(irq);
+}
+
+void tegra_dc_get_fbvblank(struct tegra_dc *dc, struct fb_vblank *vblank)
+{
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+		vblank->flags = FB_VBLANK_HAVE_VSYNC;
+}
+
+int tegra_dc_wait_for_vsync(struct tegra_dc *dc)
+{
+	int ret = -ENOTTY;
+
+	if (!(dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) || !dc->enabled)
+		return ret;
+
+	/*
+	 * Logic is as follows
+	 * a) Indicate we need a vblank.
+	 * b) Wait for completion to be signalled from isr.
+	 * c) Initialize completion for next iteration.
+	 */
+
+	tegra_dc_hold_dc_out(dc);
+	dc->out->user_needs_vblank = true;
+
+	ret = wait_for_completion_interruptible(&dc->out->user_vblank_comp);
+	init_completion(&dc->out->user_vblank_comp);
+	tegra_dc_release_dc_out(dc);
+
+	return ret;
+}
+
+static void tegra_dc_vblank(struct work_struct *work)
+{
+	struct tegra_dc *dc = container_of(work, struct tegra_dc, vblank_work);
+	bool nvsd_updated = false;
+
+	mutex_lock(&dc->lock);
+
+	if (!dc->enabled) {
+		mutex_unlock(&dc->lock);
+		return;
+	}
+
+	tegra_dc_hold_dc_out(dc);
+	/* use the new frame's bandwidth setting instead of max(current, new),
+	 * skip this if we're using tegra_dc_one_shot_worker() */
+	if (!(dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE))
+		tegra_dc_program_bandwidth(dc, true);
+
+	/* Clear the V_BLANK_FLIP bit of vblank ref-count if update is clean. */
+	if (!tegra_dc_windows_are_dirty(dc))
+		clear_bit(V_BLANK_FLIP, &dc->vblank_ref_count);
+
+	/* Update the SD brightness */
+	if (dc->enabled && dc->out->sd_settings) {
+		nvsd_updated = nvsd_update_brightness(dc);
+		/* Ref-count vblank if nvsd is on-going. Otherwise, clear the
+		 * V_BLANK_NVSD bit of the vblank ref-count. */
+		if (nvsd_updated) {
+			set_bit(V_BLANK_NVSD, &dc->vblank_ref_count);
+			tegra_dc_unmask_interrupt(dc, V_BLANK_INT);
+		} else {
+			clear_bit(V_BLANK_NVSD, &dc->vblank_ref_count);
+		}
+	}
+
+	/* Mask vblank interrupt if ref-count is zero. */
+	if (!dc->vblank_ref_count)
+		tegra_dc_mask_interrupt(dc, V_BLANK_INT);
+
+	tegra_dc_release_dc_out(dc);
+	mutex_unlock(&dc->lock);
+
+	/* Do the actual brightness update outside of the mutex */
+	if (nvsd_updated && dc->out->sd_settings &&
+	    dc->out->sd_settings->bl_device) {
+
+		struct platform_device *pdev = dc->out->sd_settings->bl_device;
+		struct backlight_device *bl = platform_get_drvdata(pdev);
+		if (bl)
+			backlight_update_status(bl);
+	}
+}
+
+static void tegra_dc_one_shot_worker(struct work_struct *work)
+{
+	struct tegra_dc *dc = container_of(
+		to_delayed_work(work), struct tegra_dc, one_shot_work);
+	mutex_lock(&dc->lock);
+
+	/* memory client has gone idle */
+	tegra_dc_clear_bandwidth(dc);
+
+	if (dc->out_ops->idle)
+		dc->out_ops->idle(dc);
+
+	mutex_unlock(&dc->lock);
+}
+
+/* Return an arbitrarily large number if a count overflow occurs.
+ * Make it a nice base-10 number to show up in the stats output. */
+static u64 tegra_dc_underflow_count(struct tegra_dc *dc, unsigned reg)
+{
+	unsigned count = tegra_dc_readl(dc, reg);
+	tegra_dc_writel(dc, 0, reg);
+	return ((count & 0x80000000) == 0) ? count : 10000000000ll;
+}
+
+static void tegra_dc_underflow_handler(struct tegra_dc *dc)
+{
+	u32 val;
+	int i;
+
+	if (dc->underflow_mask) {
+		dc->stats.underflows++;
+		if (dc->underflow_mask & WIN_A_UF_INT) {
+			dc->stats.underflows_a += tegra_dc_underflow_count(dc,
+				DC_WINBUF_AD_UFLOW_STATUS);
+			trace_printk("%s:Window A Underflow\n", dc->ndev->name);
+		}
+		if (dc->underflow_mask & WIN_B_UF_INT) {
+			dc->stats.underflows_b += tegra_dc_underflow_count(dc,
+				DC_WINBUF_BD_UFLOW_STATUS);
+			trace_printk("%s:Window B Underflow\n", dc->ndev->name);
+		}
+		if (dc->underflow_mask & WIN_C_UF_INT) {
+			dc->stats.underflows_c += tegra_dc_underflow_count(dc,
+				DC_WINBUF_CD_UFLOW_STATUS);
+			trace_printk("%s:Window C Underflow\n", dc->ndev->name);
+		}
+	}
+
+	/* Check for any underflow reset conditions */
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		if (dc->underflow_mask & (WIN_A_UF_INT << i)) {
+			if (dc->windows[i].out_w < UNDERFLOW_IGNORE_W &&
+			    dc->windows[i].out_h < UNDERFLOW_IGNORE_H)
+				continue;
+
+			dc->windows[i].underflows++;
+
+			mdelay(30);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+			if (dc->windows[i].underflows > 4) {
+				schedule_work(&dc->reset_work);
+				/* reset counter */
+				dc->windows[i].underflows = 0;
+				trace_printk("%s:Reset work scheduled for "
+						"window %c\n",
+						dc->ndev->name, 'A' + i);
+			}
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+			if (dc->windows[i].underflows > 4) {
+				printk(KERN_ERR "%s: dc in underflow state;"
+					" enable UF_LINE_FLUSH to clear up\n",
+					__func__);
+				tegra_dc_writel(dc, UF_LINE_FLUSH,
+						DC_DISP_DISP_MISC_CONTROL);
+				tegra_dc_writel(dc, GENERAL_UPDATE,
+						DC_CMD_STATE_CONTROL);
+				tegra_dc_writel(dc, GENERAL_ACT_REQ,
+						DC_CMD_STATE_CONTROL);
+
+				tegra_dc_writel(dc, 0,
+						DC_DISP_DISP_MISC_CONTROL);
+				tegra_dc_writel(dc, GENERAL_UPDATE,
+						DC_CMD_STATE_CONTROL);
+				tegra_dc_writel(dc, GENERAL_ACT_REQ,
+						DC_CMD_STATE_CONTROL);
+			}
+#endif
+		} else {
+			dc->windows[i].underflows = 0;
+		}
+	}
+
+	/* Clear the underflow mask now that we've checked it. */
+	tegra_dc_writel(dc, dc->underflow_mask, DC_CMD_INT_STATUS);
+	dc->underflow_mask = 0;
+	val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+	tegra_dc_writel(dc, val | ALL_UF_INT, DC_CMD_INT_MASK);
+	print_underflow_info(dc);
+}
+
+static void tegra_dc_one_shot_irq(struct tegra_dc *dc, unsigned long status)
+{
+	/* pending user vblank, so wakeup */
+	if ((status & (V_BLANK_INT | MSF_INT)) &&
+	    (dc->out->user_needs_vblank)) {
+		dc->out->user_needs_vblank = false;
+		complete(&dc->out->user_vblank_comp);
+	}
+
+	if (status & V_BLANK_INT) {
+		/* Sync up windows. */
+		tegra_dc_trigger_windows(dc);
+
+		/* Schedule any additional bottom-half vblank activities. */
+		queue_work(system_freezable_wq, &dc->vblank_work);
+	}
+
+	if (status & FRAME_END_INT) {
+		/* Check underflow */
+		tegra_dc_underflow_handler(dc);
+
+		/* Mark the frame_end as complete. */
+		if (!completion_done(&dc->frame_end_complete))
+			complete(&dc->frame_end_complete);
+	}
+}
+
+static void tegra_dc_continuous_irq(struct tegra_dc *dc, unsigned long status)
+{
+	/* Schedule any additional bottom-half vblank activities. */
+	if (status & V_BLANK_INT) {
+		/* Check underflow */
+		tegra_dc_underflow_handler(dc);
+
+		queue_work(system_freezable_wq, &dc->vblank_work);
+	}
+
+	if (status & FRAME_END_INT) {
+		struct timespec tm = CURRENT_TIME;
+		dc->frame_end_timestamp = timespec_to_ns(&tm);
+		wake_up(&dc->timestamp_wq);
+
+		/* Mark the frame_end as complete. */
+		if (!completion_done(&dc->frame_end_complete))
+			complete(&dc->frame_end_complete);
+
+		tegra_dc_trigger_windows(dc);
+	}
+}
+
+/* XXX: Not sure if we limit look ahead to 1 frame */
+bool tegra_dc_is_within_n_vsync(struct tegra_dc *dc, s64 ts)
+{
+	BUG_ON(!dc->frametime_ns);
+	return ((ts - dc->frame_end_timestamp) < dc->frametime_ns);
+}
+
+bool tegra_dc_does_vsync_separate(struct tegra_dc *dc, s64 new_ts, s64 old_ts)
+{
+	BUG_ON(!dc->frametime_ns);
+	return (((new_ts - old_ts) > dc->frametime_ns)
+		|| (div_s64((new_ts - dc->frame_end_timestamp), dc->frametime_ns)
+			!= div_s64((old_ts - dc->frame_end_timestamp),
+				dc->frametime_ns)));
+}
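+
+/*
+ * Illustrative reading of the test above (values are hypothetical): with
+ * frametime_ns = 16666667 (~60 Hz) and frame_end_timestamp = 0, timestamps
+ * of 5 ms and 12 ms land in the same frame window (both divide to 0), so
+ * the function returns false; 12 ms and 20 ms divide to 0 and 1, so it
+ * returns true even though they are less than one frametime apart.
+ */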
+
+static irqreturn_t tegra_dc_irq(int irq, void *ptr)
+{
+	struct tegra_dc *dc = ptr;
+	unsigned long status;
+	unsigned long underflow_mask;
+	u32 val;
+
+	if (!nvhost_module_powered_ext(nvhost_get_parent(dc->ndev))) {
+		WARN(1, "IRQ when DC not powered!\n");
+		tegra_dc_io_start(dc);
+		status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+		tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+		tegra_dc_io_end(dc);
+		return IRQ_HANDLED;
+	}
+
+	/* clear all status flags except underflow, save those for the worker */
+	status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+	tegra_dc_writel(dc, status & ~ALL_UF_INT, DC_CMD_INT_STATUS);
+	val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+	tegra_dc_writel(dc, val & ~ALL_UF_INT, DC_CMD_INT_MASK);
+
+	/*
+	 * Overlays can get their internal state corrupted during an underflow
+	 * condition.  The only way to fix this state is to reset the DC.
+	 * If we get 4 consecutive frames with underflows, assume we're
+	 * hosed and reset.
+	 */
+	underflow_mask = status & ALL_UF_INT;
+
+	/* Check underflow */
+	if (underflow_mask)
+		dc->underflow_mask |= underflow_mask;
+
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+		tegra_dc_one_shot_irq(dc, status);
+	else
+		tegra_dc_continuous_irq(dc, status);
+
+	return IRQ_HANDLED;
+}
+
+static void tegra_dc_set_color_control(struct tegra_dc *dc)
+{
+	u32 color_control;
+
+	switch (dc->out->depth) {
+	case 3:
+		color_control = BASE_COLOR_SIZE111;
+		break;
+
+	case 6:
+		color_control = BASE_COLOR_SIZE222;
+		break;
+
+	case 8:
+		color_control = BASE_COLOR_SIZE332;
+		break;
+
+	case 9:
+		color_control = BASE_COLOR_SIZE333;
+		break;
+
+	case 12:
+		color_control = BASE_COLOR_SIZE444;
+		break;
+
+	case 15:
+		color_control = BASE_COLOR_SIZE555;
+		break;
+
+	case 16:
+		color_control = BASE_COLOR_SIZE565;
+		break;
+
+	case 18:
+		color_control = BASE_COLOR_SIZE666;
+		break;
+
+	default:
+		color_control = BASE_COLOR_SIZE888;
+		break;
+	}
+
+	switch (dc->out->dither) {
+	case TEGRA_DC_DISABLE_DITHER:
+		color_control |= DITHER_CONTROL_DISABLE;
+		break;
+	case TEGRA_DC_ORDERED_DITHER:
+		color_control |= DITHER_CONTROL_ORDERED;
+		break;
+	case TEGRA_DC_ERRDIFF_DITHER:
+		/* The line buffer for error-diffusion dither is limited
+		 * to 1280 pixels per line. This limits the maximum
+		 * horizontal active area size to 1280 pixels when error
+		 * diffusion is enabled.
+		 */
+		BUG_ON(dc->mode.h_active > 1280);
+		color_control |= DITHER_CONTROL_ERRDIFF;
+		break;
+	}
+
+	tegra_dc_writel(dc, color_control, DC_DISP_DISP_COLOR_CONTROL);
+}
+
+static u32 get_syncpt(struct tegra_dc *dc, int idx)
+{
+	u32 syncpt_id;
+
+	switch (dc->ndev->id) {
+	case 0:
+		switch (idx) {
+		case 0:
+			syncpt_id = NVSYNCPT_DISP0_A;
+			break;
+		case 1:
+			syncpt_id = NVSYNCPT_DISP0_B;
+			break;
+		case 2:
+			syncpt_id = NVSYNCPT_DISP0_C;
+			break;
+		default:
+			BUG();
+			break;
+		}
+		break;
+	case 1:
+		switch (idx) {
+		case 0:
+			syncpt_id = NVSYNCPT_DISP1_A;
+			break;
+		case 1:
+			syncpt_id = NVSYNCPT_DISP1_B;
+			break;
+		case 2:
+			syncpt_id = NVSYNCPT_DISP1_C;
+			break;
+		default:
+			BUG();
+			break;
+		}
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return syncpt_id;
+}
+
+static int tegra_dc_init(struct tegra_dc *dc)
+{
+	int i;
+	int int_enable;
+
+	tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+// 	if (dc->ndev->id == 0) {
+// 		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0A,
+// 				      TEGRA_MC_PRIO_MED);
+// 		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0B,
+// 				      TEGRA_MC_PRIO_MED);
+// 		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0C,
+// 				      TEGRA_MC_PRIO_MED);
+// 		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1B,
+// 				      TEGRA_MC_PRIO_MED);
+// 		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHC,
+// 				      TEGRA_MC_PRIO_HIGH);
+// 	} else if (dc->ndev->id == 1) {
+// 		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0AB,
+// 				      TEGRA_MC_PRIO_MED);
+// 		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0BB,
+// 				      TEGRA_MC_PRIO_MED);
+// 		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0CB,
+// 				      TEGRA_MC_PRIO_MED);
+// 		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1BB,
+// 				      TEGRA_MC_PRIO_MED);
+// 		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHCB,
+// 				      TEGRA_MC_PRIO_HIGH);
+// 	}
+	tegra_dc_writel(dc, 0x00000100 | dc->vblank_syncpt,
+			DC_CMD_CONT_SYNCPT_VSYNC);
+	tegra_dc_writel(dc, 0x00004700, DC_CMD_INT_TYPE);
+	tegra_dc_writel(dc, 0x0001c700, DC_CMD_INT_POLARITY);
+	tegra_dc_writel(dc, 0x00202020, DC_DISP_MEM_HIGH_PRIORITY);
+	tegra_dc_writel(dc, 0x00010101, DC_DISP_MEM_HIGH_PRIORITY_TIMER);
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+	tegra_dc_writel(dc, 0x00000000, DC_DISP_DISP_MISC_CONTROL);
+#endif
+	/* enable interrupts for vblank, frame_end and underflows */
+	int_enable = (FRAME_END_INT | V_BLANK_INT | ALL_UF_INT);
+	/* for panels with one-shot mode enable tearing effect interrupt */
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+		int_enable |= MSF_INT;
+
+	tegra_dc_writel(dc, int_enable, DC_CMD_INT_ENABLE);
+	tegra_dc_writel(dc, ALL_UF_INT, DC_CMD_INT_MASK);
+
+	tegra_dc_writel(dc, 0x00000000, DC_DISP_BORDER_COLOR);
+
+	tegra_dc_set_color_control(dc);
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		struct tegra_dc_win *win = &dc->windows[i];
+		tegra_dc_writel(dc, WINDOW_A_SELECT << i,
+				DC_CMD_DISPLAY_WINDOW_HEADER);
+		tegra_dc_set_csc(dc, &win->csc);
+		tegra_dc_set_lut(dc, win);
+		tegra_dc_set_scaling_filter(dc);
+	}
+
+	for (i = 0; i < dc->n_windows; i++) {
+		u32 syncpt = get_syncpt(dc, i);
+
+		dc->syncpt[i].id = syncpt;
+
+		dc->syncpt[i].min = dc->syncpt[i].max =
+			nvhost_syncpt_read_ext(dc->ndev, syncpt);
+	}
+
+	print_mode_info(dc, dc->mode);
+
+	if (dc->mode.pclk)
+		if (tegra_dc_program_mode(dc, &dc->mode))
+			return -EINVAL;
+
+	/* Initialize SD AFTER the modeset.
+	 * nvsd_init handles the sd_settings = NULL case. */
+	nvsd_init(dc, dc->out->sd_settings);
+
+	return 0;
+}
+
+void tegra_enable_backlight(struct tegra_dc *dc)
+{
+	if (dc->out->type != TEGRA_DC_OUT_RGB)
+		return;
+
+	if (!IS_ERR_OR_NULL(dc->out->bl_vdd))
+		WARN_ON(regulator_enable(dc->out->bl_vdd) != 0);
+}
+
+int tegra_dc_panel_enable_common(struct tegra_dc *dc)
+{
+	if (dc->out->panel_enabled)
+		return 0;
+
+	switch (dc->out->type) {
+	case TEGRA_DC_OUT_RGB:
+		if (!IS_ERR_OR_NULL(dc->out->pnl_vdd))
+			WARN_ON(regulator_enable(dc->out->pnl_vdd) != 0);
+		if (!IS_ERR_OR_NULL(dc->out->lvds_vdd))
+			WARN_ON(regulator_enable(dc->out->lvds_vdd) != 0);
+		break;
+	case TEGRA_DC_OUT_HDMI:
+		if (!IS_ERR_OR_NULL(dc->out->hdmi_vdd))
+			WARN_ON(regulator_enable(dc->out->hdmi_vdd) != 0);
+		if (!IS_ERR_OR_NULL(dc->out->hdmi_pll))
+			WARN_ON(regulator_enable(dc->out->hdmi_pll) != 0);
+		break;
+	case TEGRA_DC_OUT_DSI:
+		break;
+	default:
+		dev_err(&dc->ndev->dev, "wrong dc type\n");
+		return 0;
+	}
+
+	dc->out->panel_enabled = true;
+
+	return 1;
+}
+
+void tegra_disable_backlight(struct tegra_dc *dc)
+{
+	if (dc->out->type != TEGRA_DC_OUT_RGB)
+		return;
+
+	if (!IS_ERR_OR_NULL(dc->out->bl_vdd))
+		regulator_disable(dc->out->bl_vdd);
+}
+
+void tegra_dc_panel_disable_common(struct tegra_dc *dc)
+{
+	if (!dc->out->panel_enabled)
+		return;
+
+	switch (dc->out->type) {
+	case TEGRA_DC_OUT_RGB:
+		if (!IS_ERR_OR_NULL(dc->out->lvds_vdd))
+			regulator_disable(dc->out->lvds_vdd);
+		if (!IS_ERR_OR_NULL(dc->out->pnl_vdd))
+			regulator_disable(dc->out->pnl_vdd);
+		break;
+	case TEGRA_DC_OUT_HDMI:
+		if (!IS_ERR_OR_NULL(dc->out->hdmi_vdd))
+			regulator_disable(dc->out->hdmi_vdd);
+		if (!IS_ERR_OR_NULL(dc->out->hdmi_pll))
+			regulator_disable(dc->out->hdmi_pll);
+		break;
+	case TEGRA_DC_OUT_DSI:
+		break;
+	default:
+		dev_err(&dc->ndev->dev, "wrong dc type\n");
+		return;
+	}
+
+	dc->out->panel_enabled = false;
+}
+
+static bool _tegra_dc_controller_enable(struct tegra_dc *dc)
+{
+	int failed_init = 0;
+
+	if (dc->out->enable)
+		dc->out->enable();
+	if (tegra_dc_panel_enable_common(dc))
+		msleep(dc->out->lvds_to_bl_timeout);
+
+	tegra_dc_setup_clk(dc, dc->clk);
+	tegra_dc_clk_enable(dc);
+
+	reset_control_assert(dc->ndev->rst);
+	usleep_range(1000, 2000);
+	reset_control_deassert(dc->ndev->rst);
+
+	/* do not accept interrupts during initialization */
+	tegra_dc_writel(dc, 0, DC_CMD_INT_MASK);
+
+	enable_dc_irq(dc->irq);
+
+	failed_init = tegra_dc_init(dc);
+	if (failed_init) {
+		tegra_dc_writel(dc, 0, DC_CMD_INT_MASK);
+		disable_dc_irq(dc->irq);
+		tegra_dc_clear_bandwidth(dc);
+		tegra_dc_clk_disable(dc);
+		if (dc->out && dc->out->disable)
+			dc->out->disable();
+		tegra_dc_panel_disable_common(dc);
+		return false;
+	}
+
+	if (dc->out_ops && dc->out_ops->enable)
+		dc->out_ops->enable(dc);
+
+	/* force a full blending update */
+	dc->blend.z[0] = -1;
+
+	tegra_dc_ext_enable(dc->ext);
+
+	trace_printk("%s:enable\n", dc->ndev->name);
+
+	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	if (dc->out->postpoweron)
+		dc->out->postpoweron();
+
+	return true;
+}
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+static bool _tegra_dc_controller_reset_enable(struct tegra_dc *dc)
+{
+	bool ret = true;
+
+	if (dc->out->enable)
+		dc->out->enable();
+
+	tegra_dc_setup_clk(dc, dc->clk);
+	tegra_dc_clk_enable(dc);
+
+	if (dc->ndev->id == 0 && tegra_dcs[1] != NULL) {
+		mutex_lock(&tegra_dcs[1]->lock);
+		disable_irq(tegra_dcs[1]->irq);
+	} else if (dc->ndev->id == 1 && tegra_dcs[0] != NULL) {
+		mutex_lock(&tegra_dcs[0]->lock);
+		disable_irq(tegra_dcs[0]->irq);
+	}
+
+	reset_control_assert(dc->ndev->rst);
+	usleep_range(1000, 2000);
+	reset_control_deassert(dc->ndev->rst);
+
+	if (dc->ndev->id == 0 && tegra_dcs[1] != NULL) {
+		enable_dc_irq(tegra_dcs[1]->irq);
+		mutex_unlock(&tegra_dcs[1]->lock);
+	} else if (dc->ndev->id == 1 && tegra_dcs[0] != NULL) {
+		enable_dc_irq(tegra_dcs[0]->irq);
+		mutex_unlock(&tegra_dcs[0]->lock);
+	}
+
+	enable_dc_irq(dc->irq);
+
+	if (tegra_dc_init(dc)) {
+		dev_err(&dc->ndev->dev, "cannot initialize\n");
+		ret = false;
+	}
+
+	if (dc->out->postpoweron)
+		dc->out->postpoweron();
+
+	/* force a full blending update */
+	dc->blend.z[0] = -1;
+
+	tegra_dc_ext_enable(dc->ext);
+
+	if (!ret) {
+		dev_err(&dc->ndev->dev, "initialization failed, disabling\n");
+		_tegra_dc_controller_disable(dc);
+	}
+
+	trace_printk("%s:reset enable\n", dc->ndev->name);
+	return ret;
+}
+#endif
+
+static int _tegra_dc_set_default_videomode(struct tegra_dc *dc)
+{
+	if (dc->mode.pclk == 0) {
+		switch (dc->out->type) {
+		case TEGRA_DC_OUT_HDMI:
+			/* DC enable called but no videomode is loaded.
+			 * Check if HDMI is connected, then set the fallback
+			 * mode. */
+			if (tegra_dc_hpd(dc))
+				return tegra_dc_set_fb_mode(dc,
+						&tegra_dc_hdmi_fallback_mode, 0);
+			else
+				return false;
+
+		/* Do nothing for other outputs for now */
+		case TEGRA_DC_OUT_RGB:
+
+		case TEGRA_DC_OUT_DSI:
+
+		default:
+			return false;
+		}
+	}
+
+	return false;
+}
+
+static bool _tegra_dc_enable(struct tegra_dc *dc)
+{
+	if (dc->mode.pclk == 0)
+		return false;
+
+	if (!dc->out)
+		return false;
+
+	tegra_dc_io_start(dc);
+
+	if (!_tegra_dc_controller_enable(dc)) {
+		tegra_dc_io_end(dc);
+		return false;
+	}
+	return true;
+}
+
+void tegra_dc_enable(struct tegra_dc *dc)
+{
+	cancel_delayed_work_sync(&dc->disable_work);
+
+	mutex_lock(&dc->lock);
+
+	if (!dc->enabled)
+		dc->enabled = _tegra_dc_enable(dc);
+
+	mutex_unlock(&dc->lock);
+	print_mode_info(dc, dc->mode);
+}
+
+static void _tegra_dc_controller_disable(struct tegra_dc *dc)
+{
+	unsigned i;
+
+	if (dc->out && dc->out->prepoweroff)
+		dc->out->prepoweroff();
+
+	if (dc->out_ops && dc->out_ops->disable)
+		dc->out_ops->disable(dc);
+
+	tegra_dc_writel(dc, 0, DC_CMD_INT_MASK);
+	tegra_dc_writel(dc, 0, DC_CMD_INT_ENABLE);
+	disable_irq(dc->irq);
+
+	tegra_dc_clear_bandwidth(dc);
+	tegra_dc_clk_disable(dc);
+
+	if (dc->out && dc->out->disable)
+		dc->out->disable();
+	tegra_dc_panel_disable_common(dc);
+
+	for (i = 0; i < dc->n_windows; i++) {
+		struct tegra_dc_win *w = &dc->windows[i];
+
+		/* reset window bandwidth */
+		w->bandwidth = 0;
+		w->new_bandwidth = 0;
+
+		/* disable windows */
+		w->flags &= ~TEGRA_WIN_FLAG_ENABLED;
+
+		/* flush any pending syncpt waits */
+		while (dc->syncpt[i].min < dc->syncpt[i].max) {
+			trace_printk("%s:syncpt flush id=%d\n", dc->ndev->name,
+				dc->syncpt[i].id);
+			dc->syncpt[i].min++;
+			nvhost_syncpt_cpu_incr_ext(dc->ndev, dc->syncpt[i].id);
+		}
+	}
+	trace_printk("%s:disabled\n", dc->ndev->name);
+}
+
+void tegra_dc_stats_enable(struct tegra_dc *dc, bool enable)
+{
+#if 0 /* underflow interrupt is already enabled by dc reset worker */
+	u32 val;
+	if (dc->enabled)  {
+		val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+		if (enable)
+			val |= (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT);
+		else
+			val &= ~(WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT);
+		tegra_dc_writel(dc, val, DC_CMD_INT_ENABLE);
+	}
+#endif
+}
+
+bool tegra_dc_stats_get(struct tegra_dc *dc)
+{
+#if 0 /* right now it is always enabled */
+	u32 val;
+	bool res;
+
+	if (dc->enabled)  {
+		val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+		res = !!(val & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT));
+	} else {
+		res = false;
+	}
+
+	return res;
+#endif
+	return true;
+}
+
+/* make the screen blank by disabling all windows */
+void tegra_dc_blank(struct tegra_dc *dc)
+{
+	struct tegra_dc_win *dcwins[DC_N_WINDOWS];
+	unsigned i;
+
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		dcwins[i] = tegra_dc_get_window(dc, i);
+		dcwins[i]->flags &= ~TEGRA_WIN_FLAG_ENABLED;
+	}
+
+	tegra_dc_update_windows(dcwins, DC_N_WINDOWS);
+	tegra_dc_sync_windows(dcwins, DC_N_WINDOWS);
+}
+
+static void _tegra_dc_disable(struct tegra_dc *dc)
+{
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) {
+		mutex_lock(&dc->one_shot_lock);
+		cancel_delayed_work_sync(&dc->one_shot_work);
+	}
+
+	tegra_dc_hold_dc_out(dc);
+
+	_tegra_dc_controller_disable(dc);
+	tegra_dc_io_end(dc);
+
+	tegra_dc_release_dc_out(dc);
+
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+		mutex_unlock(&dc->one_shot_lock);
+}
+
+void tegra_dc_disable(struct tegra_dc *dc)
+{
+	if (dc->overlay)
+		tegra_overlay_disable(dc->overlay);
+
+	tegra_dc_ext_disable(dc->ext);
+
+	mutex_lock(&dc->lock);
+
+	if (dc->enabled) {
+		dc->enabled = false;
+		_tegra_dc_disable(dc);
+	}
+
+#ifdef CONFIG_SWITCH
+	switch_set_state(&dc->modeset_switch, 0);
+#endif
+
+	mutex_unlock(&dc->lock);
+	print_mode_info(dc, dc->mode);
+}
+
+static void tegra_dc_delayed_disable_work(struct work_struct *work)
+{
+	struct tegra_dc *dc =
+		container_of(work, struct tegra_dc, disable_work.work);
+
+	tegra_dc_disable(dc);
+}
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+static void tegra_dc_reset_worker(struct work_struct *work)
+{
+	struct tegra_dc *dc =
+		container_of(work, struct tegra_dc, reset_work);
+
+	unsigned long val = 0;
+
+	mutex_lock(&shared_lock);
+
+	dev_warn(&dc->ndev->dev,
+		"overlay stuck in underflow state.  resetting.\n");
+
+	tegra_dc_ext_disable(dc->ext);
+	tegra_overlay_disable(dc->overlay);
+
+	mutex_lock(&dc->lock);
+
+	if (!dc->enabled)
+		goto unlock;
+
+	dc->enabled = false;
+
+	/*
+	 * off host read bus
+	 */
+	val = tegra_dc_readl(dc, DC_CMD_CONT_SYNCPT_VSYNC);
+	val &= ~(0x00000100);
+	tegra_dc_writel(dc, val, DC_CMD_CONT_SYNCPT_VSYNC);
+
+	/*
+	 * set DC to STOP mode
+	 */
+	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+
+	msleep(10);
+
+	_tegra_dc_controller_disable(dc);
+
+	/* _tegra_dc_controller_reset_enable deasserts reset */
+	_tegra_dc_controller_reset_enable(dc);
+
+	dc->enabled = true;
+
+	/* reopen host read bus */
+	val = tegra_dc_readl(dc, DC_CMD_CONT_SYNCPT_VSYNC);
+	val &= ~(0x00000100);
+	val |= 0x100;
+	tegra_dc_writel(dc, val, DC_CMD_CONT_SYNCPT_VSYNC);
+
+unlock:
+	mutex_unlock(&dc->lock);
+	mutex_unlock(&shared_lock);
+	trace_printk("%s:reset complete\n", dc->ndev->name);
+}
+#endif
+
+#ifdef CONFIG_SWITCH
+static ssize_t switch_modeset_print_mode(struct switch_dev *sdev, char *buf)
+{
+	struct tegra_dc *dc =
+		container_of(sdev, struct tegra_dc, modeset_switch);
+
+	if (!sdev->state)
+		return sprintf(buf, "offline\n");
+
+	return sprintf(buf, "%dx%d\n", dc->mode.h_active, dc->mode.v_active);
+}
+#endif
+
+static struct tegra_dc_platform_data *tegra_dc_parse_dt(struct nvhost_device *ndev)
+{
+	struct tegra_dc_platform_data *pdata;
+	struct tegra_dc_out *dc_out;
+	struct tegra_dc_mode *modes;
+	struct tegra_fb_data *fb_data;
+	struct display_timings *disp_timings = NULL;
+	struct device_node *np, *dc_np, *hdmi_np, *ddc;
+	struct i2c_adapter *ddc_i2c_adapter;
+	enum of_gpio_flags flags;
+	struct videomode vm;
+	int i, val;
+
+	dc_np = ndev->dev.of_node;
+
+	np = of_get_child_by_name(dc_np, "rgb");
+	if (!np || !of_device_is_available(np))
+		return NULL;
+
+	np = of_get_child_by_name(np, "display");
+	if (!np)
+		return NULL;
+
+	pdata = devm_kzalloc(&ndev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	if (!of_property_read_u32(np, "flags", &val))
+		pdata->flags = val;
+
+	dc_out = devm_kzalloc(&ndev->dev, sizeof(*dc_out), GFP_KERNEL);
+	if (!dc_out)
+		return NULL;
+	pdata->default_out = dc_out;
+
+	if (!of_property_read_u32(np, "default-emc-rate", &val))
+		pdata->emc_clk_rate = val;
+
+	if (!of_property_read_u32(np, "min-emc-rate", &val))
+		pdata->min_emc_clk_rate = val;
+
+	/* set dc params */
+	if (!of_property_read_u32(np, "type", &val))
+		dc_out->type = val;
+
+	if (!of_property_read_u32(np, "order", &val))
+		dc_out->order = val;
+
+	if (!of_property_read_u32(np, "align", &val))
+		dc_out->align = val;
+
+	if (!of_property_read_u32(np, "depth", &val))
+		dc_out->depth = val;
+
+	if (!of_property_read_u32(np, "dither", &val))
+		dc_out->dither = val;
+
+	if (!of_property_read_u32(np, "max-clock", &val))
+		dc_out->max_pixclock = KHZ2PICOS(val);
+
+	if (!of_property_read_u32(np, "dc-flags", &val))
+		dc_out->flags = val;
+
+	if (!of_property_read_u32(np, "lvds-to-bl", &val))
+		dc_out->lvds_to_bl_timeout = val;
+
+	dc_out->lvds_vdd = devm_regulator_get(&ndev->dev, "lvds-vdd");
+	dc_out->pnl_vdd = devm_regulator_get(&ndev->dev, "pnl-vdd");
+	dc_out->bl_vdd = devm_regulator_get(&ndev->dev, "bl-vdd");
+	dc_out->hdmi_vdd = devm_regulator_get(&ndev->dev, "hdmi-vdd");
+	dc_out->hdmi_pll = devm_regulator_get(&ndev->dev, "hdmi-pll");
+
+	if (dc_out->type == TEGRA_DC_OUT_HDMI) {
+		hdmi_np = of_get_child_by_name(of_get_parent(dc_np), "hdmi");
+		if (!hdmi_np || !of_device_is_available(hdmi_np))
+			return NULL;
+
+		ddc = of_parse_phandle(hdmi_np, "nvidia,ddc-i2c-bus", 0);
+		if (!ddc)
+			return NULL;
+
+		ddc_i2c_adapter = of_find_i2c_adapter_by_node(ddc);
+
+		of_node_put(ddc);
+
+		if (!ddc_i2c_adapter)
+			return NULL;
+
+		dc_out->dcc_bus = ddc_i2c_adapter->nr;
+
+		dc_out->hotplug_gpio = of_get_named_gpio_flags(hdmi_np,
+							"nvidia,hpd-gpio", 0,
+							&flags);
+	} else
+		disp_timings = of_get_display_timings(np);
+
+	if (disp_timings) {
+		/* set video modes */
+		modes = devm_kzalloc(&ndev->dev,
+				sizeof(*modes) * disp_timings->num_timings,
+				GFP_KERNEL);
+		if (!modes) {
+			for (i = 0; i < disp_timings->num_timings; i++)
+				kfree(disp_timings->timings[i]);
+			kfree(disp_timings->timings);
+			kfree(disp_timings);
+			return NULL;
+		}
+
+		dc_out->modes = modes;
+
+		for (i = 0; i < disp_timings->num_timings; i++) {
+			if (!videomode_from_timings(disp_timings, &vm, i)) {
+				/* TODO: Convert to direct use of videomode */
+				struct tegra_dc_mode *mode =
+							&modes[dc_out->n_modes];
+
+				mode->pclk          = vm.pixelclock;
+				mode->h_ref_to_sync = 0;	/* ? */
+				mode->v_ref_to_sync = 0;	/* ? */
+				mode->h_sync_width  = vm.hsync_len;
+				mode->v_sync_width  = vm.vsync_len;
+				mode->h_back_porch  = vm.hback_porch;
+				mode->v_back_porch  = vm.vback_porch;
+				mode->h_active      = vm.hactive;
+				mode->v_active      = vm.vactive;
+				mode->h_front_porch = vm.hfront_porch;
+				mode->v_front_porch = vm.vfront_porch;
+
+				dc_out->n_modes++;
+			}
+
+			kfree(disp_timings->timings[i]);
+		}
+
+		kfree(disp_timings->timings);
+		kfree(disp_timings);
+	}
+
+	fb_data = devm_kzalloc(&ndev->dev, sizeof(*fb_data), GFP_KERNEL);
+	if (!fb_data)
+		return NULL;
+	pdata->fb = fb_data;
+
+	if (!of_property_read_u32(np, "fb-win", &val))
+		fb_data->win = val;
+
+	if (!of_property_read_u32(np, "fb-xres", &val))
+		fb_data->xres = val;
+
+	if (!of_property_read_u32(np, "fb-yres", &val))
+		fb_data->yres = val;
+
+	if (!of_property_read_u32(np, "fb-bpp", &val))
+		fb_data->bits_per_pixel = val;
+
+	if (!of_property_read_u32(np, "dc-index", &val))
+		ndev->id = val;
+
+	return pdata;
+}
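+
+/*
+ * Sketch of a device tree fragment the parser above would accept; the
+ * property values are illustrative only, not taken from any real board:
+ *
+ *	dc@54200000 {
+ *		rgb {
+ *			status = "okay";
+ *			display {
+ *				type = <0>;	(TEGRA_DC_OUT_RGB)
+ *				flags = <1>;	(TEGRA_DC_FLAG_ENABLED)
+ *				fb-win = <0>;
+ *				fb-xres = <1366>;
+ *				fb-yres = <768>;
+ *				fb-bpp = <32>;
+ *				display-timings { ... };
+ *			};
+ *		};
+ *	};
+ */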
+
+static int tegra_dc_probe(struct nvhost_device *ndev,
+	struct nvhost_device_id *id_table)
+{
+	struct tegra_dc *dc;
+	struct tegra_dc_mode *mode;
+	struct clk *clk;
+	struct clk *emc_clk;
+	struct resource	*res;
+	struct resource *base_res;
+	struct resource *fb_mem = NULL;
+	int ret = 0;
+	void __iomem *base;
+	int irq;
+	int i;
+
+	if (!ndev->dev.platform_data && ndev->dev.of_node)
+		ndev->dev.platform_data = tegra_dc_parse_dt(ndev);
+
+	if (!ndev->dev.platform_data) {
+		dev_err(&ndev->dev, "no platform data\n");
+		return -ENOENT;
+	}
+
+	dc = devm_kzalloc(&ndev->dev, sizeof(*dc), GFP_KERNEL);
+	if (!dc) {
+		dev_err(&ndev->dev, "can't allocate memory for tegra_dc\n");
+		return -ENOMEM;
+	}
+
+	ndev->rst = devm_reset_control_get(&ndev->dev, "dc");
+	if (IS_ERR(ndev->rst)) {
+		dev_err(&ndev->dev, "failed to get reset\n");
+		return PTR_ERR(ndev->rst);
+	}
+
+	irq = nvhost_get_irq_byname(ndev, "irq");
+	if (irq <= 0) {
+		dev_err(&ndev->dev, "no irq\n");
+		return -ENOENT;
+	}
+
+	res = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "regs");
+	if (!res) {
+		dev_err(&ndev->dev, "no mem resource\n");
+		return -ENOENT;
+	}
+
+	base_res = devm_request_mem_region(&ndev->dev, res->start,
+					   resource_size(res), ndev->name);
+	if (!base_res) {
+		dev_err(&ndev->dev, "request_mem_region failed\n");
+		return -EBUSY;
+	}
+
+	fb_mem = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "fbmem");
+	if (!fb_mem) {
+		dev_err(&ndev->dev, "no fb_mem resource\n");
+		return -ENOENT;
+	}
+
+	base = devm_ioremap(&ndev->dev, res->start, resource_size(res));
+	if (!base) {
+		dev_err(&ndev->dev, "registers can't be mapped\n");
+		return -EBUSY;
+	}
+
+	clk = devm_clk_get(&ndev->dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_err(&ndev->dev, "can't get clock\n");
+		return -ENOENT;
+	}
+
+	emc_clk = devm_clk_get(&ndev->dev, "emc");
+	if (IS_ERR(emc_clk)) {
+		dev_err(&ndev->dev, "can't get emc clock\n");
+		return -ENOENT;
+	}
+
+	dc->clk = clk;
+	dc->emc_clk = emc_clk;
+	dc->shift_clk_div = 1;
+	/* Initialize one shot work delay, it will be assigned by dsi
+	 * according to refresh rate later. */
+	dc->one_shot_delay_ms = 40;
+
+	dc->base_res = base_res;
+	dc->base = base;
+	dc->irq = irq;
+	dc->ndev = ndev;
+	dc->pdata = ndev->dev.platform_data;
+
+	mutex_init(&dc->lock);
+	mutex_init(&dc->one_shot_lock);
+	init_completion(&dc->frame_end_complete);
+	init_waitqueue_head(&dc->wq);
+	init_waitqueue_head(&dc->timestamp_wq);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+	INIT_WORK(&dc->reset_work, tegra_dc_reset_worker);
+#endif
+	INIT_WORK(&dc->vblank_work, tegra_dc_vblank);
+	INIT_DELAYED_WORK(&dc->one_shot_work, tegra_dc_one_shot_worker);
+	INIT_DELAYED_WORK(&dc->disable_work, tegra_dc_delayed_disable_work);
+
+	tegra_dc_init_lut_defaults(&dc->fb_lut);
+
+	dc->n_windows = DC_N_WINDOWS;
+	for (i = 0; i < dc->n_windows; i++) {
+		struct tegra_dc_win *win = &dc->windows[i];
+		win->idx = i;
+		win->dc = dc;
+		tegra_dc_init_csc_defaults(&win->csc);
+		tegra_dc_init_lut_defaults(&win->lut);
+	}
+
+	ret = tegra_dc_set(dc, ndev->id);
+	if (ret < 0) {
+		dev_err(&ndev->dev, "can't add dc\n");
+		return ret;
+	}
+
+	nvhost_set_drvdata(ndev, dc);
+
+#ifdef CONFIG_SWITCH
+	dc->modeset_switch.name = dev_name(&ndev->dev);
+	dc->modeset_switch.print_state = switch_modeset_print_mode;
+	switch_dev_register(&dc->modeset_switch);
+#endif
+
+	tegra_dc_feature_register(dc);
+
+	if (dc->pdata->default_out)
+		tegra_dc_set_out(dc, dc->pdata->default_out);
+	else
+		dev_err(&ndev->dev, "No default output specified.  Leaving output disabled.\n");
+
+	dc->vblank_syncpt = (dc->ndev->id == 0) ?
+		NVSYNCPT_VBLANK0 : NVSYNCPT_VBLANK1;
+
+#ifdef CONFIG_TEGRA_DC_EXTENSIONS
+	dc->ext = tegra_dc_ext_register(ndev, dc);
+	if (IS_ERR_OR_NULL(dc->ext)) {
+		dev_warn(&ndev->dev, "Failed to enable Tegra DC extensions.\n");
+		dc->ext = NULL;
+	}
+#endif
+
+	/* interrupt handler must be registered before tegra_fb_register() */
+	if (devm_request_irq(&ndev->dev, irq, tegra_dc_irq, 0,
+			dev_name(&ndev->dev), dc)) {
+		dev_err(&ndev->dev, "request_irq %d failed\n", irq);
+		return -EBUSY;
+	}
+
+	disable_dc_irq(dc->irq);
+
+	mutex_lock(&dc->lock);
+	if (dc->pdata->flags & TEGRA_DC_FLAG_ENABLED) {
+		_tegra_dc_set_default_videomode(dc);
+		dc->enabled = _tegra_dc_enable(dc);
+	}
+	mutex_unlock(&dc->lock);
+
+	tegra_enable_backlight(dc);
+
+	tegra_dc_create_debugfs(dc);
+
+	dev_info(&ndev->dev, "dc probed\n");
+
+	if (dc->pdata->fb) {
+		if (dc->enabled && dc->pdata->fb->bits_per_pixel == -1) {
+			unsigned long fmt;
+			tegra_dc_writel(dc,
+					WINDOW_A_SELECT << dc->pdata->fb->win,
+					DC_CMD_DISPLAY_WINDOW_HEADER);
+
+			fmt = tegra_dc_readl(dc, DC_WIN_COLOR_DEPTH);
+			dc->pdata->fb->bits_per_pixel =
+				tegra_dc_fmt_bpp(fmt);
+		}
+
+		mode = tegra_dc_get_override_mode(dc);
+		if (mode) {
+			dc->pdata->fb->xres = mode->h_active;
+			dc->pdata->fb->yres = mode->v_active;
+		}
+
+		dc->fb = tegra_fb_register(ndev, dc, dc->pdata->fb, fb_mem);
+		if (IS_ERR(dc->fb))
+			dc->fb = NULL;
+	}
+
+	if (dc->fb) {
+		dc->overlay = tegra_overlay_register(ndev, dc);
+		if (IS_ERR(dc->overlay))
+			dc->overlay = NULL;
+	}
+
+	if (dc->out && dc->out->hotplug_init)
+		dc->out->hotplug_init();
+
+	if (dc->out_ops && dc->out_ops->detect)
+		dc->out_ops->detect(dc);
+	else
+		dc->connected = true;
+
+	tegra_dc_create_sysfs(&dc->ndev->dev);
+
+	return 0;
+}
+
+static int tegra_dc_remove(struct nvhost_device *ndev)
+{
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+	tegra_dc_remove_sysfs(&dc->ndev->dev);
+	tegra_dc_remove_debugfs(dc);
+
+	if (dc->overlay)
+		tegra_overlay_unregister(dc->overlay);
+
+	if (dc->fb) {
+		tegra_fb_unregister(dc->fb);
+		if (dc->fb_mem)
+			release_resource(dc->fb_mem);
+	}
+
+	tegra_dc_ext_disable(dc->ext);
+
+	if (dc->ext)
+		tegra_dc_ext_unregister(dc->ext);
+
+	if (dc->enabled)
+		_tegra_dc_disable(dc);
+
+#ifdef CONFIG_SWITCH
+	switch_dev_unregister(&dc->modeset_switch);
+#endif
+	tegra_dc_set(NULL, ndev->id);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dc_suspend(struct nvhost_device *ndev, pm_message_t state)
+{
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+	trace_printk("%s:suspend\n", dc->ndev->name);
+	dev_info(&ndev->dev, "suspend\n");
+
+	flush_delayed_work(&dc->disable_work);
+
+	if (dc->overlay)
+		tegra_overlay_disable(dc->overlay);
+
+	tegra_dc_ext_disable(dc->ext);
+
+	mutex_lock(&dc->lock);
+
+	if (dc->out_ops && dc->out_ops->suspend)
+		dc->out_ops->suspend(dc);
+
+	if (dc->enabled)
+		_tegra_dc_disable(dc);
+
+	if (dc->out && dc->out->postsuspend) {
+		dc->out->postsuspend();
+		if (dc->out->type == TEGRA_DC_OUT_HDMI)
+			/*
+			 * avoid resume event due to voltage falling
+			 */
+			msleep(100);
+	}
+
+	mutex_unlock(&dc->lock);
+
+	return 0;
+}
+
+static int tegra_dc_resume(struct nvhost_device *ndev)
+{
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+	trace_printk("%s:resume\n", dc->ndev->name);
+	dev_info(&ndev->dev, "resume\n");
+
+	mutex_lock(&dc->lock);
+
+	if (dc->enabled) {
+		_tegra_dc_set_default_videomode(dc);
+		_tegra_dc_enable(dc);
+	}
+
+	if (dc->out && dc->out->hotplug_init)
+		dc->out->hotplug_init();
+
+	if (dc->out_ops && dc->out_ops->resume)
+		dc->out_ops->resume(dc);
+	mutex_unlock(&dc->lock);
+
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static void tegra_dc_shutdown(struct nvhost_device *ndev)
+{
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+	if (!dc || !dc->enabled)
+		return;
+
+	tegra_dc_blank(dc);
+	tegra_dc_disable(dc);
+}
+
+extern int suspend_set(const char *val, struct kernel_param *kp)
+{
+	if (!strcmp(val, "dump"))
+		dump_regs(tegra_dcs[0]);
+#ifdef CONFIG_PM
+	else if (!strcmp(val, "suspend"))
+		tegra_dc_suspend(tegra_dcs[0]->ndev, PMSG_SUSPEND);
+	else if (!strcmp(val, "resume"))
+		tegra_dc_resume(tegra_dcs[0]->ndev);
+#endif
+
+	return 0;
+}
+
+extern int suspend_get(char *buffer, struct kernel_param *kp)
+{
+	return 0;
+}
+
+int suspend;
+
+module_param_call(suspend, suspend_set, suspend_get, &suspend, 0644);
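+
+/*
+ * Debug hook: writing to this parameter drives the first DC.  A minimal
+ * sketch of its use (the sysfs path depends on how the driver is built):
+ *
+ *	echo dump > /sys/module/.../parameters/suspend
+ *	echo suspend > /sys/module/.../parameters/suspend
+ *	echo resume > /sys/module/.../parameters/suspend
+ */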
+
+static const struct of_device_id tegra_dc_of_match[] = {
+	{ .compatible = "nvidia,tegra20-dc", },
+	{ .compatible = "nvidia,tegra30-dc", },
+	{ },
+};
+
+struct nvhost_driver tegra_dc_driver = {
+	.driver = {
+		.name = "tegradc",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(tegra_dc_of_match),
+	},
+	.probe = tegra_dc_probe,
+	.remove = tegra_dc_remove,
+#ifdef CONFIG_PM
+	.suspend = tegra_dc_suspend,
+	.resume = tegra_dc_resume,
+#endif
+	.shutdown = tegra_dc_shutdown,
+};
+
+#ifndef MODULE
+static int __init parse_disp_params(char *options, struct tegra_dc_mode *mode)
+{
+	int i, params[11];
+	char *p;
+
+	for (i = 0; i < ARRAY_SIZE(params); i++) {
+		if ((p = strsep(&options, ",")) != NULL) {
+			if (*p)
+				params[i] = simple_strtoul(p, &p, 10);
+		} else
+			return -EINVAL;
+	}
+
+	if ((mode->pclk = params[0]) == 0)
+		return -EINVAL;
+
+	mode->h_active      = params[1];
+	mode->v_active      = params[2];
+	mode->h_ref_to_sync = params[3];
+	mode->v_ref_to_sync = params[4];
+	mode->h_sync_width  = params[5];
+	mode->v_sync_width  = params[6];
+	mode->h_back_porch  = params[7];
+	mode->v_back_porch  = params[8];
+	mode->h_front_porch = params[9];
+	mode->v_front_porch = params[10];
+
+	return 0;
+}
+
+static int __init tegra_dc_mode_override(char *str)
+{
+	char *p = str, *options;
+
+	if (!p || !*p)
+		return -EINVAL;
+
+	p = strstr(str, "hdmi:");
+	if (p) {
+		p += 5;
+		options = strsep(&p, ";");
+		if (parse_disp_params(options, &override_disp_mode[TEGRA_DC_OUT_HDMI]))
+			return -EINVAL;
+	}
+
+	p = strstr(str, "rgb:");
+	if (p) {
+		p += 4;
+		options = strsep(&p, ";");
+		if (parse_disp_params(options, &override_disp_mode[TEGRA_DC_OUT_RGB]))
+			return -EINVAL;
+	}
+
+	p = strstr(str, "dsi:");
+	if (p) {
+		p += 4;
+		options = strsep(&p, ";");
+		if (parse_disp_params(options, &override_disp_mode[TEGRA_DC_OUT_DSI]))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+__setup("disp_params=", tegra_dc_mode_override);
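+
+/*
+ * Example boot argument (hypothetical 720p timings; the field order is
+ * pclk,h_active,v_active,h_ref_to_sync,v_ref_to_sync,h_sync_width,
+ * v_sync_width,h_back_porch,v_back_porch,h_front_porch,v_front_porch,
+ * matching parse_disp_params() above):
+ *
+ *	disp_params=hdmi:74250000,1280,720,1,1,40,5,220,20,110,5;
+ */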
+#endif
+
+static int __init tegra_dc_module_init(void)
+{
+	int ret = tegra_dc_ext_module_init();
+	if (ret)
+		return ret;
+	return nvhost_driver_register(&tegra_dc_driver);
+}
+
+static void __exit tegra_dc_module_exit(void)
+{
+	nvhost_driver_unregister(&tegra_dc_driver);
+	tegra_dc_ext_module_exit();
+}
+
+module_exit(tegra_dc_module_exit);
+late_initcall(tegra_dc_module_init);
diff --git a/drivers/staging/tegra/video/dc/dc.h b/drivers/staging/tegra/video/dc/dc.h
new file mode 100644
index 000000000000..17c7b6a68d38
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/dc.h
@@ -0,0 +1,618 @@
+/*
+ * arch/arm/mach-tegra/include/mach/dc.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ *	Erik Gilling <konkers@google.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_DC_H
+#define __MACH_TEGRA_DC_H
+
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <linux/fb.h>
+#include <linux/regulator/consumer.h>
+#include <drm/drm_fixed.h>
+
+#define TEGRA_MAX_DC		2
+#define DC_N_WINDOWS		3
+
+
+/* DSI pixel data format */
+enum {
+	TEGRA_DSI_PIXEL_FORMAT_16BIT_P,
+	TEGRA_DSI_PIXEL_FORMAT_18BIT_P,
+	TEGRA_DSI_PIXEL_FORMAT_18BIT_NP,
+	TEGRA_DSI_PIXEL_FORMAT_24BIT_P,
+};
+
+/* DSI virtual channel number */
+enum {
+	TEGRA_DSI_VIRTUAL_CHANNEL_0,
+	TEGRA_DSI_VIRTUAL_CHANNEL_1,
+	TEGRA_DSI_VIRTUAL_CHANNEL_2,
+	TEGRA_DSI_VIRTUAL_CHANNEL_3,
+};
+
+/* DSI transmit method for video data */
+enum {
+	TEGRA_DSI_VIDEO_TYPE_VIDEO_MODE,
+	TEGRA_DSI_VIDEO_TYPE_COMMAND_MODE,
+};
+
+/* DSI HS clock mode */
+enum {
+	TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS,
+	TEGRA_DSI_VIDEO_CLOCK_TX_ONLY,
+};
+
+/* DSI burst mode setting in video mode. Each mode is assigned a fixed
+ * value. The rationale behind this is to avoid changing these values,
+ * since the calculation of the dsi clock depends on them. */
+enum {
+	TEGRA_DSI_VIDEO_NONE_BURST_MODE = 0,
+	TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END = 1,
+	TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED = 2,
+	TEGRA_DSI_VIDEO_BURST_MODE_LOW_SPEED = 3,
+	TEGRA_DSI_VIDEO_BURST_MODE_MEDIUM_SPEED = 4,
+	TEGRA_DSI_VIDEO_BURST_MODE_FAST_SPEED = 5,
+	TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED = 6,
+};
+
+enum {
+	TEGRA_DSI_PACKET_CMD,
+	TEGRA_DSI_DELAY_MS,
+};
+
+struct tegra_dsi_cmd {
+	u8	cmd_type;
+	u8	data_id;
+	union {
+		u16 data_len;
+		u16 delay_ms;
+		struct {
+			u8 data0;
+			u8 data1;
+		} sp;
+	} sp_len_dly;
+	u8	*pdata;
+};
+
+#define DSI_CMD_SHORT(di, p0, p1)	{ \
+					.cmd_type = TEGRA_DSI_PACKET_CMD, \
+					.data_id = di, \
+					.sp_len_dly.sp.data0 = p0, \
+					.sp_len_dly.sp.data1 = p1, \
+					}
+#define DSI_DLY_MS(ms)	{ \
+			.cmd_type = TEGRA_DSI_DELAY_MS, \
+			.sp_len_dly.delay_ms = ms, \
+			}
+
+#define DSI_CMD_LONG(di, ptr)	{ \
+				.cmd_type = TEGRA_DSI_PACKET_CMD, \
+				.data_id = di, \
+				.sp_len_dly.data_len = ARRAY_SIZE(ptr), \
+				.pdata = ptr, \
+				}
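+
+/*
+ * Illustrative init sequence built from these macros. The panel bytes are
+ * hypothetical; data IDs 0x05 and 0x39 are the MIPI DCS short-write and
+ * long-write packet types:
+ *
+ *	static u8 gamma_data[] = { 0xe0, 0x01, 0x02 };
+ *	static struct tegra_dsi_cmd panel_init_cmds[] = {
+ *		DSI_CMD_SHORT(0x05, 0x11, 0x00),	(DCS exit sleep)
+ *		DSI_DLY_MS(120),
+ *		DSI_CMD_LONG(0x39, gamma_data),
+ *		DSI_CMD_SHORT(0x05, 0x29, 0x00),	(DCS display on)
+ *	};
+ *
+ * The array is pointed to by tegra_dsi_out.dsi_init_cmd with
+ * n_init_cmd = ARRAY_SIZE(panel_init_cmds).
+ */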
+
+struct dsi_phy_timing_ns {
+	u16		t_hsdexit_ns;
+	u16		t_hstrail_ns;
+	u16		t_datzero_ns;
+	u16		t_hsprepare_ns;
+
+	u16		t_clktrail_ns;
+	u16		t_clkpost_ns;
+	u16		t_clkzero_ns;
+	u16		t_tlpx_ns;
+
+	u16		t_clkprepare_ns;
+	u16		t_clkpre_ns;
+	u16		t_wakeup_ns;
+
+	u16		t_taget_ns;
+	u16		t_tasure_ns;
+	u16		t_tago_ns;
+};
+
+/* Aggressiveness level of DSI suspend. The higher, the more aggressive. */
+#define DSI_NO_SUSPEND			0
+#define DSI_HOST_SUSPEND_LV0		1
+#define DSI_HOST_SUSPEND_LV1		2
+#define DSI_HOST_SUSPEND_LV2		3
+#define DSI_SUSPEND_FULL		4
+
+struct tegra_dsi_out {
+	u8		n_data_lanes;			/* required */
+	u8		pixel_format;			/* required */
+	u8		refresh_rate;			/* required */
+	u8		rated_refresh_rate;
+	u8		panel_reset;			/* required */
+	u8		virtual_channel;		/* required */
+	u8		dsi_instance;
+	u8		chip_id;
+	u8		chip_rev;
+
+	bool		panel_has_frame_buffer;	/* required */
+	bool		panel_send_dc_frames;
+
+	struct tegra_dsi_cmd	*dsi_init_cmd;		/* required */
+	u16		n_init_cmd;			/* required */
+
+	struct tegra_dsi_cmd	*dsi_early_suspend_cmd;
+	u16		n_early_suspend_cmd;
+
+	struct tegra_dsi_cmd	*dsi_late_resume_cmd;
+	u16		n_late_resume_cmd;
+
+	struct tegra_dsi_cmd	*dsi_suspend_cmd;	/* required */
+	u16		n_suspend_cmd;			/* required */
+
+	u8		video_data_type;		/* required */
+	u8		video_clock_mode;
+	u8		video_burst_mode;
+
+	u8		suspend_aggr;
+
+	u16		panel_buffer_size_byte;
+	u16		panel_reset_timeout_msec;
+
+	bool		hs_cmd_mode_supported;
+	bool		hs_cmd_mode_on_blank_supported;
+	bool		enable_hs_clock_on_lp_cmd_mode;
+	bool		no_pkt_seq_eot; /* 1st generation panel may not
+					 * support eot. Don't set it for
+					 * most panels. */
+	bool		te_polarity_low;
+	bool		power_saving_suspend;
+
+	u32		max_panel_freq_khz;
+	u32		lp_cmd_mode_freq_khz;
+	u32		lp_read_cmd_mode_freq_khz;
+	u32		hs_clk_in_lp_cmd_mode_freq_khz;
+	u32		burst_mode_freq_khz;
+
+	struct dsi_phy_timing_ns phy_timing;
+};
+
+enum {
+	TEGRA_DC_STEREO_MODE_2D,
+	TEGRA_DC_STEREO_MODE_3D
+};
+
+enum {
+	TEGRA_DC_STEREO_LANDSCAPE,
+	TEGRA_DC_STEREO_PORTRAIT
+};
+
+struct tegra_stereo_out {
+	int  mode_2d_3d;
+	int  orientation;
+
+	void (*set_mode)(int mode);
+	void (*set_orientation)(int orientation);
+};
+
+struct tegra_dc_mode {
+	int	pclk;
+	int	rated_pclk;
+	int	h_ref_to_sync;
+	int	v_ref_to_sync;
+	int	h_sync_width;
+	int	v_sync_width;
+	int	h_back_porch;
+	int	v_back_porch;
+	int	h_active;
+	int	v_active;
+	int	h_front_porch;
+	int	v_front_porch;
+	int	stereo_mode;
+	u32	flags;
+};
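+
+/*
+ * The effective refresh rate follows from these fields as
+ * pclk / (h_total * v_total), with h_total = h_active + h_front_porch +
+ * h_sync_width + h_back_porch (likewise for v_total).  For hypothetical
+ * 720p timings: 74250000 / ((1280+110+40+220) * (720+5+5+20)) =
+ * 74250000 / (1650 * 750) = 60 Hz.
+ */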
+
+#define TEGRA_DC_MODE_FLAG_NEG_V_SYNC	(1 << 0)
+#define TEGRA_DC_MODE_FLAG_NEG_H_SYNC	(1 << 1)
+
+enum {
+	TEGRA_DC_OUT_RGB,
+	TEGRA_DC_OUT_HDMI,
+	TEGRA_DC_OUT_DSI,
+};
+
+struct tegra_dc_out_pin {
+	int	name;
+	int	pol;
+};
+
+enum {
+	TEGRA_DC_OUT_PIN_DATA_ENABLE,
+	TEGRA_DC_OUT_PIN_H_SYNC,
+	TEGRA_DC_OUT_PIN_V_SYNC,
+	TEGRA_DC_OUT_PIN_PIXEL_CLOCK,
+};
+
+enum {
+	TEGRA_DC_OUT_PIN_POL_LOW,
+	TEGRA_DC_OUT_PIN_POL_HIGH,
+};
+
+enum {
+	TEGRA_DC_DISABLE_DITHER = 1,
+	TEGRA_DC_ORDERED_DITHER,
+	TEGRA_DC_ERRDIFF_DITHER,
+};
+
+typedef u8 tegra_dc_bl_output[256];
+typedef u8 *p_tegra_dc_bl_output;
+
+struct tegra_dc_sd_blp {
+	u16 time_constant;
+	u8 step;
+};
+
+struct tegra_dc_sd_fc {
+	u8 time_limit;
+	u8 threshold;
+};
+
+struct tegra_dc_sd_rgb {
+	u8 r;
+	u8 g;
+	u8 b;
+};
+
+struct tegra_dc_sd_agg_priorities {
+	u8 pri_lvl;
+	u8 agg[4];
+};
+
+struct tegra_dc_sd_settings {
+	unsigned enable;
+	bool use_auto_pwm;
+	u8 hw_update_delay;
+	u8 aggressiveness;
+	short bin_width;
+	u8 phase_in_settings;
+	u8 phase_in_adjustments;
+	u8 cmd;
+	u8 final_agg;
+	u16 cur_agg_step;
+	u16 phase_settings_step;
+	u16 phase_adj_step;
+	u16 num_phase_in_steps;
+
+	struct tegra_dc_sd_agg_priorities agg_priorities;
+
+	bool use_vid_luma;
+	struct tegra_dc_sd_rgb coeff;
+
+	struct tegra_dc_sd_fc fc;
+	struct tegra_dc_sd_blp blp;
+	u8 bltf[4][4][4];
+	struct tegra_dc_sd_rgb lut[4][9];
+
+	atomic_t *sd_brightness;
+	struct platform_device *bl_device;
+};
+
+enum {
+	NO_CMD = 0x0,
+	ENABLE = 0x1,
+	DISABLE = 0x2,
+	PHASE_IN = 0x4,
+	AGG_CHG = 0x8,
+};
+
+enum {
+	TEGRA_PIN_OUT_CONFIG_SEL_LHP0_LD21,
+	TEGRA_PIN_OUT_CONFIG_SEL_LHP1_LD18,
+	TEGRA_PIN_OUT_CONFIG_SEL_LHP2_LD19,
+	TEGRA_PIN_OUT_CONFIG_SEL_LVP0_LVP0_Out,
+	TEGRA_PIN_OUT_CONFIG_SEL_LVP1_LD20,
+
+	TEGRA_PIN_OUT_CONFIG_SEL_LM1_M1,
+	TEGRA_PIN_OUT_CONFIG_SEL_LM1_LD21,
+	TEGRA_PIN_OUT_CONFIG_SEL_LM1_PM1,
+
+	TEGRA_PIN_OUT_CONFIG_SEL_LDI_LD22,
+	TEGRA_PIN_OUT_CONFIG_SEL_LPP_LD23,
+	TEGRA_PIN_OUT_CONFIG_SEL_LDC_SDC,
+	TEGRA_PIN_OUT_CONFIG_SEL_LSPI_DE,
+};
+
+struct tegra_dc_out {
+	int				type;
+	unsigned			flags;
+
+	/* size in mm */
+	unsigned			h_size;
+	unsigned			v_size;
+
+	int				dcc_bus;
+	int				hotplug_gpio;
+	const char			*parent_clk;
+	const char			*parent_clk_backup;
+
+	unsigned			max_pixclock;
+	unsigned			order;
+	unsigned			align;
+	unsigned			depth;
+	unsigned			dither;
+
+	struct tegra_dc_mode		*modes;
+	int				n_modes;
+
+	struct tegra_dsi_out		*dsi;
+	struct tegra_stereo_out		*stereo;
+
+	unsigned			height; /* mm */
+	unsigned			width; /* mm */
+
+	struct tegra_dc_out_pin		*out_pins;
+	unsigned			n_out_pins;
+
+	struct tegra_dc_sd_settings	*sd_settings;
+
+	u8			*out_sel_configs;
+	unsigned		n_out_sel_configs;
+	bool			user_needs_vblank;
+	struct completion	user_vblank_comp;
+
+	struct regulator	*hdmi_vdd;
+	struct regulator	*hdmi_pll;
+
+	struct regulator	*lvds_vdd;
+	struct regulator	*pnl_vdd;
+	struct regulator	*bl_vdd;
+	unsigned		lvds_to_bl_timeout;
+	bool			panel_enabled;
+
+	int	(*enable)(void);
+	int	(*postpoweron)(void);
+	int	(*prepoweroff)(void);
+	int	(*disable)(void);
+
+	int	(*hotplug_init)(void);
+	int	(*postsuspend)(void);
+};
+
+/* bits for tegra_dc_out.flags */
+#define TEGRA_DC_OUT_HOTPLUG_HIGH		(0 << 1)
+#define TEGRA_DC_OUT_HOTPLUG_LOW		(1 << 1)
+#define TEGRA_DC_OUT_HOTPLUG_MASK		(1 << 1)
+#define TEGRA_DC_OUT_NVHDCP_POLICY_ALWAYS_ON	(0 << 2)
+#define TEGRA_DC_OUT_NVHDCP_POLICY_ON_DEMAND	(1 << 2)
+#define TEGRA_DC_OUT_NVHDCP_POLICY_MASK		(1 << 2)
+#define TEGRA_DC_OUT_CONTINUOUS_MODE		(0 << 3)
+#define TEGRA_DC_OUT_ONE_SHOT_MODE		(1 << 3)
+#define TEGRA_DC_OUT_N_SHOT_MODE		(1 << 4)
+#define TEGRA_DC_OUT_ONE_SHOT_LP_MODE		(1 << 5)
+
+#define TEGRA_DC_ALIGN_MSB		0
+#define TEGRA_DC_ALIGN_LSB		1
+
+#define TEGRA_DC_ORDER_RED_BLUE		0
+#define TEGRA_DC_ORDER_BLUE_RED		1
+
+#define V_BLANK_FLIP		0
+#define V_BLANK_NVSD		1
+
+struct tegra_dc;
+struct nvmap_handle_ref;
+
+struct tegra_dc_csc {
+	unsigned short yof;
+	unsigned short kyrgb;
+	unsigned short kur;
+	unsigned short kvr;
+	unsigned short kug;
+	unsigned short kvg;
+	unsigned short kub;
+	unsigned short kvb;
+};
+
+/* palette lookup table */
+struct tegra_dc_lut {
+	u8 r[256];
+	u8 g[256];
+	u8 b[256];
+};
+
+#define UNDERFLOW_IGNORE_W	400
+#define UNDERFLOW_IGNORE_H	400
+
+struct tegra_dc_win {
+	u8			idx;
+	u8			fmt;
+	u8			ppflags; /* see TEGRA_WIN_PPFLAG* */
+	u32			flags;
+
+	void			*virt_addr;
+	dma_addr_t		phys_addr;
+	dma_addr_t		phys_addr_u;
+	dma_addr_t		phys_addr_v;
+	unsigned		stride;
+	unsigned		stride_uv;
+	fixed20_12		x;
+	fixed20_12		y;
+	fixed20_12		w;
+	fixed20_12		h;
+	unsigned		out_x;
+	unsigned		out_y;
+	unsigned		out_w;
+	unsigned		out_h;
+	unsigned		z;
+	u8			global_alpha;
+
+	struct tegra_dc_csc	csc;
+
+	int			dirty;
+	int			underflows;
+	struct tegra_dc		*dc;
+
+	struct nvmap_handle_ref	*cur_handle;
+	unsigned		bandwidth;
+	unsigned		new_bandwidth;
+	struct tegra_dc_lut	lut;
+};
+
+#define TEGRA_WIN_PPFLAG_CP_ENABLE	(1 << 0) /* enable RGB color lut */
+#define TEGRA_WIN_PPFLAG_CP_FBOVERRIDE	(1 << 1) /* override fbdev color lut */
+
+#define TEGRA_WIN_FLAG_ENABLED		(1 << 0)
+#define TEGRA_WIN_FLAG_BLEND_PREMULT	(1 << 1)
+#define TEGRA_WIN_FLAG_BLEND_COVERAGE	(1 << 2)
+#define TEGRA_WIN_FLAG_INVERT_H		(1 << 3)
+#define TEGRA_WIN_FLAG_INVERT_V		(1 << 4)
+#define TEGRA_WIN_FLAG_TILED		(1 << 5)
+#define TEGRA_WIN_FLAG_H_FILTER		(1 << 6)
+#define TEGRA_WIN_FLAG_V_FILTER		(1 << 7)
+
+
+#define TEGRA_WIN_BLEND_FLAGS_MASK \
+	(TEGRA_WIN_FLAG_BLEND_PREMULT | TEGRA_WIN_FLAG_BLEND_COVERAGE)
+
+/* Note: These are the actual values written to the DC_WIN_COLOR_DEPTH register
+ * and may change in new tegra architectures.
+ */
+#define TEGRA_WIN_FMT_P1		0
+#define TEGRA_WIN_FMT_P2		1
+#define TEGRA_WIN_FMT_P4		2
+#define TEGRA_WIN_FMT_P8		3
+#define TEGRA_WIN_FMT_B4G4R4A4		4
+#define TEGRA_WIN_FMT_B5G5R5A		5
+#define TEGRA_WIN_FMT_B5G6R5		6
+#define TEGRA_WIN_FMT_AB5G5R5		7
+#define TEGRA_WIN_FMT_B8G8R8A8		12
+#define TEGRA_WIN_FMT_R8G8B8A8		13
+#define TEGRA_WIN_FMT_B6x2G6x2R6x2A8	14
+#define TEGRA_WIN_FMT_R6x2G6x2B6x2A8	15
+#define TEGRA_WIN_FMT_YCbCr422		16
+#define TEGRA_WIN_FMT_YUV422		17
+#define TEGRA_WIN_FMT_YCbCr420P		18
+#define TEGRA_WIN_FMT_YUV420P		19
+#define TEGRA_WIN_FMT_YCbCr422P		20
+#define TEGRA_WIN_FMT_YUV422P		21
+#define TEGRA_WIN_FMT_YCbCr422R		22
+#define TEGRA_WIN_FMT_YUV422R		23
+#define TEGRA_WIN_FMT_YCbCr422RA	24
+#define TEGRA_WIN_FMT_YUV422RA		25
+
+struct tegra_fb_data {
+	int		win;
+
+	int		xres;
+	int		yres;
+	int		bits_per_pixel; /* -1 means autodetect */
+
+	unsigned long	flags;
+};
+
+#define TEGRA_FB_FLIP_ON_PROBE		(1 << 0)
+
+struct tegra_dc_platform_data {
+	unsigned long		flags;
+	unsigned long		emc_clk_rate;
+	unsigned long		min_emc_clk_rate;
+	struct tegra_dc_out	*default_out;
+	struct tegra_fb_data	*fb;
+};
+
+#define TEGRA_DC_FLAG_ENABLED		(1 << 0)
+
+int tegra_dc_get_stride(struct tegra_dc *dc, unsigned win);
+struct tegra_dc *tegra_dc_get_dc(unsigned idx);
+struct tegra_dc_win *tegra_dc_get_window(struct tegra_dc *dc, unsigned win);
+bool tegra_dc_get_connected(struct tegra_dc *);
+bool tegra_dc_hpd(struct tegra_dc *dc);
+
+
+void tegra_dc_get_fbvblank(struct tegra_dc *dc, struct fb_vblank *vblank);
+int tegra_dc_wait_for_vsync(struct tegra_dc *dc);
+void tegra_dc_blank(struct tegra_dc *dc);
+
+void tegra_dc_enable(struct tegra_dc *dc);
+void tegra_dc_disable(struct tegra_dc *dc);
+
+int tegra_dc_panel_enable_common(struct tegra_dc *dc);
+void tegra_dc_panel_disable_common(struct tegra_dc *dc);
+void tegra_enable_backlight(struct tegra_dc *dc);
+void tegra_disable_backlight(struct tegra_dc *dc);
+
+u32 tegra_dc_get_syncpt_id(const struct tegra_dc *dc, int i);
+u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc, int i);
+void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, int i, u32 val);
+
+/* tegra_dc_update_windows and tegra_dc_sync_windows do not support windows
+ * with different dcs in one call
+ */
+int tegra_dc_update_windows(struct tegra_dc_win *windows[], int n);
+int tegra_dc_sync_windows(struct tegra_dc_win *windows[], int n);
+int tegra_dc_config_frame_end_intr(struct tegra_dc *dc, bool enable);
+bool tegra_dc_is_within_n_vsync(struct tegra_dc *dc, s64 ts);
+bool tegra_dc_does_vsync_separate(struct tegra_dc *dc, s64 new_ts, s64 old_ts);
+
+int tegra_dc_set_mode(struct tegra_dc *dc, const struct tegra_dc_mode *mode);
+struct fb_videomode;
+int tegra_dc_set_fb_mode(struct tegra_dc *dc, const struct fb_videomode *fbmode,
+	bool stereo_mode);
+
+unsigned tegra_dc_get_out_height(const struct tegra_dc *dc);
+unsigned tegra_dc_get_out_width(const struct tegra_dc *dc);
+unsigned tegra_dc_get_out_max_pixclock(const struct tegra_dc *dc);
+
+/* PM0 and PM1 signal control */
+#define TEGRA_PWM_PM0 0
+#define TEGRA_PWM_PM1 1
+
+struct tegra_dc_pwm_params {
+	int which_pwm;
+	int gpio_conf_to_sfio;
+	unsigned int period;
+	unsigned int clk_div;
+	unsigned int clk_select;
+	unsigned int duty_cycle;
+};
+
+void tegra_dc_config_pwm(struct tegra_dc *dc, struct tegra_dc_pwm_params *cfg);
+
+int tegra_dsi_send_panel_short_cmd(struct tegra_dc *dc, u8 *pdata, u8 data_len);
+
+int tegra_dc_update_csc(struct tegra_dc *dc, int win_index);
+
+int tegra_dc_update_lut(struct tegra_dc *dc, int win_index, int fboveride);
+
+/*
+ * In order to get a dc's current EDID, first call tegra_dc_get_edid() from an
+ * interruptible context.  The returned value (if non-NULL) points to a
+ * snapshot of the current state; after copying data from it, call
+ * tegra_dc_put_edid() on that pointer.  Do not dereference anything through
+ * that pointer after calling tegra_dc_put_edid().
+ */
+struct tegra_dc_edid {
+	size_t		len;
+	u8		buf[0];
+};
+struct tegra_dc_edid *tegra_dc_get_edid(struct tegra_dc *dc);
+void tegra_dc_put_edid(struct tegra_dc_edid *edid);
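+
+/* A minimal sketch of that protocol (error handling elided; "raw" is a
+ * caller-provided buffer):
+ *
+ *	struct tegra_dc_edid *edid = tegra_dc_get_edid(dc);
+ *	if (edid) {
+ *		memcpy(raw, edid->buf, edid->len);
+ *		tegra_dc_put_edid(edid);
+ *	}
+ */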
+
+int tegra_dc_set_flip_callback(void (*callback)(void));
+int tegra_dc_unset_flip_callback(void);
+int tegra_dc_get_panel_sync_rate(void);
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/dc_config.c b/drivers/staging/tegra/video/dc/dc_config.c
new file mode 100644
index 000000000000..f238faddab12
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/dc_config.c
@@ -0,0 +1,247 @@
+/*
+ * drivers/video/tegra/dc/dc_config.c
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "dc_config.h"
+
+static struct tegra_dc_feature_entry t20_feature_entries_a[] = {
+	{ 0, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_A,} },
+	{ 0, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 0, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 0, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 0, TEGRA_DC_FEATURE_FILTER_TYPE, {0, 0,} },
+	{ 0, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1,} },
+	{ 0, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+
+	{ 1, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_B,} },
+	{ 1, TEGRA_DC_FEATURE_PREFERRED_FORMATS, {TEGRA_WIN_PREF_FMT_WIN_B,} },
+	{ 1, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 1, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 1, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 1, TEGRA_DC_FEATURE_FILTER_TYPE, {1, 1,} },
+	{ 1, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1,} },
+	{ 1, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+
+	{ 2, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_C,} },
+	{ 2, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 2, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 2, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 2, TEGRA_DC_FEATURE_FILTER_TYPE, {0, 1,} },
+	{ 2, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1,} },
+	{ 2, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+};
+
+static struct tegra_dc_feature_entry t20_feature_entries_b[] = {
+	{ 0, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_A,} },
+	{ 0, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 0, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 0, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 0, TEGRA_DC_FEATURE_FILTER_TYPE, {0, 0,} },
+	{ 0, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1,} },
+	{ 0, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+
+	{ 1, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_B,} },
+	{ 1, TEGRA_DC_FEATURE_PREFERRED_FORMATS, {TEGRA_WIN_PREF_FMT_WIN_B,} },
+	{ 1, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 1, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 1, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 1, TEGRA_DC_FEATURE_FILTER_TYPE, {1, 1,} },
+	{ 1, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1,} },
+	{ 1, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+
+	{ 2, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_C,} },
+	{ 2, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 2, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 2, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 2, TEGRA_DC_FEATURE_FILTER_TYPE, {0, 1,} },
+	{ 2, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1,} },
+	{ 2, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+};
+
+struct tegra_dc_feature t20_feature_table_a = {
+	ARRAY_SIZE(t20_feature_entries_a), t20_feature_entries_a,
+};
+
+struct tegra_dc_feature t20_feature_table_b = {
+	ARRAY_SIZE(t20_feature_entries_b), t20_feature_entries_b,
+};
+
+static struct tegra_dc_feature_entry t30_feature_entries_a[] = {
+	{ 0, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_A,} },
+	{ 0, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 0, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 0, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 0, TEGRA_DC_FEATURE_FILTER_TYPE, {0, 0,} },
+	{ 0, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1} },
+	{ 0, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+
+	{ 1, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_B,} },
+	{ 1, TEGRA_DC_FEATURE_PREFERRED_FORMATS, {TEGRA_WIN_PREF_FMT_WIN_B,} },
+	{ 1, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 1, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 1, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 1, TEGRA_DC_FEATURE_FILTER_TYPE, {1, 1,} },
+	{ 1, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1} },
+	{ 1, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+
+	{ 2, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_C,} },
+	{ 2, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 2, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 2, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 2, TEGRA_DC_FEATURE_FILTER_TYPE, {0, 1,} },
+	{ 2, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1} },
+	{ 2, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+};
+
+static struct tegra_dc_feature_entry t30_feature_entries_b[] = {
+	{ 0, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_A,} },
+	{ 0, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 0, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 0, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 0, TEGRA_DC_FEATURE_FILTER_TYPE, {0, 0,} },
+	{ 0, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1,} },
+	{ 0, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+
+	{ 1, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_B,} },
+	{ 1, TEGRA_DC_FEATURE_PREFERRED_FORMATS, {TEGRA_WIN_PREF_FMT_WIN_B,} },
+	{ 1, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 1, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 1, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 1, TEGRA_DC_FEATURE_FILTER_TYPE, {1, 1,} },
+	{ 1, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1,} },
+	{ 1, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+
+	{ 2, TEGRA_DC_FEATURE_FORMATS, {TEGRA_WIN_FMT_WIN_C,} },
+	{ 2, TEGRA_DC_FEATURE_BLEND_TYPE, {1,} },
+	{ 2, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} },
+	{ 2, TEGRA_DC_FEATURE_MAXIMUM_SCALE, {2, 2, 2, 2,} },
+	{ 2, TEGRA_DC_FEATURE_FILTER_TYPE, {0, 1,} },
+	{ 2, TEGRA_DC_FEATURE_LAYOUT_TYPE, {1, 1,} },
+	{ 2, TEGRA_DC_FEATURE_INVERT_TYPE, {1, 1, 0,} },
+};
+
+struct tegra_dc_feature t30_feature_table_a = {
+	ARRAY_SIZE(t30_feature_entries_a), t30_feature_entries_a,
+};
+
+struct tegra_dc_feature t30_feature_table_b = {
+	ARRAY_SIZE(t30_feature_entries_b), t30_feature_entries_b,
+};
+
+int tegra_dc_get_feature(struct tegra_dc_feature *feature, int win_idx,
+					enum tegra_dc_feature_option option)
+{
+	int i;
+	struct tegra_dc_feature_entry *entry;
+
+	if (!feature)
+		return -EINVAL;
+
+	for (i = 0; i < feature->num_entries; i++) {
+		entry = &feature->entries[i];
+		if (entry->window_index == win_idx && entry->option == option)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+long *tegra_dc_parse_feature(struct tegra_dc *dc, int win_idx, int operation)
+{
+	int idx;
+	struct tegra_dc_feature_entry *entry;
+	enum tegra_dc_feature_option option;
+	struct tegra_dc_feature *feature = dc->feature;
+
+	switch (operation) {
+	case GET_WIN_FORMATS:
+		option = TEGRA_DC_FEATURE_FORMATS;
+		break;
+	case GET_WIN_SIZE:
+		option = TEGRA_DC_FEATURE_MAXIMUM_SIZE;
+		break;
+	case HAS_SCALE:
+		option = TEGRA_DC_FEATURE_MAXIMUM_SCALE;
+		break;
+	case HAS_TILED:
+		option = TEGRA_DC_FEATURE_LAYOUT_TYPE;
+		break;
+	case HAS_V_FILTER:
+		option = TEGRA_DC_FEATURE_FILTER_TYPE;
+		break;
+	case HAS_H_FILTER:
+		option = TEGRA_DC_FEATURE_FILTER_TYPE;
+		break;
+	case HAS_GEN2_BLEND:
+		option = TEGRA_DC_FEATURE_BLEND_TYPE;
+		break;
+	default:
+		return NULL;
+	}
+
+	idx = tegra_dc_get_feature(feature, win_idx, option);
+	if (idx < 0)
+		return NULL;
+	entry = &feature->entries[idx];
+
+	return entry->arg;
+}
+
+int tegra_dc_feature_has_scaling(struct tegra_dc *dc, int win_idx)
+{
+	int i;
+	long *addr = tegra_dc_parse_feature(dc, win_idx, HAS_SCALE);
+
+	if (!addr)
+		return 0;
+
+	for (i = 0; i < ENTRY_SIZE; i++)
+		if (addr[i] != 1)
+			return 1;
+	return 0;
+}
+
+int tegra_dc_feature_has_tiling(struct tegra_dc *dc, int win_idx)
+{
+	long *addr = tegra_dc_parse_feature(dc, win_idx, HAS_TILED);
+
+	if (!addr)
+		return 0;
+
+	return addr[TILED_LAYOUT];
+}
+
+int tegra_dc_feature_has_filter(struct tegra_dc *dc, int win_idx, int operation)
+{
+	long *addr = tegra_dc_parse_feature(dc, win_idx, operation);
+
+	if (!addr)
+		return 0;
+
+	if (operation == HAS_V_FILTER)
+		return addr[V_FILTER];
+	else
+		return addr[H_FILTER];
+}
+
+void tegra_dc_feature_register(struct tegra_dc *dc)
+{
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	if (!dc->ndev->id)
+		dc->feature = &t20_feature_table_a;
+	else
+		dc->feature = &t20_feature_table_b;
+#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
+	if (!dc->ndev->id)
+		dc->feature = &t30_feature_table_a;
+	else
+		dc->feature = &t30_feature_table_b;
+#endif
+}
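+
+/*
+ * Illustrative usage (not part of the original driver): once
+ * tegra_dc_feature_register() has run, a window's limits can be
+ * validated before the window is programmed.  "width" below stands
+ * for a hypothetical window width under test:
+ *
+ *	long *size = tegra_dc_parse_feature(dc, win_idx, GET_WIN_SIZE);
+ *
+ *	if (!size || CHECK_SIZE(width, size[MIN_WIDTH], size[MAX_WIDTH]))
+ *		return -EINVAL;
+ */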
diff --git a/drivers/staging/tegra/video/dc/dc_config.h b/drivers/staging/tegra/video/dc/dc_config.h
new file mode 100644
index 000000000000..0eadd660ecf4
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/dc_config.h
@@ -0,0 +1,162 @@
+/*
+ * drivers/video/tegra/dc/dc_config.h
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_CONFIG_H
+#define __DRIVERS_VIDEO_TEGRA_DC_DC_CONFIG_H
+
+#include <linux/errno.h>
+
+#include "dc.h"
+#include "dc_priv.h"
+
+#define ENTRY_SIZE	4	/* Size of feature entry args */
+
+/* Define the supported formats. The TEGRA_WIN_FMT_WIN_x masks below are
+ * based on the T20/T30 per-window format capabilities. */
+#define TEGRA_WIN_FMT_BASE_CNT	(TEGRA_WIN_FMT_YUV422RA + 1)
+#define TEGRA_WIN_FMT_BASE	((1 << TEGRA_WIN_FMT_P8) | \
+				(1 << TEGRA_WIN_FMT_B4G4R4A4) | \
+				(1 << TEGRA_WIN_FMT_B5G5R5A) | \
+				(1 << TEGRA_WIN_FMT_B5G6R5) | \
+				(1 << TEGRA_WIN_FMT_AB5G5R5) | \
+				(1 << TEGRA_WIN_FMT_B8G8R8A8) | \
+				(1 << TEGRA_WIN_FMT_R8G8B8A8) | \
+				(1 << TEGRA_WIN_FMT_YCbCr422) | \
+				(1 << TEGRA_WIN_FMT_YUV422) | \
+				(1 << TEGRA_WIN_FMT_YCbCr420P) | \
+				(1 << TEGRA_WIN_FMT_YUV420P) | \
+				(1 << TEGRA_WIN_FMT_YCbCr422P) | \
+				(1 << TEGRA_WIN_FMT_YUV422P) | \
+				(1 << TEGRA_WIN_FMT_YCbCr422R) | \
+				(1 << TEGRA_WIN_FMT_YUV422R))
+
+#define TEGRA_WIN_FMT_WIN_A	((1 << TEGRA_WIN_FMT_P1) | \
+				(1 << TEGRA_WIN_FMT_P2) | \
+				(1 << TEGRA_WIN_FMT_P4) | \
+				(1 << TEGRA_WIN_FMT_P8) | \
+				(1 << TEGRA_WIN_FMT_B4G4R4A4) | \
+				(1 << TEGRA_WIN_FMT_B5G5R5A) | \
+				(1 << TEGRA_WIN_FMT_B5G6R5) | \
+				(1 << TEGRA_WIN_FMT_AB5G5R5) | \
+				(1 << TEGRA_WIN_FMT_B8G8R8A8) | \
+				(1 << TEGRA_WIN_FMT_R8G8B8A8) | \
+				(1 << TEGRA_WIN_FMT_B6x2G6x2R6x2A8) | \
+				(1 << TEGRA_WIN_FMT_R6x2G6x2B6x2A8))
+
+#define TEGRA_WIN_FMT_WIN_B	(TEGRA_WIN_FMT_BASE | \
+				(1 << TEGRA_WIN_FMT_B6x2G6x2R6x2A8) | \
+				(1 << TEGRA_WIN_FMT_R6x2G6x2B6x2A8) | \
+				(1 << TEGRA_WIN_FMT_YCbCr422RA) | \
+				(1 << TEGRA_WIN_FMT_YUV422RA))
+
+#define TEGRA_WIN_FMT_WIN_C	(TEGRA_WIN_FMT_BASE | \
+				(1 << TEGRA_WIN_FMT_B6x2G6x2R6x2A8) | \
+				(1 << TEGRA_WIN_FMT_R6x2G6x2B6x2A8) | \
+				(1 << TEGRA_WIN_FMT_YCbCr422RA) | \
+				(1 << TEGRA_WIN_FMT_YUV422RA))
+
+/* preferred formats do not include 32-bpp formats */
+#define TEGRA_WIN_PREF_FMT_WIN_B	(TEGRA_WIN_FMT_WIN_B & \
+				~(1 << TEGRA_WIN_FMT_B8G8R8A8) & \
+				~(1 << TEGRA_WIN_FMT_R8G8B8A8))
+
+
+/* For each entry, define the offsets at which its arguments are stored.
+ * Offsets for TEGRA_DC_FEATURE_MAXIMUM_SCALE: */
+#define H_SCALE_UP	0
+#define V_SCALE_UP	1
+#define H_FILTER_DOWN	2
+#define V_FILTER_DOWN	3
+
+/* Define the offset for TEGRA_DC_FEATURE_MAXIMUM_SIZE */
+#define MAX_WIDTH	0
+#define MIN_WIDTH	1
+#define MAX_HEIGHT	2
+#define MIN_HEIGHT	3
+#define CHECK_SIZE(val, min, max)	( \
+		((val) < (min) || (val) > (max)) ? -EINVAL : 0)
+
+/* Define the offset for TEGRA_DC_FEATURE_FILTER_TYPE */
+#define V_FILTER	0
+#define H_FILTER	1
+
+/* Define the offset for TEGRA_DC_FEATURE_INVERT_TYPE */
+#define H_INVERT	0
+#define V_INVERT	1
+#define SCAN_COLUMN	2
+
+/* Define the offset for TEGRA_DC_FEATURE_LAYOUT_TYPE. */
+#define PITCHED_LAYOUT	0
+#define TILED_LAYOUT	1
+
+/* Available operations on feature table. */
+enum {
+	HAS_SCALE,
+	HAS_TILED,
+	HAS_V_FILTER,
+	HAS_H_FILTER,
+	HAS_GEN2_BLEND,
+	GET_WIN_FORMATS,
+	GET_WIN_SIZE,
+};
+
+enum tegra_dc_feature_option {
+	TEGRA_DC_FEATURE_FORMATS,
+	TEGRA_DC_FEATURE_BLEND_TYPE,
+	TEGRA_DC_FEATURE_MAXIMUM_SIZE,
+	TEGRA_DC_FEATURE_MAXIMUM_SCALE,
+	TEGRA_DC_FEATURE_FILTER_TYPE,
+	TEGRA_DC_FEATURE_LAYOUT_TYPE,
+	TEGRA_DC_FEATURE_INVERT_TYPE,
+	TEGRA_DC_FEATURE_PREFERRED_FORMATS,
+};
+
+struct tegra_dc_feature_entry {
+	u32 window_index;
+	u32 option;
+	long arg[ENTRY_SIZE];
+};
+
+struct tegra_dc_feature {
+	u32 num_entries;
+	struct tegra_dc_feature_entry *entries;
+};
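+
+/*
+ * Example (taken from the tables in dc_config.c): the entry
+ *
+ *	{ 0, TEGRA_DC_FEATURE_MAXIMUM_SIZE, {4095, 16, 4095, 16,} }
+ *
+ * decodes through the offsets above as arg[MAX_WIDTH] = 4095,
+ * arg[MIN_WIDTH] = 16, arg[MAX_HEIGHT] = 4095 and arg[MIN_HEIGHT] = 16.
+ */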
+
+int tegra_dc_feature_has_scaling(struct tegra_dc *dc, int win_idx);
+int tegra_dc_feature_has_tiling(struct tegra_dc *dc, int win_idx);
+int tegra_dc_feature_has_filter(struct tegra_dc *dc, int win_idx, int operation);
+
+long *tegra_dc_parse_feature(struct tegra_dc *dc, int win_idx, int operation);
+void tegra_dc_feature_register(struct tegra_dc *dc);
+
+static inline bool win_use_v_filter(struct tegra_dc *dc,
+	const struct tegra_dc_win *win)
+{
+	return tegra_dc_feature_has_filter(dc, win->idx, HAS_V_FILTER) &&
+		win->h.full != dfixed_const(win->out_h);
+}
+static inline bool win_use_h_filter(struct tegra_dc *dc,
+	const struct tegra_dc_win *win)
+{
+	return tegra_dc_feature_has_filter(dc, win->idx, HAS_H_FILTER) &&
+		win->w.full != dfixed_const(win->out_w);
+}
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/dc_priv.h b/drivers/staging/tegra/video/dc/dc_priv.h
new file mode 100644
index 000000000000..9763c15907e7
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/dc_priv.h
@@ -0,0 +1,407 @@
+/*
+ * drivers/video/tegra/dc/dc_priv.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_PRIV_H
+#define __DRIVERS_VIDEO_TEGRA_DC_DC_PRIV_H
+
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/fb.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#ifdef CONFIG_SWITCH
+#include <linux/switch.h>
+#endif
+#include <linux/nvhost.h>
+
+#include <mach/clk.h>
+
+#include "tegra_dc_ext.h"
+#include "dc.h"
+#include "dc_reg.h"
+
+#define WIN_IS_TILED(win)	((win)->flags & TEGRA_WIN_FLAG_TILED)
+#define WIN_IS_ENABLED(win)	((win)->flags & TEGRA_WIN_FLAG_ENABLED)
+
+#define NEED_UPDATE_EMC_ON_EVERY_FRAME (windows_idle_detection_time == 0)
+
+/* DDR: 8 bytes transfer per clock */
+#define DDR_BW_TO_FREQ(bw) ((bw) / 8)
+
+#if defined(CONFIG_TEGRA_EMC_TO_DDR_CLOCK)
+#define EMC_BW_TO_FREQ(bw) (DDR_BW_TO_FREQ(bw) * CONFIG_TEGRA_EMC_TO_DDR_CLOCK)
+#else
+#define EMC_BW_TO_FREQ(bw) (DDR_BW_TO_FREQ(bw) * 2)
+#endif
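+
+/*
+ * Worked example, assuming bandwidth in KB/s and frequency in kHz:
+ * DDR_BW_TO_FREQ(1600000) = 200000, so with the default 2:1
+ * EMC-to-DDR clock ratio EMC_BW_TO_FREQ(1600000) = 400000.
+ */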
+
+#define ALL_UF_INT (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)
+
+struct tegra_dc;
+
+struct tegra_dc_blend {
+	unsigned z[DC_N_WINDOWS];
+	unsigned flags[DC_N_WINDOWS];
+};
+
+struct tegra_dc_out_ops {
+	/* initialize output.  dc clocks are not on at this point */
+	int (*init)(struct tegra_dc *dc);
+	/* destroy output.  dc clocks are not on at this point */
+	void (*destroy)(struct tegra_dc *dc);
+	/* detect connected display.  can sleep.*/
+	bool (*detect)(struct tegra_dc *dc);
+	/* enable output.  dc clocks are on at this point */
+	void (*enable)(struct tegra_dc *dc);
+	/* disable output.  dc clocks are on at this point */
+	void (*disable)(struct tegra_dc *dc);
+	/* hold output.  keeps dc clocks on. */
+	void (*hold)(struct tegra_dc *dc);
+	/* release output.  dc clocks may turn off after this. */
+	void (*release)(struct tegra_dc *dc);
+	/* idle routine of output.  dc clocks may turn off after this. */
+	void (*idle)(struct tegra_dc *dc);
+	/* suspend output.  dc clocks are on at this point */
+	void (*suspend)(struct tegra_dc *dc);
+	/* resume output.  dc clocks are on at this point */
+	void (*resume)(struct tegra_dc *dc);
+	/* mode filter, used to filter the list of supported modes */
+	bool (*mode_filter)(const struct tegra_dc *dc,
+			struct fb_videomode *mode);
+};
+
+struct tegra_dc {
+	struct nvhost_device		*ndev;
+	struct tegra_dc_platform_data	*pdata;
+
+	struct resource			*base_res;
+	void __iomem			*base;
+	int				irq;
+
+	struct clk			*clk;
+	struct clk			*emc_clk;
+	int				emc_clk_rate;
+	int				new_emc_clk_rate;
+	u32				shift_clk_div;
+
+	bool				connected;
+	bool				enabled;
+
+	struct tegra_dc_out		*out;
+	struct tegra_dc_out_ops		*out_ops;
+	void				*out_data;
+
+	struct tegra_dc_mode		mode;
+	s64				frametime_ns;
+
+	struct tegra_dc_win		windows[DC_N_WINDOWS];
+	struct tegra_dc_blend		blend;
+	int				n_windows;
+
+	wait_queue_head_t		wq;
+	wait_queue_head_t		timestamp_wq;
+
+	struct mutex			lock;
+	struct mutex			one_shot_lock;
+
+	struct resource			*fb_mem;
+	struct tegra_fb_info		*fb;
+
+	struct tegra_overlay_info	*overlay;
+
+	struct {
+		u32			id;
+		u32			min;
+		u32			max;
+	} syncpt[DC_N_WINDOWS];
+	u32				vblank_syncpt;
+
+	unsigned long			underflow_mask;
+	struct work_struct		reset_work;
+
+#ifdef CONFIG_SWITCH
+	struct switch_dev		modeset_switch;
+#endif
+
+	struct completion		frame_end_complete;
+
+	struct work_struct		vblank_work;
+	long				vblank_ref_count;
+
+	struct {
+		u64			underflows;
+		u64			underflows_a;
+		u64			underflows_b;
+		u64			underflows_c;
+	} stats;
+
+	struct tegra_dc_ext		*ext;
+
+	struct tegra_dc_feature		*feature;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry			*debugdir;
+#endif
+	struct tegra_dc_lut		fb_lut;
+	u32				one_shot_delay_ms;
+	struct delayed_work		one_shot_work;
+	s64				frame_end_timestamp;
+
+	struct delayed_work		disable_work;
+};
+
+#define print_mode_info(dc, mode) do {					\
+	trace_printk("%s:Mode settings: "				\
+			"ref_to_sync: H = %d V = %d, "			\
+			"sync_width: H = %d V = %d, "			\
+			"back_porch: H = %d V = %d, "			\
+			"active: H = %d V = %d, "			\
+			"front_porch: H = %d V = %d, "			\
+			"pclk = %d, stereo mode = %d\n",		\
+			dc->ndev->name,					\
+			mode.h_ref_to_sync, mode.v_ref_to_sync,		\
+			mode.h_sync_width, mode.v_sync_width,		\
+			mode.h_back_porch, mode.v_back_porch,		\
+			mode.h_active, mode.v_active,			\
+			mode.h_front_porch, mode.v_front_porch,		\
+			mode.pclk, mode.stereo_mode);			\
+	} while (0)
+
+static inline void tegra_dc_io_start(struct tegra_dc *dc)
+{
+	nvhost_module_busy_ext(nvhost_get_parent(dc->ndev));
+}
+
+static inline void tegra_dc_io_end(struct tegra_dc *dc)
+{
+	nvhost_module_idle_ext(nvhost_get_parent(dc->ndev));
+}
+
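+/*
+ * Register accessors: @reg is a 32-bit word index (the DC_* values
+ * from dc_reg.h), hence the "reg * 4" conversion to a byte offset.
+ */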
+static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
+					   unsigned long reg)
+{
+	unsigned long ret;
+
+	BUG_ON(!nvhost_module_powered_ext(nvhost_get_parent(dc->ndev)));
+	WARN(!tegra_is_clk_enabled(dc->clk), "DC is clock-gated.\n");
+
+	ret = readl(dc->base + reg * 4);
+	trace_printk("readl %p=%#08lx\n", dc->base + reg * 4, ret);
+	return ret;
+}
+
+static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long val,
+				   unsigned long reg)
+{
+	BUG_ON(!nvhost_module_powered_ext(nvhost_get_parent(dc->ndev)));
+	WARN(!tegra_is_clk_enabled(dc->clk), "DC is clock-gated.\n");
+
+	trace_printk("writel %p=%#08lx\n", dc->base + reg * 4, val);
+	writel(val, dc->base + reg * 4);
+}
+
+static inline void _tegra_dc_write_table(struct tegra_dc *dc, const u32 *table,
+					 unsigned len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		tegra_dc_writel(dc, table[i * 2 + 1], table[i * 2]);
+}
+
+#define tegra_dc_write_table(dc, table)		\
+	_tegra_dc_write_table(dc, table, ARRAY_SIZE(table) / 2)
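+
+/*
+ * Illustrative table ("stop_table" is hypothetical; the register names
+ * come from dc_reg.h): tables are flat {reg, val} pairs, so
+ * ARRAY_SIZE(table) / 2 is the number of writes:
+ *
+ *	static const u32 stop_table[] = {
+ *		DC_CMD_DISPLAY_COMMAND,	DISP_CTRL_MODE_STOP,
+ *		DC_CMD_STATE_CONTROL,	GENERAL_ACT_REQ,
+ *	};
+ *	tegra_dc_write_table(dc, stop_table);
+ */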
+
+static inline void tegra_dc_set_outdata(struct tegra_dc *dc, void *data)
+{
+	dc->out_data = data;
+}
+
+static inline void *tegra_dc_get_outdata(struct tegra_dc *dc)
+{
+	return dc->out_data;
+}
+
+static inline unsigned long tegra_dc_get_default_emc_clk_rate(
+	struct tegra_dc *dc)
+{
+	return dc->pdata->emc_clk_rate ? dc->pdata->emc_clk_rate : ULONG_MAX;
+}
+
+static inline int tegra_dc_fmt_bpp(int fmt)
+{
+	switch (fmt) {
+	case TEGRA_WIN_FMT_P1:
+		return 1;
+
+	case TEGRA_WIN_FMT_P2:
+		return 2;
+
+	case TEGRA_WIN_FMT_P4:
+		return 4;
+
+	case TEGRA_WIN_FMT_P8:
+		return 8;
+
+	case TEGRA_WIN_FMT_B4G4R4A4:
+	case TEGRA_WIN_FMT_B5G5R5A:
+	case TEGRA_WIN_FMT_B5G6R5:
+	case TEGRA_WIN_FMT_AB5G5R5:
+		return 16;
+
+	case TEGRA_WIN_FMT_B8G8R8A8:
+	case TEGRA_WIN_FMT_R8G8B8A8:
+	case TEGRA_WIN_FMT_B6x2G6x2R6x2A8:
+	case TEGRA_WIN_FMT_R6x2G6x2B6x2A8:
+		return 32;
+
+	/* for planar formats, this is the per-pixel size of the Y plane: 8 bits */
+	case TEGRA_WIN_FMT_YCbCr420P:
+	case TEGRA_WIN_FMT_YUV420P:
+	case TEGRA_WIN_FMT_YCbCr422P:
+	case TEGRA_WIN_FMT_YUV422P:
+	case TEGRA_WIN_FMT_YCbCr422R:
+	case TEGRA_WIN_FMT_YUV422R:
+	case TEGRA_WIN_FMT_YCbCr422RA:
+	case TEGRA_WIN_FMT_YUV422RA:
+		return 8;
+
+	/* YUYV packed into 32-bits */
+	case TEGRA_WIN_FMT_YCbCr422:
+	case TEGRA_WIN_FMT_YUV422:
+		return 16;
+	}
+	return 0;
+}
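+
+/*
+ * Example: one 1024-pixel line of TEGRA_WIN_FMT_B8G8R8A8 occupies
+ * 1024 * tegra_dc_fmt_bpp(TEGRA_WIN_FMT_B8G8R8A8) / 8 = 4096 bytes.
+ */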
+
+static inline bool tegra_dc_is_yuv(int fmt)
+{
+	switch (fmt) {
+	case TEGRA_WIN_FMT_YUV420P:
+	case TEGRA_WIN_FMT_YCbCr420P:
+	case TEGRA_WIN_FMT_YCbCr422P:
+	case TEGRA_WIN_FMT_YUV422P:
+	case TEGRA_WIN_FMT_YCbCr422:
+	case TEGRA_WIN_FMT_YUV422:
+	case TEGRA_WIN_FMT_YCbCr422R:
+	case TEGRA_WIN_FMT_YUV422R:
+	case TEGRA_WIN_FMT_YCbCr422RA:
+	case TEGRA_WIN_FMT_YUV422RA:
+		return true;
+	}
+	return false;
+}
+
+static inline bool tegra_dc_is_yuv_planar(int fmt)
+{
+	switch (fmt) {
+	case TEGRA_WIN_FMT_YUV420P:
+	case TEGRA_WIN_FMT_YCbCr420P:
+	case TEGRA_WIN_FMT_YCbCr422P:
+	case TEGRA_WIN_FMT_YUV422P:
+	case TEGRA_WIN_FMT_YCbCr422R:
+	case TEGRA_WIN_FMT_YUV422R:
+	case TEGRA_WIN_FMT_YCbCr422RA:
+	case TEGRA_WIN_FMT_YUV422RA:
+		return true;
+	}
+	return false;
+}
+
+static inline void tegra_dc_unmask_interrupt(struct tegra_dc *dc, u32 int_val)
+{
+	u32 val;
+
+	val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+	val |= int_val;
+	tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+}
+
+static inline void tegra_dc_mask_interrupt(struct tegra_dc *dc, u32 int_val)
+{
+	u32 val;
+
+	val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+	val &= ~int_val;
+	tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+}
+
+static inline unsigned long tegra_dc_clk_get_rate(struct tegra_dc *dc)
+{
+	return clk_get_rate(dc->clk);
+}
+
+extern struct tegra_dc_out_ops tegra_dc_rgb_ops;
+extern struct tegra_dc_out_ops tegra_dc_hdmi_ops;
+extern struct tegra_dc_out_ops tegra_dc_dsi_ops;
+
+/* defined in dc_sysfs.c, used by dc.c */
+void tegra_dc_remove_sysfs(struct device *dev);
+void tegra_dc_create_sysfs(struct device *dev);
+
+/* defined in dc.c, used by dc_sysfs.c */
+void tegra_dc_stats_enable(struct tegra_dc *dc, bool enable);
+bool tegra_dc_stats_get(struct tegra_dc *dc);
+
+/* defined in dc.c, used by dc_sysfs.c */
+u32 tegra_dc_read_checksum_latched(struct tegra_dc *dc);
+void tegra_dc_enable_crc(struct tegra_dc *dc);
+void tegra_dc_disable_crc(struct tegra_dc *dc);
+
+void tegra_dc_set_out_pin_polars(struct tegra_dc *dc,
+				const struct tegra_dc_out_pin *pins,
+				const unsigned int n_pins);
+/* defined in dc.c, used in bandwidth.c and ext/dev.c */
+unsigned int tegra_dc_has_multiple_dc(void);
+
+/* defined in dc.c, used in dsi.c */
+void tegra_dc_clk_enable(struct tegra_dc *dc);
+void tegra_dc_clk_disable(struct tegra_dc *dc);
+
+/* defined in dc.c, used in nvsd.c and dsi.c */
+void tegra_dc_hold_dc_out(struct tegra_dc *dc);
+void tegra_dc_release_dc_out(struct tegra_dc *dc);
+
+/* defined in bandwidth.c, used in dc.c */
+void tegra_dc_clear_bandwidth(struct tegra_dc *dc);
+void tegra_dc_program_bandwidth(struct tegra_dc *dc, bool use_new);
+int tegra_dc_set_dynamic_emc(struct tegra_dc_win *windows[], int n);
+
+/* defined in mode.c, used in dc.c */
+int tegra_dc_program_mode(struct tegra_dc *dc, struct tegra_dc_mode *mode);
+int tegra_dc_calc_refresh(const struct tegra_dc_mode *m);
+
+/* defined in clock.c, used in dc.c, dsi.c and hdmi.c */
+void tegra_dc_setup_clk(struct tegra_dc *dc, struct clk *clk);
+unsigned long tegra_dc_pclk_round_rate(struct tegra_dc *dc, int pclk);
+
+/* defined in lut.c, used in dc.c */
+void tegra_dc_init_lut_defaults(struct tegra_dc_lut *lut);
+void tegra_dc_set_lut(struct tegra_dc *dc, struct tegra_dc_win *win);
+
+/* defined in csc.c, used in dc.c */
+void tegra_dc_init_csc_defaults(struct tegra_dc_csc *csc);
+void tegra_dc_set_csc(struct tegra_dc *dc, struct tegra_dc_csc *csc);
+
+/* defined in window.c, used in dc.c */
+void tegra_dc_trigger_windows(struct tegra_dc *dc);
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/dc_reg.h b/drivers/staging/tegra/video/dc/dc_reg.h
new file mode 100644
index 000000000000..86b1029d3bba
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/dc_reg.h
@@ -0,0 +1,564 @@
+/*
+ * drivers/video/tegra/dc/dc_reg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_REG_H
+#define __DRIVERS_VIDEO_TEGRA_DC_DC_REG_H
+
+#define DC_CMD_GENERAL_INCR_SYNCPT		0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL	0x001
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR	0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT		0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL		0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR		0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT		0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL		0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR		0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT		0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL		0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR		0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC		0x028
+#define DC_CMD_DISPLAY_COMMAND_OPTION0		0x031
+#define  MSF_POLARITY_HIGH			(0 << 0)
+#define  MSF_POLARITY_LOW			(1 << 0)
+#define  MSF_DISABLE				(0 << 1)
+#define  MSF_ENABLE				(1 << 1)
+#define  MSF_LSPI				(0 << 2)
+#define  MSF_LDC				(1 << 2)
+#define  MSF_LSDI				(2 << 2)
+
+#define DC_CMD_DISPLAY_COMMAND			0x032
+#define  DISP_COMMAND_RAISE		(1 << 0)
+#define  DISP_CTRL_MODE_STOP		(0 << 5)
+#define  DISP_CTRL_MODE_C_DISPLAY	(1 << 5)
+#define  DISP_CTRL_MODE_NC_DISPLAY	(2 << 5)
+#define  DISP_COMMAND_RAISE_VECTOR(x)	(((x) & 0x1f) << 22)
+#define  DISP_COMMAND_RAISE_CHANNEL_ID(x)	(((x) & 0xf) << 27)
+
+#define DC_CMD_SIGNAL_RAISE			0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL		0x036
+#define  PW0_ENABLE		(1 << 0)
+#define  PW1_ENABLE		(1 << 2)
+#define  PW2_ENABLE		(1 << 4)
+#define  PW3_ENABLE		(1 << 6)
+#define  PW4_ENABLE		(1 << 8)
+#define  PM0_ENABLE		(1 << 16)
+#define  PM1_ENABLE		(1 << 18)
+#define  SPI_ENABLE		(1 << 24)
+#define  HSPI_ENABLE		(1 << 25)
+
+#define DC_CMD_INT_STATUS			0x037
+#define DC_CMD_INT_MASK				0x038
+#define DC_CMD_INT_ENABLE			0x039
+#define DC_CMD_INT_TYPE				0x03a
+#define DC_CMD_INT_POLARITY			0x03b
+#define  CTXSW_INT		(1 << 0)
+#define  FRAME_END_INT		(1 << 1)
+#define  V_BLANK_INT		(1 << 2)
+#define  H_BLANK_INT		(1 << 3)
+#define  V_PULSE3_INT		(1 << 4)
+#define  SPI_BUSY_INT		(1 << 7)
+#define  WIN_A_UF_INT		(1 << 8)
+#define  WIN_B_UF_INT		(1 << 9)
+#define  WIN_C_UF_INT		(1 << 10)
+#define  MSF_INT		(1 << 12)
+#define  SSF_INT		(1 << 13)
+#define  WIN_A_OF_INT		(1 << 14)
+#define  WIN_B_OF_INT		(1 << 15)
+#define  WIN_C_OF_INT		(1 << 16)
+#define  GPIO_0_INT		(1 << 18)
+#define  GPIO_1_INT		(1 << 19)
+#define  GPIO_2_INT		(1 << 20)
+
+#define DC_CMD_SIGNAL_RAISE1			0x03c
+#define DC_CMD_SIGNAL_RAISE2			0x03d
+#define DC_CMD_SIGNAL_RAISE3			0x03e
+#define DC_CMD_STATE_ACCESS			0x040
+#define  READ_MUX_ASSEMBLY	(0 << 0)
+#define  READ_MUX_ACTIVE	(1 << 0)
+#define  WRITE_MUX_ASSEMBLY	(0 << 2)
+#define  WRITE_MUX_ACTIVE	(1 << 2)
+
+#define DC_CMD_STATE_CONTROL			0x041
+#define  GENERAL_ACT_REQ	(1 << 0)
+#define  WIN_A_ACT_REQ		(1 << 1)
+#define  WIN_B_ACT_REQ		(1 << 2)
+#define  WIN_C_ACT_REQ		(1 << 3)
+#define  GENERAL_UPDATE		(1 << 8)
+#define  WIN_A_UPDATE		(1 << 9)
+#define  WIN_B_UPDATE		(1 << 10)
+#define  WIN_C_UPDATE		(1 << 11)
+#define  NC_HOST_TRIG		(1 << 24)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER		0x042
+#define  WINDOW_A_SELECT		(1 << 4)
+#define  WINDOW_B_SELECT		(1 << 5)
+#define  WINDOW_C_SELECT		(1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL			0x043
+
+#define DC_COM_CRC_CONTROL			0x300
+#define  CRC_ALWAYS_ENABLE		(1 << 3)
+#define  CRC_ALWAYS_DISABLE		(0 << 3)
+#define  CRC_INPUT_DATA_ACTIVE_DATA	(1 << 2)
+#define  CRC_INPUT_DATA_FULL_FRAME	(0 << 2)
+#define  CRC_WAIT_TWO_VSYNC		(1 << 1)
+#define  CRC_WAIT_ONE_VSYNC		(0 << 1)
+#define  CRC_ENABLE_ENABLE		(1 << 0)
+#define  CRC_ENABLE_DISABLE		(0 << 0)
+#define DC_COM_CRC_CHECKSUM			0x301
+#define DC_COM_PIN_OUTPUT_ENABLE0		0x302
+#define DC_COM_PIN_OUTPUT_ENABLE1		0x303
+#define DC_COM_PIN_OUTPUT_ENABLE2		0x304
+#define DC_COM_PIN_OUTPUT_ENABLE3		0x305
+#define  PIN_OUTPUT_LSPI_OUTPUT_EN		(1 << 8)
+#define  PIN_OUTPUT_LSPI_OUTPUT_DIS		(1 << 8)
+#define DC_COM_PIN_OUTPUT_POLARITY0		0x306
+
+#define DC_COM_PIN_OUTPUT_POLARITY1		0x307
+#define  LHS_OUTPUT_POLARITY_LOW	(1 << 30)
+#define  LVS_OUTPUT_POLARITY_LOW	(1 << 28)
+#define  LSC0_OUTPUT_POLARITY_LOW	(1 << 24)
+
+#define DC_COM_PIN_OUTPUT_POLARITY2		0x308
+
+#define DC_COM_PIN_OUTPUT_POLARITY3		0x309
+#define  LSPI_OUTPUT_POLARITY_LOW	(1 << 8)
+
+#define DC_COM_PIN_OUTPUT_DATA0			0x30a
+#define DC_COM_PIN_OUTPUT_DATA1			0x30b
+#define DC_COM_PIN_OUTPUT_DATA2			0x30c
+#define DC_COM_PIN_OUTPUT_DATA3			0x30d
+#define DC_COM_PIN_INPUT_ENABLE0		0x30e
+#define DC_COM_PIN_INPUT_ENABLE1		0x30f
+#define DC_COM_PIN_INPUT_ENABLE2		0x310
+#define DC_COM_PIN_INPUT_ENABLE3		0x311
+#define  PIN_INPUT_LSPI_INPUT_EN		(1 << 8)
+#define  PIN_INPUT_LSPI_INPUT_DIS		(1 << 8)
+#define DC_COM_PIN_INPUT_DATA0			0x312
+#define DC_COM_PIN_INPUT_DATA1			0x313
+#define DC_COM_PIN_OUTPUT_SELECT0		0x314
+#define DC_COM_PIN_OUTPUT_SELECT1		0x315
+#define DC_COM_PIN_OUTPUT_SELECT2		0x316
+#define DC_COM_PIN_OUTPUT_SELECT3		0x317
+#define DC_COM_PIN_OUTPUT_SELECT4		0x318
+#define DC_COM_PIN_OUTPUT_SELECT5		0x319
+#define DC_COM_PIN_OUTPUT_SELECT6		0x31a
+
+#define PIN5_LM1_LCD_M1_OUTPUT_MASK	(7 << 4)
+#define PIN5_LM1_LCD_M1_OUTPUT_M1	(0 << 4)
+#define PIN5_LM1_LCD_M1_OUTPUT_LD21	(2 << 4)
+#define PIN5_LM1_LCD_M1_OUTPUT_PM1	(3 << 4)
+
+#define  PIN1_LHS_OUTPUT		(1 << 30)
+#define  PIN1_LVS_OUTPUT		(1 << 28)
+
+#define DC_COM_PIN_MISC_CONTROL			0x31b
+#define DC_COM_PM0_CONTROL			0x31c
+#define DC_COM_PM0_DUTY_CYCLE			0x31d
+#define DC_COM_PM1_CONTROL			0x31e
+#define DC_COM_PM1_DUTY_CYCLE			0x31f
+
+#define PM_PERIOD_SHIFT                 18
+#define PM_CLK_DIVIDER_SHIFT		4
+
+#define DC_COM_SPI_CONTROL			0x320
+#define DC_COM_SPI_START_BYTE			0x321
+#define DC_COM_HSPI_WRITE_DATA_AB		0x322
+#define DC_COM_HSPI_WRITE_DATA_CD		0x323
+#define DC_COM_HSPI_CS_DC			0x324
+#define DC_COM_SCRATCH_REGISTER_A		0x325
+#define DC_COM_SCRATCH_REGISTER_B		0x326
+#define DC_COM_GPIO_CTRL			0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER		0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED		0x329
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0		0x400
+#define  H_PULSE_0_ENABLE		(1 << 8)
+#define  H_PULSE_1_ENABLE		(1 << 10)
+#define  H_PULSE_2_ENABLE		(1 << 12)
+#define  V_PULSE_0_ENABLE		(1 << 16)
+#define  V_PULSE_1_ENABLE		(1 << 18)
+#define  V_PULSE_2_ENABLE		(1 << 19)
+#define  V_PULSE_3_ENABLE		(1 << 20)
+#define  M0_ENABLE			(1 << 24)
+#define  M1_ENABLE			(1 << 26)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1		0x401
+#define  DI_ENABLE			(1 << 16)
+#define  PP_ENABLE			(1 << 18)
+
+#define DC_DISP_DISP_WIN_OPTIONS		0x402
+#define  CURSOR_ENABLE			(1 << 16)
+#define  TVO_ENABLE			(1 << 28)
+#define  DSI_ENABLE			(1 << 29)
+#define  HDMI_ENABLE			(1 << 30)
+
+#define DC_DISP_MEM_HIGH_PRIORITY		0x403
+#define DC_DISP_MEM_HIGH_PRIORITY_TIMER		0x404
+#define DC_DISP_DISP_TIMING_OPTIONS		0x405
+#define  VSYNC_H_POSITION(x)		((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC			0x406
+#define DC_DISP_SYNC_WIDTH			0x407
+#define DC_DISP_BACK_PORCH			0x408
+#define DC_DISP_DISP_ACTIVE			0x409
+#define DC_DISP_FRONT_PORCH			0x40a
+#define DC_DISP_H_PULSE0_CONTROL		0x40b
+#define DC_DISP_H_PULSE0_POSITION_A		0x40c
+#define DC_DISP_H_PULSE0_POSITION_B		0x40d
+#define DC_DISP_H_PULSE0_POSITION_C		0x40e
+#define DC_DISP_H_PULSE0_POSITION_D		0x40f
+#define DC_DISP_H_PULSE1_CONTROL		0x410
+#define DC_DISP_H_PULSE1_POSITION_A		0x411
+#define DC_DISP_H_PULSE1_POSITION_B		0x412
+#define DC_DISP_H_PULSE1_POSITION_C		0x413
+#define DC_DISP_H_PULSE1_POSITION_D		0x414
+#define DC_DISP_H_PULSE2_CONTROL		0x415
+#define DC_DISP_H_PULSE2_POSITION_A		0x416
+#define DC_DISP_H_PULSE2_POSITION_B		0x417
+#define DC_DISP_H_PULSE2_POSITION_C		0x418
+#define DC_DISP_H_PULSE2_POSITION_D		0x419
+#define DC_DISP_V_PULSE0_CONTROL		0x41a
+#define DC_DISP_V_PULSE0_POSITION_A		0x41b
+#define DC_DISP_V_PULSE0_POSITION_B		0x41c
+#define DC_DISP_V_PULSE0_POSITION_C		0x41d
+#define DC_DISP_V_PULSE1_CONTROL		0x41e
+#define DC_DISP_V_PULSE1_POSITION_A		0x41f
+#define DC_DISP_V_PULSE1_POSITION_B		0x420
+#define DC_DISP_V_PULSE1_POSITION_C		0x421
+#define DC_DISP_V_PULSE2_CONTROL		0x422
+#define DC_DISP_V_PULSE2_POSITION_A		0x423
+#define DC_DISP_V_PULSE3_CONTROL		0x424
+#define DC_DISP_V_PULSE3_POSITION_A		0x425
+#define DC_DISP_M0_CONTROL			0x426
+#define DC_DISP_M1_CONTROL			0x427
+#define DC_DISP_DI_CONTROL			0x428
+#define DC_DISP_PP_CONTROL			0x429
+#define DC_DISP_PP_SELECT_A			0x42a
+#define DC_DISP_PP_SELECT_B			0x42b
+#define DC_DISP_PP_SELECT_C			0x42c
+#define DC_DISP_PP_SELECT_D			0x42d
+
+#define  PULSE_MODE_NORMAL		(0 << 3)
+#define  PULSE_MODE_ONE_CLOCK		(1 << 3)
+#define  PULSE_POLARITY_HIGH		(0 << 4)
+#define  PULSE_POLARITY_LOW		(1 << 4)
+#define  PULSE_QUAL_ALWAYS		(0 << 6)
+#define  PULSE_QUAL_VACTIVE		(2 << 6)
+#define  PULSE_QUAL_VACTIVE1		(3 << 6)
+#define  PULSE_LAST_START_A		(0 << 8)
+#define  PULSE_LAST_END_A		(1 << 8)
+#define  PULSE_LAST_START_B		(2 << 8)
+#define  PULSE_LAST_END_B		(3 << 8)
+#define  PULSE_LAST_START_C		(4 << 8)
+#define  PULSE_LAST_END_C		(5 << 8)
+#define  PULSE_LAST_START_D		(6 << 8)
+#define  PULSE_LAST_END_D		(7 << 8)
+
+#define  PULSE_START(x)			((x) & 0xfff)
+#define  PULSE_END(x)			(((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL		0x42e
+#define  PIXEL_CLK_DIVIDER_PCD1		(0 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD1H	(1 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD2		(2 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD3		(3 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD4		(4 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD6		(5 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD8		(6 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD9		(7 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD12	(8 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD16	(9 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD18	(10 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD24	(11 << 8)
+#define  PIXEL_CLK_DIVIDER_PCD13	(12 << 8)
+#define  SHIFT_CLK_DIVIDER(x)		((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL		0x42f
+#define  DISP_DATA_FORMAT_DF1P1C	(0 << 0)
+#define  DISP_DATA_FORMAT_DF1P2C24B	(1 << 0)
+#define  DISP_DATA_FORMAT_DF1P2C18B	(2 << 0)
+#define  DISP_DATA_FORMAT_DF1P2C16B	(3 << 0)
+#define  DISP_DATA_FORMAT_DF2S		(5 << 0)
+#define  DISP_DATA_FORMAT_DF3S		(6 << 0)
+#define  DISP_DATA_FORMAT_DFSPI		(7 << 0)
+#define  DISP_DATA_FORMAT_DF1P3C24B	(8 << 0)
+#define  DISP_DATA_FORMAT_DF1P3C18B	(9 << 0)
+#define  DISP_DATA_ALIGNMENT_MSB	(0 << 8)
+#define  DISP_DATA_ALIGNMENT_LSB	(1 << 8)
+#define  DISP_DATA_ORDER_RED_BLUE	(0 << 9)
+#define  DISP_DATA_ORDER_BLUE_RED	(1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL		0x430
+#define  BASE_COLOR_SIZE666		(0 << 0)
+#define  BASE_COLOR_SIZE111		(1 << 0)
+#define  BASE_COLOR_SIZE222		(2 << 0)
+#define  BASE_COLOR_SIZE333		(3 << 0)
+#define  BASE_COLOR_SIZE444		(4 << 0)
+#define  BASE_COLOR_SIZE555		(5 << 0)
+#define  BASE_COLOR_SIZE565		(6 << 0)
+#define  BASE_COLOR_SIZE332		(7 << 0)
+#define  BASE_COLOR_SIZE888		(8 << 0)
+
+#define  DITHER_CONTROL_DISABLE		(0 << 8)
+#define  DITHER_CONTROL_ORDERED		(2 << 8)
+#define  DITHER_CONTROL_ERRDIFF		(3 << 8)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS		0x431
+#define DC_DISP_DATA_ENABLE_OPTIONS		0x432
+#define   DE_SELECT_ACTIVE_BLANK	0x0
+#define   DE_SELECT_ACTIVE		0x1
+#define   DE_SELECT_ACTIVE_IS		0x2
+#define   DE_CONTROL_ONECLK		(0 << 2)
+#define   DE_CONTROL_NORMAL		(1 << 2)
+#define   DE_CONTROL_EARLY_EXT		(2 << 2)
+#define   DE_CONTROL_EARLY		(3 << 2)
+#define   DE_CONTROL_ACTIVE_BLANK	(4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS	0x433
+#define DC_DISP_LCD_SPI_OPTIONS			0x434
+#define DC_DISP_BORDER_COLOR			0x435
+#define DC_DISP_COLOR_KEY0_LOWER		0x436
+#define DC_DISP_COLOR_KEY0_UPPER		0x437
+#define DC_DISP_COLOR_KEY1_LOWER		0x438
+#define DC_DISP_COLOR_KEY1_UPPER		0x439
+
+#define DC_DISP_CURSOR_FOREGROUND		0x43c
+#define DC_DISP_CURSOR_BACKGROUND		0x43d
+#define   CURSOR_COLOR(_r, _g, _b) ((_r) | ((_g) << 8) | ((_b) << 16))
+
+#define DC_DISP_CURSOR_START_ADDR		0x43e
+#define DC_DISP_CURSOR_START_ADDR_NS		0x43f
+#define   CURSOR_START_ADDR_MASK	(((1 << 22) - 1) << 10)
+#define   CURSOR_START_ADDR(_addr)	((_addr) >> 10)
+#define   CURSOR_SIZE_64		(1 << 24)
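+/* Note: the >> 10 encoding and the bits [31:10] mask above imply that
+ * the cursor buffer must be 1 KiB aligned. */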
+
+#define DC_DISP_CURSOR_POSITION			0x440
+#define   CURSOR_POSITION(_x, _y)		\
+	(((_x) & ((1 << 16) - 1)) |		\
+	(((_y) & ((1 << 16) - 1)) << 16))
+
+#define DC_DISP_CURSOR_POSITION_NS		0x441
+#define DC_DISP_INIT_SEQ_CONTROL		0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A		0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B		0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C		0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D		0x446
+#define DC_DISP_DC_MCCIF_FIFOCTRL		0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST		0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST		0x482
+#define DC_DISP_MCCIF_DISPLAY0C_HYST		0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST		0x484
+#define DC_DISP_DAC_CRT_CTRL			0x4c0
+#define DC_DISP_DISP_MISC_CONTROL		0x4c1
+#define   UF_LINE_FLUSH                         (1 << 1)
+
+#define DC_WIN_COLOR_PALETTE(x)			(0x500 + (x))
+
+#define DC_WIN_PALETTE_COLOR_EXT		0x600
+#define DC_WIN_H_FILTER_P(x)			(0x601 + (x))
+#define DC_WIN_CSC_YOF				0x611
+#define DC_WIN_CSC_KYRGB			0x612
+#define DC_WIN_CSC_KUR				0x613
+#define DC_WIN_CSC_KVR				0x614
+#define DC_WIN_CSC_KUG				0x615
+#define DC_WIN_CSC_KVG				0x616
+#define DC_WIN_CSC_KUB				0x617
+#define DC_WIN_CSC_KVB				0x618
+#define DC_WIN_V_FILTER_P(x)			(0x619 + (x))
+#define DC_WIN_WIN_OPTIONS			0x700
+#define  H_DIRECTION_INCREMENT		(0 << 0)
+#define  H_DIRECTION_DECREMENT		(1 << 0)
+#define  V_DIRECTION_INCREMENT		(0 << 2)
+#define  V_DIRECTION_DECREMENT		(1 << 2)
+#define  COLOR_EXPAND			(1 << 6)
+#define  H_FILTER_ENABLE		(1 << 8)
+#define  V_FILTER_ENABLE		(1 << 10)
+#define  CP_ENABLE			(1 << 16)
+#define  CSC_ENABLE			(1 << 18)
+#define  DV_ENABLE			(1 << 20)
+#define  WIN_ENABLE			(1 << 30)
+
+#define DC_WIN_BYTE_SWAP			0x701
+#define  BYTE_SWAP_NOSWAP		0
+#define  BYTE_SWAP_SWAP2		1
+#define  BYTE_SWAP_SWAP4		2
+#define  BYTE_SWAP_SWAP4HW		3
+
+#define DC_WIN_BUFFER_CONTROL			0x702
+#define  BUFFER_CONTROL_HOST		0
+#define  BUFFER_CONTROL_VI		1
+#define  BUFFER_CONTROL_EPP		2
+#define  BUFFER_CONTROL_MPEGE		3
+#define  BUFFER_CONTROL_SB2D		4
+
+#define DC_WIN_COLOR_DEPTH			0x703
+
+#define DC_WIN_POSITION				0x704
+#define  H_POSITION(x)		(((x) & 0xfff) << 0)
+#define  V_POSITION(x)		(((x) & 0xfff) << 16)
+
+#define DC_WIN_SIZE				0x705
+#define  H_SIZE(x)		(((x) & 0xfff) << 0)
+#define  V_SIZE(x)		(((x) & 0xfff) << 16)
+
+#define DC_WIN_PRESCALED_SIZE			0x706
+#define  H_PRESCALED_SIZE(x)	(((x) & 0x3fff) << 0)
+#define  V_PRESCALED_SIZE(x)	(((x) & 0xfff) << 16)
+
+#define DC_WIN_H_INITIAL_DDA			0x707
+#define DC_WIN_V_INITIAL_DDA			0x708
+#define DC_WIN_DDA_INCREMENT			0x709
+#define  H_DDA_INC(x)		(((x) & 0xffff) << 0)
+#define  V_DDA_INC(x)		(((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE			0x70a
+#define  LINE_STRIDE(x)		(x)
+#define  UV_LINE_STRIDE(x)	(((x) & 0xffff) << 16)
+#define  GET_LINE_STRIDE(x)	((x) & 0xffff)
+#define  GET_UV_LINE_STRIDE(x)	(((x) >> 16) & 0xffff)
+#define DC_WIN_BUF_STRIDE			0x70b
+#define DC_WIN_UV_BUF_STRIDE			0x70c
+#define DC_WIN_BUFFER_ADDR_MODE			0x70d
+#define  DC_WIN_BUFFER_ADDR_MODE_LINEAR		(0 << 0)
+#define  DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV	(0 << 16)
+#define  DC_WIN_BUFFER_ADDR_MODE_TILE		(1 << 0)
+#define  DC_WIN_BUFFER_ADDR_MODE_TILE_UV	(1 << 16)
+#define DC_WIN_DV_CONTROL			0x70e
+#define DC_WIN_BLEND_NOKEY			0x70f
+#define DC_WIN_BLEND_1WIN			0x710
+#define DC_WIN_BLEND_2WIN_X			0x711
+#define DC_WIN_BLEND_2WIN_Y			0x712
+#define DC_WIN_BLEND_3WIN_XY			0x713
+#define  CKEY_NOKEY			(0 << 0)
+#define  CKEY_KEY0			(1 << 0)
+#define  CKEY_KEY1			(2 << 0)
+#define  CKEY_KEY01			(3 << 0)
+#define  BLEND_CONTROL_FIX		(0 << 2)
+#define  BLEND_CONTROL_ALPHA		(1 << 2)
+#define  BLEND_CONTROL_DEPENDANT	(2 << 2)
+#define  BLEND_CONTROL_PREMULT		(3 << 2)
+#define  BLEND_WEIGHT0(x)		(((x) & 0xff) << 8)
+#define  BLEND_WEIGHT1(x)		(((x) & 0xff) << 16)
+#define  BLEND(key, control, weight0, weight1)			\
+	  (CKEY_ ## key | BLEND_CONTROL_ ## control |		\
+	   BLEND_WEIGHT0(weight0) | BLEND_WEIGHT1(weight1))
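+/* Example: BLEND(NOKEY, FIX, 0xff, 0xff) expands to CKEY_NOKEY |
+ * BLEND_CONTROL_FIX | BLEND_WEIGHT0(0xff) | BLEND_WEIGHT1(0xff). */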
+
+#define DC_WIN_HP_FETCH_CONTROL			0x714
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+#define DC_WIN_GLOBAL_ALPHA			0x715
+#define  GLOBAL_ALPHA_ENABLE		0x10000
+#endif
+
+#define DC_WINBUF_START_ADDR			0x800
+#define DC_WINBUF_START_ADDR_NS			0x801
+#define DC_WINBUF_START_ADDR_U			0x802
+#define DC_WINBUF_START_ADDR_U_NS		0x803
+#define DC_WINBUF_START_ADDR_V			0x804
+#define DC_WINBUF_START_ADDR_V_NS		0x805
+#define DC_WINBUF_ADDR_H_OFFSET			0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS		0x807
+#define DC_WINBUF_ADDR_V_OFFSET			0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS		0x809
+#define DC_WINBUF_UFLOW_STATUS			0x80a
+
+/* direct versions of DC_WINBUF_UFLOW_STATUS */
+#define DC_WINBUF_AD_UFLOW_STATUS		0xbca
+#define DC_WINBUF_BD_UFLOW_STATUS		0xdca
+#define DC_WINBUF_CD_UFLOW_STATUS		0xfca
+
+#define DC_DISP_SD_CONTROL			0x4c2
+#define  SD_ENABLE_NORMAL		(1 << 0)
+#define  SD_ENABLE_ONESHOT		(2 << 0)
+#define  SD_USE_VID_LUMA		(1 << 2)
+#define  SD_BIN_WIDTH_ONE		(0 << 3)
+#define  SD_BIN_WIDTH_TWO		(1 << 3)
+#define  SD_BIN_WIDTH_FOUR		(2 << 3)
+#define  SD_BIN_WIDTH_EIGHT		(3 << 3)
+#define  SD_BIN_WIDTH_MASK		(3 << 3)
+#define  SD_AGGRESSIVENESS(x)		(((x) & 0x7) << 5)
+#define  SD_HW_UPDATE_DLY(x)		(((x) & 0x3) << 8)
+#define  SD_ONESHOT_ENABLE		(1 << 10)
+#define  SD_CORRECTION_MODE_AUTO	(0 << 11)
+#define  SD_CORRECTION_MODE_MAN		(1 << 11)
+
+#define NUM_BIN_WIDTHS 4
+#define STEPS_PER_AGG_LVL 64
+#define STEPS_PER_AGG_CHG_LOG2 5
+#define STEPS_PER_AGG_CHG (1<<STEPS_PER_AGG_CHG_LOG2)
+#define ADJ_PHASE_STEP 8
+#define K_STEP 4
+
+#define DC_DISP_SD_CSC_COEFF			0x4c3
+#define  SD_CSC_COEFF_R(x)		(((x) & 0xf) << 4)
+#define  SD_CSC_COEFF_G(x)		(((x) & 0xf) << 12)
+#define  SD_CSC_COEFF_B(x)		(((x) & 0xf) << 20)
+
+#define DC_DISP_SD_LUT(i)			(0x4c4 + i)
+#define DC_DISP_SD_LUT_NUM			9
+#define  SD_LUT_R(x)			(((x) & 0xff) << 0)
+#define  SD_LUT_G(x)			(((x) & 0xff) << 8)
+#define  SD_LUT_B(x)			(((x) & 0xff) << 16)
+
+#define DC_DISP_SD_FLICKER_CONTROL		0x4cd
+#define  SD_FC_TIME_LIMIT(x)		(((x) & 0xff) << 0)
+#define  SD_FC_THRESHOLD(x)		(((x) & 0xff) << 8)
+
+#define DC_DISP_SD_PIXEL_COUNT			0x4ce
+
+#define DC_DISP_SD_HISTOGRAM(i)			(0x4cf + i)
+#define DC_DISP_SD_HISTOGRAM_NUM		8
+#define  SD_HISTOGRAM_BIN_0(val)	(((val) & (0xff << 0)) >> 0)
+#define  SD_HISTOGRAM_BIN_1(val)	(((val) & (0xff << 8)) >> 8)
+#define  SD_HISTOGRAM_BIN_2(val)	(((val) & (0xff << 16)) >> 16)
+#define  SD_HISTOGRAM_BIN_3(val)	(((val) & (0xff << 24)) >> 24)
+
+#define DC_DISP_SD_BL_PARAMETERS		0x4d7
+#define  SD_BLP_TIME_CONSTANT(x)	(((x) & 0x7ff) << 0)
+#define  SD_BLP_STEP(x)			(((x) & 0xff) << 16)
+
+#define DC_DISP_SD_BL_TF(i)			(0x4d8 + i)
+#define DC_DISP_SD_BL_TF_NUM			4
+#define  SD_BL_TF_POINT_0(x)		(((x) & 0xff) << 0)
+#define  SD_BL_TF_POINT_1(x)		(((x) & 0xff) << 8)
+#define  SD_BL_TF_POINT_2(x)		(((x) & 0xff) << 16)
+#define  SD_BL_TF_POINT_3(x)		(((x) & 0xff) << 24)
+
+#define DC_DISP_SD_BL_CONTROL			0x4dc
+#define  SD_BLC_MODE_MAN		(0 << 0)
+#define  SD_BLC_MODE_AUTO		(1 << 1)
+#define  SD_BLC_BRIGHTNESS(val)		(((val) & (0xff << 8)) >> 8)
+
+#define DC_DISP_SD_HW_K_VALUES			0x4dd
+#define  SD_HW_K_R(val)			(((val) & (0x3ff << 0)) >> 0)
+#define  SD_HW_K_G(val)			(((val) & (0x3ff << 10)) >> 10)
+#define  SD_HW_K_B(val)			(((val) & (0x3ff << 20)) >> 20)
+
+#define DC_DISP_SD_MAN_K_VALUES			0x4de
+#define  SD_MAN_K_R(x)			(((x) & 0x3ff) << 0)
+#define  SD_MAN_K_G(x)			(((x) & 0x3ff) << 10)
+#define  SD_MAN_K_B(x)			(((x) & 0x3ff) << 20)
+
+#define  NUM_AGG_PRI_LVLS		4
+#define  SD_AGG_PRI_LVL(x)		((x) >> 3)
+#define  SD_GET_AGG(x)			((x) & 0x7)
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/dc_sysfs.c b/drivers/staging/tegra/video/dc/dc_sysfs.c
new file mode 100644
index 000000000000..9d6f4260d9aa
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/dc_sysfs.c
@@ -0,0 +1,373 @@
+/*
+ * drivers/video/tegra/dc/dc_sysfs.c
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/fb.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+
+#include "dc.h"
+#include "fb.h"
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "nvsd.h"
+
+static ssize_t mode_show(struct device *device,
+	struct device_attribute *attr, char *buf)
+{
+	struct nvhost_device *ndev = to_nvhost_device(device);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	struct tegra_dc_mode *m;
+	ssize_t res;
+
+	mutex_lock(&dc->lock);
+	m = &dc->mode;
+	res = snprintf(buf, PAGE_SIZE,
+		"pclk: %d\n"
+		"h_ref_to_sync: %d\n"
+		"v_ref_to_sync: %d\n"
+		"h_sync_width: %d\n"
+		"v_sync_width: %d\n"
+		"h_back_porch: %d\n"
+		"v_back_porch: %d\n"
+		"h_active: %d\n"
+		"v_active: %d\n"
+		"h_front_porch: %d\n"
+		"v_front_porch: %d\n"
+		"stereo_mode: %d\n",
+		m->pclk, m->h_ref_to_sync, m->v_ref_to_sync,
+		m->h_sync_width, m->v_sync_width,
+		m->h_back_porch, m->v_back_porch,
+		m->h_active, m->v_active,
+		m->h_front_porch, m->v_front_porch,
+		m->stereo_mode);
+	mutex_unlock(&dc->lock);
+
+	return res;
+}
+
+static DEVICE_ATTR(mode, S_IRUGO, mode_show, NULL);
+
+static ssize_t stats_enable_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	bool enabled;
+
+	if (mutex_lock_killable(&dc->lock))
+		return -EINTR;
+	enabled = tegra_dc_stats_get(dc);
+	mutex_unlock(&dc->lock);
+
+	return snprintf(buf, PAGE_SIZE, "%d", enabled);
+}
+
+static ssize_t stats_enable_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	unsigned long val = 0;
+
+	if (kstrtoul(buf, 10, &val) < 0)
+		return -EINVAL;
+
+	if (mutex_lock_killable(&dc->lock))
+		return -EINTR;
+	tegra_dc_stats_enable(dc, !!val);
+	mutex_unlock(&dc->lock);
+
+	return count;
+}
+
+static DEVICE_ATTR(stats_enable, S_IRUGO|S_IWUSR,
+	stats_enable_show, stats_enable_store);
+
+static ssize_t enable_show(struct device *device,
+	struct device_attribute *attr, char *buf)
+{
+	struct nvhost_device *ndev = to_nvhost_device(device);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	ssize_t res;
+
+	mutex_lock(&dc->lock);
+	res = snprintf(buf, PAGE_SIZE, "%d\n", dc->enabled);
+	mutex_unlock(&dc->lock);
+	return res;
+}
+
+static ssize_t enable_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	unsigned long val = 0;
+
+	if (kstrtoul(buf, 10, &val) < 0)
+		return -EINVAL;
+
+	if (val)
+		tegra_dc_enable(dc);
+	else
+		tegra_dc_disable(dc);
+
+	return count;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO|S_IWUSR, enable_show, enable_store);
+
+static ssize_t crc_checksum_latched_show(struct device *device,
+	struct device_attribute *attr, char *buf)
+{
+	struct nvhost_device *ndev = to_nvhost_device(device);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+	u32 crc;
+
+	if (!dc->enabled) {
+		dev_err(&dc->ndev->dev, "DC is not enabled.\n");
+		return -ENODEV;
+	}
+
+	crc = tegra_dc_read_checksum_latched(dc);
+
+	return snprintf(buf, PAGE_SIZE, "%u", crc);
+}
+
+static ssize_t crc_checksum_latched_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	unsigned long val = 0;
+
+	if (!dc->enabled) {
+		dev_err(&dc->ndev->dev, "DC is not enabled.\n");
+		return -ENODEV;
+	}
+
+	if (kstrtoul(buf, 10, &val) < 0)
+		return -EINVAL;
+
+	if (val == 1) {
+		tegra_dc_enable_crc(dc);
+		dev_err(&dc->ndev->dev, "crc is enabled.\n");
+	} else if (val == 0) {
+		tegra_dc_disable_crc(dc);
+		dev_err(&dc->ndev->dev, "crc is disabled.\n");
+	} else
+		dev_err(&dc->ndev->dev, "Invalid input.\n");
+
+	return count;
+}
+static DEVICE_ATTR(crc_checksum_latched, S_IRUGO|S_IWUSR,
+		crc_checksum_latched_show, crc_checksum_latched_store);
+
+#define ORIENTATION_PORTRAIT	"portrait"
+#define ORIENTATION_LANDSCAPE	"landscape"
+
+static ssize_t orientation_3d_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	struct tegra_dc_out *dc_out = dc->out;
+	const char *orientation;
+	switch (dc_out->stereo->orientation) {
+	case TEGRA_DC_STEREO_LANDSCAPE:
+		orientation = ORIENTATION_LANDSCAPE;
+		break;
+	case TEGRA_DC_STEREO_PORTRAIT:
+		orientation = ORIENTATION_PORTRAIT;
+		break;
+	default:
+		pr_err("Invalid value is stored for stereo_orientation.\n");
+		return -EINVAL;
+	}
+	return snprintf(buf, PAGE_SIZE, "%s\n", orientation);
+}
+
+static ssize_t orientation_3d_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t cnt)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	struct tegra_dc_out *dc_out = dc->out;
+	struct tegra_stereo_out *stereo = dc_out->stereo;
+	int orientation;
+
+	if (0 == strncmp(buf, ORIENTATION_PORTRAIT,
+			min(cnt, ARRAY_SIZE(ORIENTATION_PORTRAIT) - 1))) {
+		orientation = TEGRA_DC_STEREO_PORTRAIT;
+	} else if (0 == strncmp(buf, ORIENTATION_LANDSCAPE,
+			min(cnt, ARRAY_SIZE(ORIENTATION_LANDSCAPE) - 1))) {
+		orientation = TEGRA_DC_STEREO_LANDSCAPE;
+	} else {
+		pr_err("Invalid property value for stereo_orientation.\n");
+		return -EINVAL;
+	}
+	stereo->orientation = orientation;
+	stereo->set_orientation(orientation);
+	return cnt;
+}
+
+static DEVICE_ATTR(stereo_orientation,
+	S_IRUGO|S_IWUSR, orientation_3d_show, orientation_3d_store);
+
+#define MODE_2D		"2d"
+#define MODE_3D		"3d"
+
+static ssize_t mode_3d_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	struct tegra_dc_out *dc_out = dc->out;
+	const char *mode;
+	switch (dc_out->stereo->mode_2d_3d) {
+	case TEGRA_DC_STEREO_MODE_2D:
+		mode = MODE_2D;
+		break;
+	case TEGRA_DC_STEREO_MODE_3D:
+		mode = MODE_3D;
+		break;
+	default:
+		pr_err("Invalid value is stored for stereo_mode.\n");
+		return -EINVAL;
+	}
+	return snprintf(buf, PAGE_SIZE, "%s\n", mode);
+}
+
+static ssize_t mode_3d_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t cnt)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	struct tegra_dc_out *dc_out = dc->out;
+	struct tegra_stereo_out *stereo = dc_out->stereo;
+	int mode;
+
+	if (0 == strncmp(buf, MODE_2D, min(cnt, ARRAY_SIZE(MODE_2D) - 1))) {
+		mode = TEGRA_DC_STEREO_MODE_2D;
+	} else if (0 == strncmp(buf, MODE_3D,
+			min(cnt, ARRAY_SIZE(MODE_3D) - 1))) {
+		mode = TEGRA_DC_STEREO_MODE_3D;
+	} else {
+		pr_err("Invalid property value for stereo_mode.\n");
+		return -EINVAL;
+	}
+	stereo->mode_2d_3d = mode;
+	stereo->set_mode(mode);
+	return cnt;
+}
+
+static DEVICE_ATTR(stereo_mode,
+	S_IRUGO|S_IWUSR, mode_3d_show, mode_3d_store);
+
+static ssize_t nvdps_show(struct device *device,
+	struct device_attribute *attr, char *buf)
+{
+	int refresh_rate;
+	struct nvhost_device *ndev = to_nvhost_device(device);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+	refresh_rate = tegra_fb_get_mode(dc);
+	return snprintf(buf, PAGE_SIZE, "%d\n", refresh_rate);
+}
+
+static ssize_t nvdps_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	int refresh_rate;
+	int e;
+
+	e = kstrtoint(buf, 10, &refresh_rate);
+	if (e)
+		return e;
+	e = tegra_fb_set_mode(dc, refresh_rate);
+	if (e)
+		return e;
+
+	return count;
+}
+
+static DEVICE_ATTR(nvdps, S_IRUGO|S_IWUSR, nvdps_show, nvdps_store);
+
+static ssize_t smart_panel_show(struct device *device,
+	struct device_attribute *attr, char  *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "1\n");
+}
+
+static DEVICE_ATTR(smart_panel, S_IRUGO, smart_panel_show, NULL);
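+
+/*
+ * The attributes above are created on the nvhost device; the paths
+ * below are illustrative only:
+ *
+ *	cat /sys/devices/.../tegradc.0/mode
+ *	echo 1 > /sys/devices/.../tegradc.0/enable
+ */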
+
+void tegra_dc_remove_sysfs(struct device *dev)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings;
+
+	device_remove_file(dev, &dev_attr_mode);
+	device_remove_file(dev, &dev_attr_nvdps);
+	device_remove_file(dev, &dev_attr_enable);
+	device_remove_file(dev, &dev_attr_stats_enable);
+	device_remove_file(dev, &dev_attr_crc_checksum_latched);
+
+	if (dc->out->stereo) {
+		device_remove_file(dev, &dev_attr_stereo_orientation);
+		device_remove_file(dev, &dev_attr_stereo_mode);
+	}
+
+	if (sd_settings)
+		nvsd_remove_sysfs(dev);
+
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+		device_remove_file(dev, &dev_attr_smart_panel);
+}
+
+void tegra_dc_create_sysfs(struct device *dev)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings;
+	int error = 0;
+
+	error |= device_create_file(dev, &dev_attr_mode);
+	error |= device_create_file(dev, &dev_attr_nvdps);
+	error |= device_create_file(dev, &dev_attr_enable);
+	error |= device_create_file(dev, &dev_attr_stats_enable);
+	error |= device_create_file(dev, &dev_attr_crc_checksum_latched);
+
+	if (dc->out->stereo) {
+		error |= device_create_file(dev, &dev_attr_stereo_orientation);
+		error |= device_create_file(dev, &dev_attr_stereo_mode);
+	}
+
+	if (sd_settings)
+		error |= nvsd_create_sysfs(dev);
+
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+		error |= device_create_file(dev, &dev_attr_smart_panel);
+
+	if (error)
+		dev_err(&ndev->dev, "Failed to create sysfs attributes!\n");
+}
diff --git a/drivers/staging/tegra/video/dc/dsi.c b/drivers/staging/tegra/video/dc/dsi.c
new file mode 100644
index 000000000000..c4b49f88f550
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/dsi.c
@@ -0,0 +1,3509 @@
+/*
+ * drivers/video/tegra/dc/dsi.c
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/nvhost.h>
+#include <linux/module.h>
+
+#include <mach/clk.h>
+#include <mach/csi.h>
+
+#include "dc.h"
+#include "fb.h"
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "dsi_regs.h"
+#include "dsi.h"
+
+#define TEGRA_APB_MISC_BASE		0x70000000
+#define APB_MISC_GP_MIPI_PAD_CTRL_0	(TEGRA_APB_MISC_BASE + 0x820)
+#define DSIB_MODE_ENABLE		0x2
+
+#define DSI_USE_SYNC_POINTS		1
+#define S_TO_MS(x)			(1000 * (x))
+
+#define DSI_MODULE_NOT_INIT		0x0
+#define DSI_MODULE_INIT			0x1
+
+#define DSI_LPHS_NOT_INIT		0x0
+#define DSI_LPHS_IN_LP_MODE		0x1
+#define DSI_LPHS_IN_HS_MODE		0x2
+
+#define DSI_VIDEO_TYPE_NOT_INIT		0x0
+#define DSI_VIDEO_TYPE_VIDEO_MODE	0x1
+#define DSI_VIDEO_TYPE_CMD_MODE		0x2
+
+#define DSI_DRIVEN_MODE_NOT_INIT	0x0
+#define DSI_DRIVEN_MODE_DC		0x1
+#define DSI_DRIVEN_MODE_HOST		0x2
+
+#define DSI_PHYCLK_OUT_DIS		0x0
+#define DSI_PHYCLK_OUT_EN		0x1
+
+#define DSI_PHYCLK_NOT_INIT		0x0
+#define DSI_PHYCLK_CONTINUOUS		0x1
+#define DSI_PHYCLK_TX_ONLY		0x2
+
+#define DSI_CLK_BURST_NOT_INIT		0x0
+#define DSI_CLK_BURST_NONE_BURST	0x1
+#define DSI_CLK_BURST_BURST_MODE	0x2
+
+#define DSI_DC_STREAM_DISABLE		0x0
+#define DSI_DC_STREAM_ENABLE		0x1
+
+#define DSI_LP_OP_NOT_INIT		0x0
+#define DSI_LP_OP_WRITE			0x1
+#define DSI_LP_OP_READ			0x2
+
+#define DSI_HOST_IDLE_PERIOD		1000
+
+static bool enable_read_debug;
+module_param(enable_read_debug, bool, 0644);
+MODULE_PARM_DESC(enable_read_debug,
+		"Enable to print read fifo and return packet type");
+
+struct dsi_status {
+	unsigned init:2;
+
+	unsigned lphs:2;
+
+	unsigned vtype:2;
+	unsigned driven:2;
+
+	unsigned clk_out:2;
+	unsigned clk_mode:2;
+	unsigned clk_burst:2;
+
+	unsigned lp_op:2;
+
+	unsigned dc_stream:1;
+};
+
+/* source of video data */
+enum {
+	TEGRA_DSI_DRIVEN_BY_DC,
+	TEGRA_DSI_DRIVEN_BY_HOST,
+};
+
+struct tegra_dc_dsi_data {
+	struct tegra_dc *dc;
+	void __iomem *base;
+	struct resource *base_res;
+
+	struct clk *dc_clk;
+	struct clk *dsi_clk;
+	struct clk *dsi_fixed_clk;
+	bool clk_ref;
+
+	struct mutex lock;
+
+	/* data from board info */
+	struct tegra_dsi_out info;
+
+	struct dsi_status status;
+
+	struct dsi_phy_timing_inclk phy_timing;
+
+	bool ulpm;
+	bool enabled;
+	bool host_suspended;
+	struct mutex host_resume_lock;
+	struct delayed_work idle_work;
+	unsigned long idle_delay;
+	spinlock_t host_ref_lock;
+	u8 host_ref;
+
+	u8 driven_mode;
+	u8 controller_index;
+
+	u8 pixel_scaler_mul;
+	u8 pixel_scaler_div;
+
+	u32 default_shift_clk_div;
+	u32 default_pixel_clk_khz;
+	u32 default_hs_clk_khz;
+
+	u32 shift_clk_div;
+	u32 target_hs_clk_khz;
+	u32 target_lp_clk_khz;
+
+	u32 syncpt_id;
+	u32 syncpt_val;
+
+	u16 current_bit_clk_ns;
+	u32 current_dsi_clk_khz;
+
+	u32 dsi_control_val;
+};
+
+const u32 dsi_pkt_seq_reg[NUMOF_PKT_SEQ] = {
+	DSI_PKT_SEQ_0_LO,
+	DSI_PKT_SEQ_0_HI,
+	DSI_PKT_SEQ_1_LO,
+	DSI_PKT_SEQ_1_HI,
+	DSI_PKT_SEQ_2_LO,
+	DSI_PKT_SEQ_2_HI,
+	DSI_PKT_SEQ_3_LO,
+	DSI_PKT_SEQ_3_HI,
+	DSI_PKT_SEQ_4_LO,
+	DSI_PKT_SEQ_4_HI,
+	DSI_PKT_SEQ_5_LO,
+	DSI_PKT_SEQ_5_HI,
+};
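+
+/*
+ * Each packet sequence below supplies one 32-bit word per register in
+ * dsi_pkt_seq_reg[], i.e. a LO/HI pair for each of the six sequences.
+ */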
+
+const u32 dsi_pkt_seq_video_non_burst_syne[NUMOF_PKT_SEQ] = {
+	PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_VE) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+	PKT_ID2(CMD_HE) | PKT_LEN2(0),
+	PKT_ID3(CMD_BLNK) | PKT_LEN3(2) | PKT_ID4(CMD_RGB) | PKT_LEN4(3) |
+	PKT_ID5(CMD_BLNK) | PKT_LEN5(4),
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+	PKT_ID2(CMD_HE) | PKT_LEN2(0),
+	PKT_ID3(CMD_BLNK) | PKT_LEN3(2) | PKT_ID4(CMD_RGB) | PKT_LEN4(3) |
+	PKT_ID5(CMD_BLNK) | PKT_LEN5(4),
+};
+
+const u32 dsi_pkt_seq_video_non_burst[NUMOF_PKT_SEQ] = {
+	PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2) |
+	PKT_ID2(CMD_RGB) | PKT_LEN2(3),
+	PKT_ID3(CMD_BLNK) | PKT_LEN3(4),
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2) |
+	PKT_ID2(CMD_RGB) | PKT_LEN2(3),
+	PKT_ID3(CMD_BLNK) | PKT_LEN3(4),
+};
+
+static const u32 dsi_pkt_seq_video_burst[NUMOF_PKT_SEQ] = {
+	PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2) |
+	PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
+	PKT_ID0(CMD_EOT) | PKT_LEN0(7),
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2) |
+	PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
+	PKT_ID0(CMD_EOT) | PKT_LEN0(7),
+};
+
+static const u32 dsi_pkt_seq_video_burst_no_eot[NUMOF_PKT_SEQ] = {
+	PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2) |
+	PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
+	PKT_ID0(CMD_EOT) | PKT_LEN0(0),
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
+	0,
+	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2) |
+	PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
+	PKT_ID0(CMD_EOT) | PKT_LEN0(0),
+};
+
+/* TODO: verify this format with hw */
+const u32 dsi_pkt_seq_cmd_mode[NUMOF_PKT_SEQ] = {
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	PKT_ID0(CMD_LONGW) | PKT_LEN0(3) | PKT_ID1(CMD_EOT) | PKT_LEN1(7),
+	0,
+	0,
+	0,
+	PKT_ID0(CMD_LONGW) | PKT_LEN0(3) | PKT_ID1(CMD_EOT) | PKT_LEN1(7),
+	0,
+};
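+
+/*
+ * Illustrative note (not from the original driver): each pkt_seq table
+ * programs six line types, two registers (LO/HI) per line, with up to six
+ * packet slots per line. Assuming the field layout in dsi.h, the first
+ * entry of dsi_pkt_seq_video_non_burst decodes as:
+ *
+ *   PKT_ID0(CMD_VS)  | PKT_LEN0(0)	send a VSYNC start short packet,
+ *					using length slot 0 of DSI_PKT_LEN_*
+ *   PKT_ID1(CMD_EOT) | PKT_LEN1(0)	follow with an end-of-transmission
+ *   PKT_LP				then drop the line to LP mode
+ */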
+
+const u32 init_reg[] = {
+	DSI_INT_ENABLE,
+	DSI_INT_STATUS,
+	DSI_INT_MASK,
+	DSI_INIT_SEQ_DATA_0,
+	DSI_INIT_SEQ_DATA_1,
+	DSI_INIT_SEQ_DATA_2,
+	DSI_INIT_SEQ_DATA_3,
+	DSI_INIT_SEQ_DATA_4,
+	DSI_INIT_SEQ_DATA_5,
+	DSI_INIT_SEQ_DATA_6,
+	DSI_INIT_SEQ_DATA_7,
+	DSI_DCS_CMDS,
+	DSI_PKT_SEQ_0_LO,
+	DSI_PKT_SEQ_1_LO,
+	DSI_PKT_SEQ_2_LO,
+	DSI_PKT_SEQ_3_LO,
+	DSI_PKT_SEQ_4_LO,
+	DSI_PKT_SEQ_5_LO,
+	DSI_PKT_SEQ_0_HI,
+	DSI_PKT_SEQ_1_HI,
+	DSI_PKT_SEQ_2_HI,
+	DSI_PKT_SEQ_3_HI,
+	DSI_PKT_SEQ_4_HI,
+	DSI_PKT_SEQ_5_HI,
+	DSI_CONTROL,
+	DSI_HOST_DSI_CONTROL,
+	DSI_PAD_CONTROL,
+	DSI_PAD_CONTROL_CD,
+	DSI_SOL_DELAY,
+	DSI_MAX_THRESHOLD,
+	DSI_TRIGGER,
+	DSI_TX_CRC,
+	DSI_INIT_SEQ_CONTROL,
+	DSI_PKT_LEN_0_1,
+	DSI_PKT_LEN_2_3,
+	DSI_PKT_LEN_4_5,
+	DSI_PKT_LEN_6_7,
+};
+
+static int tegra_dsi_host_suspend(struct tegra_dc *dc);
+static int tegra_dsi_host_resume(struct tegra_dc *dc);
+static void tegra_dc_dsi_idle_work(struct work_struct *work);
+
+inline unsigned long tegra_dsi_readl(struct tegra_dc_dsi_data *dsi, u32 reg)
+{
+	unsigned long ret;
+
+	BUG_ON(!nvhost_module_powered_ext(nvhost_get_parent(dsi->dc->ndev)));
+	ret = readl(dsi->base + reg * 4);
+	trace_printk("readl %p=%#08lx\n", dsi->base + reg * 4, ret);
+	return ret;
+}
+EXPORT_SYMBOL(tegra_dsi_readl);
+
+inline void tegra_dsi_writel(struct tegra_dc_dsi_data *dsi, u32 val, u32 reg)
+{
+	BUG_ON(!nvhost_module_powered_ext(nvhost_get_parent(dsi->dc->ndev)));
+	trace_printk("writel %p=%#08x\n", dsi->base + reg * 4, val);
+	writel(val, dsi->base + reg * 4);
+}
+EXPORT_SYMBOL(tegra_dsi_writel);
+
+#ifdef CONFIG_DEBUG_FS
+static int dbg_dsi_show(struct seq_file *s, void *unused)
+{
+	struct tegra_dc_dsi_data *dsi = s->private;
+
+#define DUMP_REG(a) do {						\
+		seq_printf(s, "%-32s\t%03x\t%08lx\n",			\
+		       #a, a, tegra_dsi_readl(dsi, a));		\
+	} while (0)
+
+	tegra_dc_io_start(dsi->dc);
+	clk_prepare_enable(dsi->dsi_clk);
+
+	DUMP_REG(DSI_INCR_SYNCPT_CNTRL);
+	DUMP_REG(DSI_INCR_SYNCPT_ERROR);
+	DUMP_REG(DSI_CTXSW);
+	DUMP_REG(DSI_POWER_CONTROL);
+	DUMP_REG(DSI_INT_ENABLE);
+	DUMP_REG(DSI_HOST_DSI_CONTROL);
+	DUMP_REG(DSI_CONTROL);
+	DUMP_REG(DSI_SOL_DELAY);
+	DUMP_REG(DSI_MAX_THRESHOLD);
+	DUMP_REG(DSI_TRIGGER);
+	DUMP_REG(DSI_TX_CRC);
+	DUMP_REG(DSI_STATUS);
+	DUMP_REG(DSI_INIT_SEQ_CONTROL);
+	DUMP_REG(DSI_INIT_SEQ_DATA_0);
+	DUMP_REG(DSI_INIT_SEQ_DATA_1);
+	DUMP_REG(DSI_INIT_SEQ_DATA_2);
+	DUMP_REG(DSI_INIT_SEQ_DATA_3);
+	DUMP_REG(DSI_INIT_SEQ_DATA_4);
+	DUMP_REG(DSI_INIT_SEQ_DATA_5);
+	DUMP_REG(DSI_INIT_SEQ_DATA_6);
+	DUMP_REG(DSI_INIT_SEQ_DATA_7);
+	DUMP_REG(DSI_PKT_SEQ_0_LO);
+	DUMP_REG(DSI_PKT_SEQ_0_HI);
+	DUMP_REG(DSI_PKT_SEQ_1_LO);
+	DUMP_REG(DSI_PKT_SEQ_1_HI);
+	DUMP_REG(DSI_PKT_SEQ_2_LO);
+	DUMP_REG(DSI_PKT_SEQ_2_HI);
+	DUMP_REG(DSI_PKT_SEQ_3_LO);
+	DUMP_REG(DSI_PKT_SEQ_3_HI);
+	DUMP_REG(DSI_PKT_SEQ_4_LO);
+	DUMP_REG(DSI_PKT_SEQ_4_HI);
+	DUMP_REG(DSI_PKT_SEQ_5_LO);
+	DUMP_REG(DSI_PKT_SEQ_5_HI);
+	DUMP_REG(DSI_DCS_CMDS);
+	DUMP_REG(DSI_PKT_LEN_0_1);
+	DUMP_REG(DSI_PKT_LEN_2_3);
+	DUMP_REG(DSI_PKT_LEN_4_5);
+	DUMP_REG(DSI_PKT_LEN_6_7);
+	DUMP_REG(DSI_PHY_TIMING_0);
+	DUMP_REG(DSI_PHY_TIMING_1);
+	DUMP_REG(DSI_PHY_TIMING_2);
+	DUMP_REG(DSI_BTA_TIMING);
+	DUMP_REG(DSI_TIMEOUT_0);
+	DUMP_REG(DSI_TIMEOUT_1);
+	DUMP_REG(DSI_TO_TALLY);
+	DUMP_REG(DSI_PAD_CONTROL);
+	DUMP_REG(DSI_PAD_CONTROL_CD);
+	DUMP_REG(DSI_PAD_CD_STATUS);
+	DUMP_REG(DSI_VID_MODE_CONTROL);
+#undef DUMP_REG
+
+	clk_disable_unprepare(dsi->dsi_clk);
+	tegra_dc_io_end(dsi->dc);
+
+	return 0;
+}
+
+static int dbg_dsi_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dbg_dsi_show, inode->i_private);
+}
+
+static const struct file_operations dbg_fops = {
+	.open		= dbg_dsi_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static struct dentry *dsidir;
+
+static void tegra_dc_dsi_debug_create(struct tegra_dc_dsi_data *dsi)
+{
+	struct dentry *retval;
+
+	dsidir = debugfs_create_dir("tegra_dsi", NULL);
+	if (!dsidir)
+		return;
+	retval = debugfs_create_file("regs", S_IRUGO, dsidir, dsi,
+		&dbg_fops);
+	if (!retval)
+		goto free_out;
+	return;
+free_out:
+	debugfs_remove_recursive(dsidir);
+	dsidir = NULL;
+	return;
+}
+#else
+static inline void tegra_dc_dsi_debug_create(struct tegra_dc_dsi_data *dsi)
+{ }
+#endif
+
+static inline void tegra_dsi_clk_enable(struct tegra_dc_dsi_data *dsi)
+{
+	if (!tegra_is_clk_enabled(dsi->dsi_clk)) {
+		clk_prepare_enable(dsi->dsi_clk);
+		clk_prepare_enable(dsi->dsi_fixed_clk);
+	}
+}
+
+static inline void tegra_dsi_clk_disable(struct tegra_dc_dsi_data *dsi)
+{
+	if (tegra_is_clk_enabled(dsi->dsi_clk)) {
+		clk_disable_unprepare(dsi->dsi_clk);
+		clk_disable_unprepare(dsi->dsi_fixed_clk);
+	}
+}
+
+static int tegra_dsi_syncpt(struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+	int ret;
+
+	dsi->syncpt_val = nvhost_syncpt_read_ext(dsi->dc->ndev, dsi->syncpt_id);
+
+	val = DSI_INCR_SYNCPT_COND(OP_DONE) |
+		DSI_INCR_SYNCPT_INDX(dsi->syncpt_id);
+	tegra_dsi_writel(dsi, val, DSI_INCR_SYNCPT);
+
+	/* TODO: Use interrupt rather than polling */
+	ret = nvhost_syncpt_wait_timeout_ext(dsi->dc->ndev, dsi->syncpt_id,
+		dsi->syncpt_val + 1, MAX_SCHEDULE_TIMEOUT, NULL);
+	if (ret < 0) {
+		dev_err(&dsi->dc->ndev->dev, "DSI sync point failure\n");
+		goto fail;
+	}
+
+	dsi->syncpt_val++;
+	return 0;
+fail:
+	return ret;
+}
+
+static u32 tegra_dsi_get_hs_clk_rate(struct tegra_dc_dsi_data *dsi)
+{
+	u32 dsi_clock_rate_khz;
+
+	switch (dsi->info.video_burst_mode) {
+	case TEGRA_DSI_VIDEO_BURST_MODE_LOW_SPEED:
+	case TEGRA_DSI_VIDEO_BURST_MODE_MEDIUM_SPEED:
+	case TEGRA_DSI_VIDEO_BURST_MODE_FAST_SPEED:
+	case TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED:
+		/* Calculate DSI HS clock rate for DSI burst mode */
+		dsi_clock_rate_khz = dsi->default_pixel_clk_khz *
+							dsi->shift_clk_div;
+		break;
+	case TEGRA_DSI_VIDEO_NONE_BURST_MODE:
+	case TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END:
+	case TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED:
+	default:
+		/* Clock rate is default DSI clock rate for non-burst mode */
+		dsi_clock_rate_khz = dsi->default_hs_clk_khz;
+		break;
+	}
+
+	return dsi_clock_rate_khz;
+}
+
+static u32 tegra_dsi_get_lp_clk_rate(struct tegra_dc_dsi_data *dsi, u8 lp_op)
+{
+	u32 dsi_clock_rate_khz;
+
+	if (dsi->info.enable_hs_clock_on_lp_cmd_mode)
+		if (dsi->info.hs_clk_in_lp_cmd_mode_freq_khz)
+			dsi_clock_rate_khz =
+				dsi->info.hs_clk_in_lp_cmd_mode_freq_khz;
+		else
+			dsi_clock_rate_khz = tegra_dsi_get_hs_clk_rate(dsi);
+	else
+		if (lp_op == DSI_LP_OP_READ)
+			dsi_clock_rate_khz =
+				dsi->info.lp_read_cmd_mode_freq_khz;
+		else
+			dsi_clock_rate_khz =
+				dsi->info.lp_cmd_mode_freq_khz;
+
+	return dsi_clock_rate_khz;
+}
+
+static u32 tegra_dsi_get_shift_clk_div(struct tegra_dc_dsi_data *dsi)
+{
+	u32 shift_clk_div;
+	u32 max_shift_clk_div;
+	u32 burst_width;
+	u32 burst_width_max;
+
+	/* Start from the default shift_clk_div, which holds the real
+	 * value of shift_clk_div.
+	 */
+	shift_clk_div = dsi->default_shift_clk_div;
+
+	/* Calculate a shift_clk_div that matches the video_burst_mode. */
+	if (dsi->info.video_burst_mode >=
+			TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED) {
+		/* max_shift_clk_div is multiplied by 10 to preserve the
+		 * fractional part.
+		 */
+		if (dsi->info.max_panel_freq_khz >= dsi->default_hs_clk_khz)
+			max_shift_clk_div = dsi->info.max_panel_freq_khz
+				* shift_clk_div * 10 / dsi->default_hs_clk_khz;
+		else
+			max_shift_clk_div = shift_clk_div * 10;
+
+		burst_width = dsi->info.video_burst_mode
+				- TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED;
+		burst_width_max = TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED
+				- TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED;
+
+		shift_clk_div = (max_shift_clk_div - shift_clk_div * 10) *
+			burst_width / (burst_width_max * 10) + shift_clk_div;
+	}
+
+	return shift_clk_div;
+}
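+
+/*
+ * Worked example (illustrative only, values assumed): with
+ * default_shift_clk_div = 3, default_hs_clk_khz = 90000 and
+ * max_panel_freq_khz = 180000, max_shift_clk_div = 180000 * 3 * 10 / 90000
+ * = 60 (scaled by 10). At TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED,
+ * burst_width equals burst_width_max, so shift_clk_div = (60 - 30) *
+ * burst_width / (burst_width_max * 10) + 3 = 6, i.e. the divider is
+ * interpolated linearly from the default up to the panel maximum.
+ */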
+
+static void tegra_dsi_init_sw(struct tegra_dc *dc,
+			struct tegra_dc_dsi_data *dsi)
+{
+	u32 h_width_pixels;
+	u32 v_width_lines;
+	u32 pixel_clk_hz;
+	u32 byte_clk_hz;
+	u32 plld_clk_mhz;
+
+	switch (dsi->info.pixel_format) {
+	case TEGRA_DSI_PIXEL_FORMAT_16BIT_P:
+		/* 2 bytes per pixel */
+		dsi->pixel_scaler_mul = 2;
+		dsi->pixel_scaler_div = 1;
+		break;
+	case TEGRA_DSI_PIXEL_FORMAT_18BIT_P:
+		/* 2.25 bytes per pixel */
+		dsi->pixel_scaler_mul = 9;
+		dsi->pixel_scaler_div = 4;
+		break;
+	case TEGRA_DSI_PIXEL_FORMAT_18BIT_NP:
+	case TEGRA_DSI_PIXEL_FORMAT_24BIT_P:
+		/* 3 bytes per pixel */
+		dsi->pixel_scaler_mul = 3;
+		dsi->pixel_scaler_div = 1;
+		break;
+	default:
+		break;
+	}
+
+	dsi->controller_index = dc->ndev->id;
+	dsi->ulpm = false;
+	dsi->enabled = false;
+	dsi->clk_ref = false;
+
+	dsi->dsi_control_val =
+			DSI_CONTROL_VIRTUAL_CHANNEL(dsi->info.virtual_channel) |
+			DSI_CONTROL_NUM_DATA_LANES(dsi->info.n_data_lanes - 1) |
+			DSI_CONTROL_VID_SOURCE(dsi->controller_index) |
+			DSI_CONTROL_DATA_FORMAT(dsi->info.pixel_format);
+
+	/* Below we calculate the dsi and dc clock rates.
+	 * Start with the horizontal and vertical widths.
+	 */
+	h_width_pixels = dc->mode.h_back_porch + dc->mode.h_front_porch +
+			dc->mode.h_sync_width + dc->mode.h_active;
+	v_width_lines = dc->mode.v_back_porch + dc->mode.v_front_porch +
+			dc->mode.v_sync_width + dc->mode.v_active;
+
+	/* Calculate minimum required pixel rate. */
+	pixel_clk_hz = h_width_pixels * v_width_lines * dsi->info.refresh_rate;
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) {
+		if (dsi->info.rated_refresh_rate >= dsi->info.refresh_rate)
+			dev_info(&dc->ndev->dev, "DSI: rated refresh rate "
+				"should be smaller than the measured one.\n");
+		dc->mode.rated_pclk = h_width_pixels * v_width_lines *
+						dsi->info.rated_refresh_rate;
+	}
+
+	/* Calculate minimum byte rate on DSI interface. */
+	byte_clk_hz = (pixel_clk_hz * dsi->pixel_scaler_mul) /
+			(dsi->pixel_scaler_div * dsi->info.n_data_lanes);
+
+	/* Round up to a multiple of MHz. */
+	plld_clk_mhz = DIV_ROUND_UP((byte_clk_hz * NUMOF_BIT_PER_BYTE),
+								1000000);
+
+	/* Calculate default real shift_clk_div. */
+	dsi->default_shift_clk_div = (NUMOF_BIT_PER_BYTE / 2) *
+		dsi->pixel_scaler_mul / (dsi->pixel_scaler_div *
+		dsi->info.n_data_lanes);
+	/* Calculate default DSI hs clock. DSI interface is double data rate.
+	 * Data is transferred on both rising and falling edge of clk, div by 2
+	 * to get the actual clock rate.
+	 */
+	dsi->default_hs_clk_khz = plld_clk_mhz * 1000 / 2;
+	dsi->default_pixel_clk_khz = plld_clk_mhz * 1000 / 2
+						/ dsi->default_shift_clk_div;
+
+	/* Get the actual shift_clk_div and clock rates. */
+	dsi->shift_clk_div = tegra_dsi_get_shift_clk_div(dsi);
+	dsi->target_lp_clk_khz =
+			tegra_dsi_get_lp_clk_rate(dsi, DSI_LP_OP_WRITE);
+	dsi->target_hs_clk_khz = tegra_dsi_get_hs_clk_rate(dsi);
+
+	dev_info(&dc->ndev->dev, "DSI: HS clock rate is %d kHz\n",
+							dsi->target_hs_clk_khz);
+
+#if DSI_USE_SYNC_POINTS
+	dsi->syncpt_id = NVSYNCPT_DSI;
+#endif
+
+	/*
+	 * Force video clock to be continuous mode if
+	 * enable_hs_clock_on_lp_cmd_mode is set
+	 */
+	if (dsi->info.enable_hs_clock_on_lp_cmd_mode) {
+		if (dsi->info.video_clock_mode !=
+					TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS)
+			dev_warn(&dc->ndev->dev,
+				"Force clock continuous mode\n");
+
+		dsi->info.video_clock_mode = TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS;
+	}
+
+	dsi->host_ref = 0;
+	dsi->host_suspended = false;
+	spin_lock_init(&dsi->host_ref_lock);
+	mutex_init(&dsi->host_resume_lock);
+	init_completion(&dc->out->user_vblank_comp);
+	INIT_DELAYED_WORK(&dsi->idle_work, tegra_dc_dsi_idle_work);
+	dsi->idle_delay = msecs_to_jiffies(DSI_HOST_IDLE_PERIOD);
+}
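+
+/*
+ * Worked example for the clock derivation above (illustrative only,
+ * assuming a 24bpp packed mode, 4 data lanes and a 1000x500 total raster
+ * at 60 Hz): pixel_clk_hz = 1000 * 500 * 60 = 30 MHz,
+ * byte_clk_hz = 30 MHz * 3 / (1 * 4) = 22.5 MHz,
+ * plld_clk_mhz = DIV_ROUND_UP(22.5 MHz * 8, 1000000) = 180,
+ * default_shift_clk_div = (8 / 2) * 3 / (1 * 4) = 3,
+ * default_hs_clk_khz = 180 * 1000 / 2 = 90000 and
+ * default_pixel_clk_khz = 90000 / 3 = 30000, which matches the required
+ * pixel clock.
+ */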
+
+#define SELECT_T_PHY(platform_t_phy_ns, default_phy, clk_ns, hw_inc) ( \
+(platform_t_phy_ns) ? ( \
+((DSI_CONVERT_T_PHY_NS_TO_T_PHY(platform_t_phy_ns, clk_ns, hw_inc)) < 0 ? 0 : \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY(platform_t_phy_ns, clk_ns, hw_inc)))) : \
+((default_phy) < 0 ? 0 : (default_phy)))
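+
+/*
+ * Illustrative note (not from the original driver): SELECT_T_PHY prefers
+ * the board-supplied *_ns value, converted to HW counts with
+ * DSI_CONVERT_T_PHY_NS_TO_T_PHY, and falls back to the default otherwise;
+ * either result is clamped at 0 so a too-small platform value cannot
+ * program a negative count.
+ */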
+
+static void tegra_dsi_get_clk_phy_timing(struct tegra_dc_dsi_data *dsi,
+		struct dsi_phy_timing_inclk *phy_timing_clk, u32 clk_ns)
+{
+	phy_timing_clk->t_tlpx = SELECT_T_PHY(
+		dsi->info.phy_timing.t_tlpx_ns,
+		T_TLPX_DEFAULT(clk_ns), clk_ns, T_TLPX_HW_INC);
+
+	phy_timing_clk->t_clktrail = SELECT_T_PHY(
+		dsi->info.phy_timing.t_clktrail_ns,
+		T_CLKTRAIL_DEFAULT(clk_ns), clk_ns, T_CLKTRAIL_HW_INC);
+
+	phy_timing_clk->t_clkpost = SELECT_T_PHY(
+		dsi->info.phy_timing.t_clkpost_ns,
+		T_CLKPOST_DEFAULT(clk_ns), clk_ns, T_CLKPOST_HW_INC);
+
+	phy_timing_clk->t_clkzero = SELECT_T_PHY(
+		dsi->info.phy_timing.t_clkzero_ns,
+		T_CLKZERO_DEFAULT(clk_ns), clk_ns, T_CLKZERO_HW_INC);
+
+	phy_timing_clk->t_clkprepare = SELECT_T_PHY(
+		dsi->info.phy_timing.t_clkprepare_ns,
+		T_CLKPREPARE_DEFAULT(clk_ns), clk_ns, T_CLKPREPARE_HW_INC);
+
+	phy_timing_clk->t_clkpre = SELECT_T_PHY(
+		dsi->info.phy_timing.t_clkpre_ns,
+		T_CLKPRE_DEFAULT, clk_ns, T_CLKPRE_HW_INC);
+}
+
+static void tegra_dsi_get_hs_phy_timing(struct tegra_dc_dsi_data *dsi,
+		struct dsi_phy_timing_inclk *phy_timing_clk, u32 clk_ns)
+{
+	phy_timing_clk->t_tlpx = SELECT_T_PHY(
+		dsi->info.phy_timing.t_tlpx_ns,
+		T_TLPX_DEFAULT(clk_ns), clk_ns, T_TLPX_HW_INC);
+
+	phy_timing_clk->t_hsdexit = SELECT_T_PHY(
+		dsi->info.phy_timing.t_hsdexit_ns,
+		T_HSEXIT_DEFAULT(clk_ns), clk_ns, T_HSEXIT_HW_INC);
+
+	phy_timing_clk->t_hstrail = SELECT_T_PHY(
+		dsi->info.phy_timing.t_hstrail_ns,
+		T_HSTRAIL_DEFAULT(clk_ns), clk_ns, T_HSTRAIL_HW_INC);
+
+	phy_timing_clk->t_datzero = SELECT_T_PHY(
+		dsi->info.phy_timing.t_datzero_ns,
+		T_DATZERO_DEFAULT(clk_ns), clk_ns, T_DATZERO_HW_INC);
+
+	phy_timing_clk->t_hsprepare = SELECT_T_PHY(
+		dsi->info.phy_timing.t_hsprepare_ns,
+		T_HSPREPARE_DEFAULT(clk_ns), clk_ns, T_HSPREPARE_HW_INC);
+}
+
+static void tegra_dsi_get_escape_phy_timing(struct tegra_dc_dsi_data *dsi,
+		struct dsi_phy_timing_inclk *phy_timing_clk, u32 clk_ns)
+{
+	phy_timing_clk->t_tlpx = SELECT_T_PHY(
+		dsi->info.phy_timing.t_tlpx_ns,
+		T_TLPX_DEFAULT(clk_ns), clk_ns, T_TLPX_HW_INC);
+}
+
+static void tegra_dsi_get_bta_phy_timing(struct tegra_dc_dsi_data *dsi,
+		struct dsi_phy_timing_inclk *phy_timing_clk, u32 clk_ns)
+{
+	phy_timing_clk->t_tlpx = SELECT_T_PHY(
+		dsi->info.phy_timing.t_tlpx_ns,
+		T_TLPX_DEFAULT(clk_ns), clk_ns, T_TLPX_HW_INC);
+
+	phy_timing_clk->t_taget = SELECT_T_PHY(
+		dsi->info.phy_timing.t_taget_ns,
+		T_TAGET_DEFAULT(clk_ns), clk_ns, T_TAGET_HW_INC);
+
+	phy_timing_clk->t_tasure = SELECT_T_PHY(
+		dsi->info.phy_timing.t_tasure_ns,
+		T_TASURE_DEFAULT(clk_ns), clk_ns, T_TASURE_HW_INC);
+
+	phy_timing_clk->t_tago = SELECT_T_PHY(
+		dsi->info.phy_timing.t_tago_ns,
+		T_TAGO_DEFAULT(clk_ns), clk_ns, T_TAGO_HW_INC);
+}
+
+static void tegra_dsi_get_ulps_phy_timing(struct tegra_dc_dsi_data *dsi,
+		struct dsi_phy_timing_inclk *phy_timing_clk, u32 clk_ns)
+{
+	phy_timing_clk->t_tlpx = SELECT_T_PHY(
+		dsi->info.phy_timing.t_tlpx_ns,
+		T_TLPX_DEFAULT(clk_ns), clk_ns, T_TLPX_HW_INC);
+
+	phy_timing_clk->t_wakeup = SELECT_T_PHY(
+		dsi->info.phy_timing.t_wakeup_ns,
+		T_WAKEUP_DEFAULT, clk_ns, T_WAKEUP_HW_INC);
+}
+
+#undef SELECT_T_PHY
+
+static void tegra_dsi_get_phy_timing(struct tegra_dc_dsi_data *dsi,
+				struct dsi_phy_timing_inclk *phy_timing_clk,
+				u32 clk_ns, u8 lphs)
+{
+	if (lphs == DSI_LPHS_IN_HS_MODE) {
+		tegra_dsi_get_clk_phy_timing(dsi, phy_timing_clk, clk_ns);
+		tegra_dsi_get_hs_phy_timing(dsi, phy_timing_clk, clk_ns);
+	} else {
+		/* default is LP mode */
+		tegra_dsi_get_escape_phy_timing(dsi, phy_timing_clk, clk_ns);
+		tegra_dsi_get_bta_phy_timing(dsi, phy_timing_clk, clk_ns);
+		tegra_dsi_get_ulps_phy_timing(dsi, phy_timing_clk, clk_ns);
+		if (dsi->info.enable_hs_clock_on_lp_cmd_mode)
+			tegra_dsi_get_clk_phy_timing(dsi,
+					phy_timing_clk, clk_ns);
+	}
+}
+
+static int tegra_dsi_mipi_phy_timing_range(struct tegra_dc_dsi_data *dsi,
+				struct dsi_phy_timing_inclk *phy_timing,
+				u32 clk_ns, u8 lphs)
+{
+#define CHECK_RANGE(val, min, max) ( \
+		((min) == NOT_DEFINED ? 0 : (val) < (min)) || \
+		((max) == NOT_DEFINED ? 0 : (val) > (max)) ? -EINVAL : 0)
+
+	int err = 0;
+
+	err = CHECK_RANGE(
+	DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC),
+			MIPI_T_TLPX_NS_MIN, MIPI_T_TLPX_NS_MAX);
+	if (err < 0) {
+		dev_warn(&dsi->dc->ndev->dev,
+			"dsi: Tlpx mipi range violated\n");
+		goto fail;
+	}
+
+	if (lphs == DSI_LPHS_IN_HS_MODE) {
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_hsdexit, clk_ns, T_HSEXIT_HW_INC),
+			MIPI_T_HSEXIT_NS_MIN, MIPI_T_HSEXIT_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: HsExit mipi range violated\n");
+			goto fail;
+		}
+
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_hstrail, clk_ns, T_HSTRAIL_HW_INC),
+			MIPI_T_HSTRAIL_NS_MIN(clk_ns), MIPI_T_HSTRAIL_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: HsTrail mipi range violated\n");
+			goto fail;
+		}
+
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_datzero, clk_ns, T_DATZERO_HW_INC),
+			MIPI_T_HSZERO_NS_MIN, MIPI_T_HSZERO_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: HsZero mipi range violated\n");
+			goto fail;
+		}
+
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_hsprepare, clk_ns, T_HSPREPARE_HW_INC),
+			MIPI_T_HSPREPARE_NS_MIN(clk_ns),
+			MIPI_T_HSPREPARE_NS_MAX(clk_ns));
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: HsPrepare mipi range violated\n");
+			goto fail;
+		}
+
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_hsprepare, clk_ns, T_HSPREPARE_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_datzero, clk_ns, T_DATZERO_HW_INC),
+			MIPI_T_HSPREPARE_ADD_HSZERO_NS_MIN(clk_ns),
+			MIPI_T_HSPREPARE_ADD_HSZERO_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+			"dsi: HsPrepare + HsZero mipi range violated\n");
+			goto fail;
+		}
+	} else {
+		/* default is LP mode */
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_wakeup, clk_ns, T_WAKEUP_HW_INC),
+			MIPI_T_WAKEUP_NS_MIN, MIPI_T_WAKEUP_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: WakeUp mipi range violated\n");
+			goto fail;
+		}
+
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_tasure, clk_ns, T_TASURE_HW_INC),
+			MIPI_T_TASURE_NS_MIN(DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC)),
+			MIPI_T_TASURE_NS_MAX(DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC)));
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: TaSure mipi range violated\n");
+			goto fail;
+		}
+	}
+
+	if (lphs == DSI_LPHS_IN_HS_MODE ||
+		dsi->info.enable_hs_clock_on_lp_cmd_mode) {
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_clktrail, clk_ns, T_CLKTRAIL_HW_INC),
+			MIPI_T_CLKTRAIL_NS_MIN, MIPI_T_CLKTRAIL_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: ClkTrail mipi range violated\n");
+			goto fail;
+		}
+
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_clkpost, clk_ns, T_CLKPOST_HW_INC),
+			MIPI_T_CLKPOST_NS_MIN(clk_ns), MIPI_T_CLKPOST_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: ClkPost mipi range violated\n");
+			goto fail;
+		}
+
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_clkzero, clk_ns, T_CLKZERO_HW_INC),
+			MIPI_T_CLKZERO_NS_MIN, MIPI_T_CLKZERO_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: ClkZero mipi range violated\n");
+			goto fail;
+		}
+
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_clkprepare, clk_ns, T_CLKPREPARE_HW_INC),
+			MIPI_T_CLKPREPARE_NS_MIN, MIPI_T_CLKPREPARE_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: ClkPrepare mipi range violated\n");
+			goto fail;
+		}
+
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_clkpre, clk_ns, T_CLKPRE_HW_INC),
+			MIPI_T_CLKPRE_NS_MIN, MIPI_T_CLKPRE_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: ClkPre mipi range violated\n");
+			goto fail;
+		}
+
+		err = CHECK_RANGE(
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_clkprepare, clk_ns, T_CLKPREPARE_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+			phy_timing->t_clkzero, clk_ns, T_CLKZERO_HW_INC),
+			MIPI_T_CLKPREPARE_ADD_CLKZERO_NS_MIN,
+			MIPI_T_CLKPREPARE_ADD_CLKZERO_NS_MAX);
+		if (err < 0) {
+			dev_warn(&dsi->dc->ndev->dev,
+			"dsi: ClkPrepare + ClkZero mipi range violated\n");
+			goto fail;
+		}
+	}
+fail:
+#undef CHECK_RANGE
+	return err;
+}
+
+static int tegra_dsi_hs_phy_len(struct tegra_dc_dsi_data *dsi,
+				struct dsi_phy_timing_inclk *phy_timing,
+				u32 clk_ns, u8 lphs)
+{
+	u32 hs_t_phy_ns;
+	u32 clk_t_phy_ns;
+	u32 t_phy_ns;
+	u32 h_blank_ns;
+	struct tegra_dc_mode *modes;
+	u32 t_pix_ns;
+	int err = 0;
+
+	if (lphs != DSI_LPHS_IN_HS_MODE)
+		goto fail;
+
+	modes = dsi->dc->out->modes;
+	t_pix_ns = clk_ns * BITS_PER_BYTE *
+		dsi->pixel_scaler_mul / dsi->pixel_scaler_div;
+
+	hs_t_phy_ns =
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_hsprepare, clk_ns, T_HSPREPARE_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_datzero, clk_ns, T_DATZERO_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_hstrail, clk_ns, T_HSTRAIL_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_hsdexit, clk_ns, T_HSEXIT_HW_INC);
+
+	clk_t_phy_ns =
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_clkpost, clk_ns, T_CLKPOST_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_clktrail, clk_ns, T_CLKTRAIL_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_hsdexit, clk_ns, T_HSEXIT_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_clkprepare, clk_ns, T_CLKPREPARE_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_clkzero, clk_ns, T_CLKZERO_HW_INC) +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_clkpre, clk_ns, T_CLKPRE_HW_INC);
+
+	h_blank_ns = t_pix_ns * (modes->h_sync_width + modes->h_back_porch +
+						modes->h_front_porch);
+
+	/* Extra tlpx and byte cycle required by dsi HW */
+	t_phy_ns = dsi->info.n_data_lanes * (hs_t_phy_ns + clk_t_phy_ns +
+		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
+		phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC) +
+		clk_ns * BITS_PER_BYTE);
+
+	if (h_blank_ns < t_phy_ns) {
+		err = -EINVAL;
+		dev_err(&dsi->dc->ndev->dev,
+			"dsi: Hblank is smaller than HS trans phy timing\n");
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return err;
+}
+
+static int tegra_dsi_constraint_phy_timing(struct tegra_dc_dsi_data *dsi,
+				struct dsi_phy_timing_inclk *phy_timing,
+				u32 clk_ns, u8 lphs)
+{
+	int err = 0;
+
+	err = tegra_dsi_mipi_phy_timing_range(dsi, phy_timing, clk_ns, lphs);
+	if (err < 0) {
+		dev_warn(&dsi->dc->ndev->dev, "dsi: mipi range violated\n");
+		goto fail;
+	}
+
+	err = tegra_dsi_hs_phy_len(dsi, phy_timing, clk_ns, lphs);
+	if (err < 0) {
+		dev_err(&dsi->dc->ndev->dev, "dsi: Hblank too short\n");
+		goto fail;
+	}
+
+	/* TODO: add more contraints */
+fail:
+	return err;
+}
+
+static void tegra_dsi_set_phy_timing(struct tegra_dc_dsi_data *dsi, u8 lphs)
+{
+	u32 val;
+	struct dsi_phy_timing_inclk phy_timing = dsi->phy_timing;
+
+	tegra_dsi_get_phy_timing(dsi, &phy_timing,
+				dsi->current_bit_clk_ns, lphs);
+
+	tegra_dsi_constraint_phy_timing(dsi, &phy_timing,
+					dsi->current_bit_clk_ns, lphs);
+
+	val = DSI_PHY_TIMING_0_THSDEXIT(phy_timing.t_hsdexit) |
+			DSI_PHY_TIMING_0_THSTRAIL(phy_timing.t_hstrail) |
+			DSI_PHY_TIMING_0_TDATZERO(phy_timing.t_datzero) |
+			DSI_PHY_TIMING_0_THSPREPR(phy_timing.t_hsprepare);
+	tegra_dsi_writel(dsi, val, DSI_PHY_TIMING_0);
+
+	val = DSI_PHY_TIMING_1_TCLKTRAIL(phy_timing.t_clktrail) |
+			DSI_PHY_TIMING_1_TCLKPOST(phy_timing.t_clkpost) |
+			DSI_PHY_TIMING_1_TCLKZERO(phy_timing.t_clkzero) |
+			DSI_PHY_TIMING_1_TTLPX(phy_timing.t_tlpx);
+	tegra_dsi_writel(dsi, val, DSI_PHY_TIMING_1);
+
+	val = DSI_PHY_TIMING_2_TCLKPREPARE(phy_timing.t_clkprepare) |
+		DSI_PHY_TIMING_2_TCLKPRE(phy_timing.t_clkpre) |
+			DSI_PHY_TIMING_2_TWAKEUP(phy_timing.t_wakeup);
+	tegra_dsi_writel(dsi, val, DSI_PHY_TIMING_2);
+
+	val = DSI_BTA_TIMING_TTAGET(phy_timing.t_taget) |
+			DSI_BTA_TIMING_TTASURE(phy_timing.t_tasure) |
+			DSI_BTA_TIMING_TTAGO(phy_timing.t_tago);
+	tegra_dsi_writel(dsi, val, DSI_BTA_TIMING);
+
+	dsi->phy_timing = phy_timing;
+}
+
+static u32 tegra_dsi_sol_delay_burst(struct tegra_dc *dc,
+				struct tegra_dc_dsi_data *dsi)
+{
+	u32 dsi_to_pixel_clk_ratio;
+	u32 temp;
+	u32 temp1;
+	u32 mipi_clk_adj_kHz = 0;
+	u32 sol_delay;
+	struct tegra_dc_mode *dc_modes = &dc->mode;
+
+	/* Get the Fdsi/Fpixel ratio (note: Fdsi is in bit format) */
+	dsi_to_pixel_clk_ratio = (dsi->current_dsi_clk_khz * 2 +
+		dsi->default_pixel_clk_khz - 1) / dsi->default_pixel_clk_khz;
+
+	/* Convert Fdsi to byte format */
+	dsi_to_pixel_clk_ratio *= 1000/8;
+
+	/* Multiply by 1000 so that we don't lose the fraction part */
+	temp = dc_modes->h_active * 1000;
+	temp1 = dc_modes->h_active + dc_modes->h_back_porch +
+			dc_modes->h_sync_width;
+
+	sol_delay = temp1 * dsi_to_pixel_clk_ratio -
+			temp * dsi->pixel_scaler_mul /
+			(dsi->pixel_scaler_div * dsi->info.n_data_lanes);
+
+	/* Round up the sol delay */
+	sol_delay = DIV_ROUND_UP(sol_delay, 1000);
+
+	/* TODO:
+	 * 1. find out the correct sol fifo depth to use
+	 * 2. verify with hw about the clamping function
+	 */
+	if (sol_delay > (480 * 4)) {
+		sol_delay = (480 * 4);
+		mipi_clk_adj_kHz = sol_delay +
+			(dc_modes->h_active * dsi->pixel_scaler_mul) /
+			(dsi->info.n_data_lanes * dsi->pixel_scaler_div);
+
+		mipi_clk_adj_kHz *= (dsi->default_pixel_clk_khz / temp1);
+
+		mipi_clk_adj_kHz *= 4;
+	}
+
+	dsi->target_hs_clk_khz = mipi_clk_adj_kHz;
+
+	return sol_delay;
+}
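+
+/*
+ * Worked example (illustrative only, values assumed): with
+ * current_dsi_clk_khz = 90000 and default_pixel_clk_khz = 30000 the ratio
+ * is DIV_ROUND_UP(180000, 30000) = 6, scaled to 6 * 125 = 750 (byte ratio
+ * x 1000). For h_active = 800, mul/div = 3/1, 4 lanes and
+ * h_back_porch + h_sync_width = 100, sol_delay = 900 * 750 -
+ * 800000 * 3 / 4 = 75000, rounded to 75, well under the 480 * 4 clamp.
+ */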
+
+static void tegra_dsi_set_sol_delay(struct tegra_dc *dc,
+				struct tegra_dc_dsi_data *dsi)
+{
+	u32 sol_delay;
+
+	if (dsi->info.video_burst_mode == TEGRA_DSI_VIDEO_NONE_BURST_MODE ||
+		dsi->info.video_burst_mode ==
+				TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END) {
+#define VIDEO_FIFO_LATENCY_PIXEL_CLK 8
+		sol_delay = VIDEO_FIFO_LATENCY_PIXEL_CLK *
+			dsi->pixel_scaler_mul / dsi->pixel_scaler_div;
+#undef VIDEO_FIFO_LATENCY_PIXEL_CLK
+		dsi->status.clk_burst = DSI_CLK_BURST_NONE_BURST;
+	} else {
+		sol_delay = tegra_dsi_sol_delay_burst(dc, dsi);
+		dsi->status.clk_burst = DSI_CLK_BURST_BURST_MODE;
+	}
+
+	tegra_dsi_writel(dsi, DSI_SOL_DELAY_SOL_DELAY(sol_delay),
+								DSI_SOL_DELAY);
+}
+
+static void tegra_dsi_set_timeout(struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+	u32 bytes_per_frame;
+	u32 timeout = 0;
+
+	/* TODO: verify the following equation */
+	bytes_per_frame = dsi->current_dsi_clk_khz * 1000 * 2 /
+						(dsi->info.refresh_rate * 8);
+	timeout = bytes_per_frame / DSI_CYCLE_COUNTER_VALUE;
+	timeout = (timeout + DSI_HTX_TO_MARGIN) & 0xffff;
+
+	val = DSI_TIMEOUT_0_LRXH_TO(DSI_LRXH_TO_VALUE) |
+			DSI_TIMEOUT_0_HTX_TO(timeout);
+	tegra_dsi_writel(dsi, val, DSI_TIMEOUT_0);
+
+	if (dsi->info.panel_reset_timeout_msec)
+		timeout = (dsi->info.panel_reset_timeout_msec * 1000 * 1000)
+					/ dsi->current_bit_clk_ns;
+	else
+		timeout = DSI_PR_TO_VALUE;
+
+	val = DSI_TIMEOUT_1_PR_TO(timeout) |
+		DSI_TIMEOUT_1_TA_TO(DSI_TA_TO_VALUE);
+	tegra_dsi_writel(dsi, val, DSI_TIMEOUT_1);
+
+	val = DSI_TO_TALLY_P_RESET_STATUS(IN_RESET) |
+		DSI_TO_TALLY_TA_TALLY(DSI_TA_TALLY_VALUE) |
+		DSI_TO_TALLY_LRXH_TALLY(DSI_LRXH_TALLY_VALUE) |
+		DSI_TO_TALLY_HTX_TALLY(DSI_HTX_TALLY_VALUE);
+	tegra_dsi_writel(dsi, val, DSI_TO_TALLY);
+}
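+
+/*
+ * Worked example (illustrative only, values assumed): with
+ * current_dsi_clk_khz = 90000 and a 60 Hz refresh rate,
+ * bytes_per_frame = 90000 * 1000 * 2 / (60 * 8) = 375000, and the HTX
+ * timeout becomes 375000 / DSI_CYCLE_COUNTER_VALUE plus the
+ * DSI_HTX_TO_MARGIN slack, truncated to the 16-bit register field.
+ */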
+
+static void tegra_dsi_setup_video_mode_pkt_length(struct tegra_dc *dc,
+						struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+	u32 hact_pkt_len;
+	u32 hsa_pkt_len;
+	u32 hbp_pkt_len;
+	u32 hfp_pkt_len;
+
+	hact_pkt_len = dc->mode.h_active * dsi->pixel_scaler_mul /
+							dsi->pixel_scaler_div;
+	hsa_pkt_len = dc->mode.h_sync_width * dsi->pixel_scaler_mul /
+							dsi->pixel_scaler_div;
+	hbp_pkt_len = dc->mode.h_back_porch * dsi->pixel_scaler_mul /
+							dsi->pixel_scaler_div;
+	hfp_pkt_len = dc->mode.h_front_porch * dsi->pixel_scaler_mul /
+							dsi->pixel_scaler_div;
+
+	if (dsi->info.video_burst_mode !=
+				TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END)
+		hbp_pkt_len += hsa_pkt_len;
+
+	hsa_pkt_len -= DSI_HSYNC_BLNK_PKT_OVERHEAD;
+	hbp_pkt_len -= DSI_HBACK_PORCH_PKT_OVERHEAD;
+	hfp_pkt_len -= DSI_HFRONT_PORCH_PKT_OVERHEAD;
+
+	val = DSI_PKT_LEN_0_1_LENGTH_0(0) |
+			DSI_PKT_LEN_0_1_LENGTH_1(hsa_pkt_len);
+	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_0_1);
+
+	val = DSI_PKT_LEN_2_3_LENGTH_2(hbp_pkt_len) |
+			DSI_PKT_LEN_2_3_LENGTH_3(hact_pkt_len);
+	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_2_3);
+
+	val = DSI_PKT_LEN_4_5_LENGTH_4(hfp_pkt_len) |
+			DSI_PKT_LEN_4_5_LENGTH_5(0);
+	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_4_5);
+
+	val = DSI_PKT_LEN_6_7_LENGTH_6(0) | DSI_PKT_LEN_6_7_LENGTH_7(0);
+	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_6_7);
+}
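+
+/*
+ * Worked example (illustrative only, values assumed): at 24bpp
+ * (mul/div = 3/1) with h_active = 800 and h_sync_width = 10,
+ * hact_pkt_len = 2400 bytes and hsa_pkt_len = 30 bytes; in any mode other
+ * than NONE_BURST_MODE_WITH_SYNC_END the HSA bytes are folded into the
+ * back porch packet before the per-packet overheads are subtracted.
+ */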
+
+static void tegra_dsi_setup_cmd_mode_pkt_length(struct tegra_dc *dc,
+						struct tegra_dc_dsi_data *dsi)
+{
+	unsigned long	val;
+	unsigned long	act_bytes;
+
+	act_bytes = dc->mode.h_active * dsi->pixel_scaler_mul /
+			dsi->pixel_scaler_div + 1;
+
+	val = DSI_PKT_LEN_0_1_LENGTH_0(0) | DSI_PKT_LEN_0_1_LENGTH_1(0);
+	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_0_1);
+
+	val = DSI_PKT_LEN_2_3_LENGTH_2(0) | DSI_PKT_LEN_2_3_LENGTH_3(act_bytes);
+	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_2_3);
+
+	val = DSI_PKT_LEN_4_5_LENGTH_4(0) | DSI_PKT_LEN_4_5_LENGTH_5(act_bytes);
+	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_4_5);
+
+	val = DSI_PKT_LEN_6_7_LENGTH_6(0) | DSI_PKT_LEN_6_7_LENGTH_7(0x0f0f);
+	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_6_7);
+}
+
+static void tegra_dsi_set_pkt_length(struct tegra_dc *dc,
+				struct tegra_dc_dsi_data *dsi)
+{
+	if (dsi->driven_mode == TEGRA_DSI_DRIVEN_BY_HOST)
+		return;
+
+	if (dsi->info.video_data_type == TEGRA_DSI_VIDEO_TYPE_VIDEO_MODE)
+		tegra_dsi_setup_video_mode_pkt_length(dc, dsi);
+	else
+		tegra_dsi_setup_cmd_mode_pkt_length(dc, dsi);
+}
+
+static void tegra_dsi_set_pkt_seq(struct tegra_dc *dc,
+				struct tegra_dc_dsi_data *dsi)
+{
+	const u32 *pkt_seq;
+	u32 rgb_info;
+	u32 pkt_seq_3_5_rgb_lo;
+	u32 pkt_seq_3_5_rgb_hi;
+	u32	val;
+	u32 reg;
+	u8  i;
+
+	if (dsi->driven_mode == TEGRA_DSI_DRIVEN_BY_HOST)
+		return;
+
+	switch (dsi->info.pixel_format) {
+	case TEGRA_DSI_PIXEL_FORMAT_16BIT_P:
+		rgb_info = CMD_RGB_16BPP;
+		break;
+	case TEGRA_DSI_PIXEL_FORMAT_18BIT_P:
+		rgb_info = CMD_RGB_18BPP;
+		break;
+	case TEGRA_DSI_PIXEL_FORMAT_18BIT_NP:
+		rgb_info = CMD_RGB_18BPPNP;
+		break;
+	case TEGRA_DSI_PIXEL_FORMAT_24BIT_P:
+	default:
+		rgb_info = CMD_RGB_24BPP;
+		break;
+	}
+
+	pkt_seq_3_5_rgb_lo = 0;
+	pkt_seq_3_5_rgb_hi = 0;
+	if (dsi->info.video_data_type == TEGRA_DSI_VIDEO_TYPE_COMMAND_MODE)
+		pkt_seq = dsi_pkt_seq_cmd_mode;
+	else {
+		switch (dsi->info.video_burst_mode) {
+		case TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED:
+		case TEGRA_DSI_VIDEO_BURST_MODE_LOW_SPEED:
+		case TEGRA_DSI_VIDEO_BURST_MODE_MEDIUM_SPEED:
+		case TEGRA_DSI_VIDEO_BURST_MODE_FAST_SPEED:
+		case TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED:
+			pkt_seq_3_5_rgb_lo =
+					DSI_PKT_SEQ_3_LO_PKT_32_ID(rgb_info);
+			if (!dsi->info.no_pkt_seq_eot)
+				pkt_seq = dsi_pkt_seq_video_burst;
+			else
+				pkt_seq = dsi_pkt_seq_video_burst_no_eot;
+			break;
+		case TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END:
+			pkt_seq_3_5_rgb_hi =
+					DSI_PKT_SEQ_3_HI_PKT_34_ID(rgb_info);
+			pkt_seq = dsi_pkt_seq_video_non_burst_syne;
+			break;
+		case TEGRA_DSI_VIDEO_NONE_BURST_MODE:
+		default:
+			pkt_seq_3_5_rgb_lo =
+					DSI_PKT_SEQ_3_LO_PKT_32_ID(rgb_info);
+			pkt_seq = dsi_pkt_seq_video_non_burst;
+			break;
+		}
+	}
+
+	for (i = 0; i < NUMOF_PKT_SEQ; i++) {
+		val = pkt_seq[i];
+		reg = dsi_pkt_seq_reg[i];
+		if ((reg == DSI_PKT_SEQ_3_LO) || (reg == DSI_PKT_SEQ_5_LO))
+			val |= pkt_seq_3_5_rgb_lo;
+		if ((reg == DSI_PKT_SEQ_3_HI) || (reg == DSI_PKT_SEQ_5_HI))
+			val |= pkt_seq_3_5_rgb_hi;
+		tegra_dsi_writel(dsi, val, reg);
+	}
+}
+
+static void tegra_dsi_reset_underflow_overflow
+				(struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+
+	val = tegra_dsi_readl(dsi, DSI_STATUS);
+	val &= (DSI_STATUS_LB_OVERFLOW(0x1) | DSI_STATUS_LB_UNDERFLOW(0x1));
+	if (val) {
+		if (val & DSI_STATUS_LB_OVERFLOW(0x1))
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: video fifo overflow. Resetting flag\n");
+		if (val & DSI_STATUS_LB_UNDERFLOW(0x1))
+			dev_warn(&dsi->dc->ndev->dev,
+				"dsi: video fifo underflow. Resetting flag\n");
+		val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+		val |= DSI_HOST_CONTROL_FIFO_STAT_RESET(0x1);
+		tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+		udelay(5);
+	}
+}
+
+static void tegra_dsi_soft_reset(struct tegra_dc_dsi_data *dsi)
+{
+	u32 trigger;
+
+	tegra_dsi_writel(dsi,
+		DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE),
+		DSI_POWER_CONTROL);
+	/* stabilization delay */
+	udelay(300);
+
+	tegra_dsi_writel(dsi,
+		DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_ENABLE),
+		DSI_POWER_CONTROL);
+	/* stabilization delay */
+	udelay(300);
+
+	/* dsi HW does not automatically clear the host trigger bit
+	 * on dsi interface disable if the host fifo is empty or in
+	 * the middle of a host transmission.
+	 */
+	trigger = tegra_dsi_readl(dsi, DSI_TRIGGER);
+	if (trigger)
+		tegra_dsi_writel(dsi, 0x0, DSI_TRIGGER);
+}
+
+static void tegra_dsi_stop_dc_stream(struct tegra_dc *dc,
+					struct tegra_dc_dsi_data *dsi)
+{
+	/* Mask the MSF interrupt. */
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+		tegra_dc_mask_interrupt(dc, MSF_INT);
+
+	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+	tegra_dc_writel(dc, 0, DC_DISP_DISP_WIN_OPTIONS);
+	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	dsi->status.dc_stream = DSI_DC_STREAM_DISABLE;
+}
+
+static void tegra_dsi_stop_dc_stream_at_frame_end(struct tegra_dc *dc,
+						struct tegra_dc_dsi_data *dsi)
+{
+	int val;
+	long timeout;
+	u32 frame_period = DIV_ROUND_UP(S_TO_MS(1), dsi->info.refresh_rate);
+
+	init_completion(&dc->frame_end_complete);
+
+	/* unmask frame end interrupt */
+	val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+	tegra_dc_writel(dc, val | FRAME_END_INT, DC_CMD_INT_MASK);
+
+	tegra_dsi_stop_dc_stream(dc, dsi);
+
+	/* Wait for frame_end completion.
+	 * Timeout is 2 frame durations to accommodate for
+	 * internal delay.
+	 */
+	timeout = wait_for_completion_interruptible_timeout(
+			&dc->frame_end_complete,
+			msecs_to_jiffies(2 * frame_period));
+
+	/* Give the dsi HW 2 line times to catch up
+	 * with the pixels sent by dc.
+	 */
+	udelay(50);
+
+	tegra_dsi_soft_reset(dsi);
+
+	/* reinstate interrupt mask */
+	tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+
+	if (timeout == 0)
+		dev_warn(&dc->ndev->dev,
+			"DC doesn't stop at end of frame.\n");
+
+	tegra_dsi_reset_underflow_overflow(dsi);
+}
+
+static void tegra_dsi_start_dc_stream(struct tegra_dc *dc,
+					struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+
+	tegra_dc_writel(dc, DSI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+	/* TODO: clean up */
+	tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+			PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
+			DC_CMD_DISPLAY_POWER_CONTROL);
+
+	/* Configure one-shot mode or continuous mode */
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) {
+		/* disable LSPI/LCD_DE output */
+		val = PIN_OUTPUT_LSPI_OUTPUT_DIS;
+		tegra_dc_writel(dc, val, DC_COM_PIN_OUTPUT_ENABLE3);
+
+		/* enable MSF & set MSF polarity */
+		val = MSF_ENABLE | MSF_LSPI;
+		if (!dsi->info.te_polarity_low)
+			val |= MSF_POLARITY_HIGH;
+		else
+			val |= MSF_POLARITY_LOW;
+		tegra_dc_writel(dc, val, DC_CMD_DISPLAY_COMMAND_OPTION0);
+
+		/* set non-continuous mode */
+		tegra_dc_writel(dc, DISP_CTRL_MODE_NC_DISPLAY,
+						DC_CMD_DISPLAY_COMMAND);
+		tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+		tegra_dc_writel(dc, GENERAL_ACT_REQ | NC_HOST_TRIG,
+						DC_CMD_STATE_CONTROL);
+
+		/* Unmask the MSF interrupt. */
+		tegra_dc_unmask_interrupt(dc, MSF_INT);
+	} else {
+		/* set continuous mode */
+		tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY,
+						DC_CMD_DISPLAY_COMMAND);
+		tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+		tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+	}
+
+	dsi->status.dc_stream = DSI_DC_STREAM_ENABLE;
+}
+
+static void tegra_dsi_set_dc_clk(struct tegra_dc *dc,
+				struct tegra_dc_dsi_data *dsi)
+{
+	u32 shift_clk_div_register;
+	u32 val;
+
+	/* Get the corresponding register value of shift_clk_div. */
+	shift_clk_div_register = dsi->shift_clk_div * 2 - 2;
+
+	/* TODO: find out if PCD3 option is required */
+	val = PIXEL_CLK_DIVIDER_PCD1 |
+		SHIFT_CLK_DIVIDER(shift_clk_div_register);
+	tegra_dc_writel(dc, val, DC_DISP_DISP_CLOCK_CONTROL);
+}
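+
+/*
+ * Illustrative note (not from the original driver): inverting the
+ * expression above, a real divider of 3 is programmed as
+ * SHIFT_CLK_DIVIDER(4), and a register value n corresponds to a real
+ * divider of (n + 2) / 2.
+ */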
+
+static void tegra_dsi_set_dsi_clk(struct tegra_dc *dc,
+			struct tegra_dc_dsi_data *dsi, u32 clk)
+{
+	u32 rm;
+
+	/* Round down to a whole MHz */
+	rm = clk % 1000;
+	if (rm != 0)
+		clk -= rm;
+
+	/* Set up pixel clock */
+	dc->shift_clk_div = dsi->shift_clk_div;
+	dc->mode.pclk = (clk * 1000) / dsi->shift_clk_div;
+	/* TODO: Define the one-shot work delay in the board file. */
+	/* Since the refresh rate in one-shot mode is usually set larger than
+	 * the expected refresh rate, it needs at least 3 frame periods. The
+	 * shorter the one-shot work delay, the more power we save. */
+	dc->one_shot_delay_ms = 4 *
+			DIV_ROUND_UP(S_TO_MS(1), dsi->info.refresh_rate);
+
+	/* Enable DSI clock */
+	tegra_dc_setup_clk(dc, dsi->dsi_clk);
+	tegra_dsi_clk_enable(dsi);
+	tegra_periph_reset_deassert(dsi->dsi_clk);
+
+	dsi->current_dsi_clk_khz = clk_get_rate(dsi->dsi_clk) / 1000;
+	dsi->current_bit_clk_ns = 1000 * 1000 / (dsi->current_dsi_clk_khz * 2);
+}
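+
+/*
+ * Worked example (illustrative only, values assumed): for clk = 90000 kHz
+ * and shift_clk_div = 3, pclk = 90000 * 1000 / 3 = 30 MHz. The bit clock
+ * is double data rate, so current_bit_clk_ns = 1000000 / (90000 * 2) = 5,
+ * the integer-truncated period of the 180 MHz bit clock.
+ */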
+
+static void tegra_dsi_hs_clk_out_enable(struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+
+	val = tegra_dsi_readl(dsi, DSI_CONTROL);
+	val &= ~DSI_CONTROL_HS_CLK_CTRL(1);
+
+	if (dsi->info.video_clock_mode == TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS) {
+		val |= DSI_CONTROL_HS_CLK_CTRL(CONTINUOUS);
+		dsi->status.clk_mode = DSI_PHYCLK_CONTINUOUS;
+	} else {
+		val |= DSI_CONTROL_HS_CLK_CTRL(TX_ONLY);
+		dsi->status.clk_mode = DSI_PHYCLK_TX_ONLY;
+	}
+	tegra_dsi_writel(dsi, val, DSI_CONTROL);
+
+	val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+	val &= ~DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(1);
+	val |= DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_HIGH);
+	tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+
+	dsi->status.clk_out = DSI_PHYCLK_OUT_EN;
+}
+
+static void tegra_dsi_hs_clk_out_enable_in_lp(struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+
+	tegra_dsi_hs_clk_out_enable(dsi);
+
+	val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+	val &= ~DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(1);
+	val |= DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_LOW);
+	tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+}
+
+static void tegra_dsi_hs_clk_out_disable(struct tegra_dc *dc,
+						struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+
+	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+	tegra_dsi_writel(dsi, TEGRA_DSI_DISABLE, DSI_POWER_CONTROL);
+	/* stabilization delay */
+	udelay(300);
+
+	val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+	val &= ~DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(1);
+	val |= DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_LOW);
+	tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+
+	tegra_dsi_writel(dsi, TEGRA_DSI_ENABLE, DSI_POWER_CONTROL);
+	/* stabilization delay */
+	udelay(300);
+
+	dsi->status.clk_mode = DSI_PHYCLK_NOT_INIT;
+	dsi->status.clk_out = DSI_PHYCLK_OUT_DIS;
+}
+
+static void tegra_dsi_set_control_reg_lp(struct tegra_dc_dsi_data *dsi)
+{
+	u32 dsi_control;
+	u32 host_dsi_control;
+	u32 max_threshold;
+
+	dsi_control = dsi->dsi_control_val | DSI_CTRL_HOST_DRIVEN;
+	host_dsi_control = HOST_DSI_CTRL_COMMON |
+			HOST_DSI_CTRL_HOST_DRIVEN |
+			DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_LOW);
+	max_threshold = DSI_MAX_THRESHOLD_MAX_THRESHOLD(DSI_HOST_FIFO_DEPTH);
+
+	tegra_dsi_writel(dsi, max_threshold, DSI_MAX_THRESHOLD);
+	tegra_dsi_writel(dsi, dsi_control, DSI_CONTROL);
+	tegra_dsi_writel(dsi, host_dsi_control, DSI_HOST_DSI_CONTROL);
+
+	dsi->status.driven = DSI_DRIVEN_MODE_HOST;
+	dsi->status.clk_burst = DSI_CLK_BURST_NOT_INIT;
+	dsi->status.vtype = DSI_VIDEO_TYPE_NOT_INIT;
+}
+
+static void tegra_dsi_set_control_reg_hs(struct tegra_dc_dsi_data *dsi,
+						u8 driven_mode)
+{
+	u32 dsi_control;
+	u32 host_dsi_control;
+	u32 max_threshold;
+	u32 dcs_cmd;
+
+	dsi_control = dsi->dsi_control_val;
+	host_dsi_control = HOST_DSI_CTRL_COMMON;
+	max_threshold = 0;
+	dcs_cmd = 0;
+
+	if (driven_mode == TEGRA_DSI_DRIVEN_BY_HOST) {
+		dsi_control |= DSI_CTRL_HOST_DRIVEN;
+		host_dsi_control |= HOST_DSI_CTRL_HOST_DRIVEN;
+		max_threshold =
+			DSI_MAX_THRESHOLD_MAX_THRESHOLD(DSI_HOST_FIFO_DEPTH);
+		dsi->status.driven = DSI_DRIVEN_MODE_HOST;
+	} else {
+		dsi_control |= DSI_CTRL_DC_DRIVEN;
+		host_dsi_control |= HOST_DSI_CTRL_DC_DRIVEN;
+		max_threshold =
+			DSI_MAX_THRESHOLD_MAX_THRESHOLD(DSI_VIDEO_FIFO_DEPTH);
+		dsi->status.driven = DSI_DRIVEN_MODE_DC;
+
+		if (dsi->info.video_data_type ==
+			TEGRA_DSI_VIDEO_TYPE_COMMAND_MODE) {
+			dsi_control |= DSI_CTRL_CMD_MODE;
+			dcs_cmd = DSI_DCS_CMDS_LT5_DCS_CMD(
+				DSI_WRITE_MEMORY_START)|
+				DSI_DCS_CMDS_LT3_DCS_CMD(
+				DSI_WRITE_MEMORY_CONTINUE);
+			dsi->status.vtype = DSI_VIDEO_TYPE_CMD_MODE;
+		} else {
+			dsi_control |= DSI_CTRL_VIDEO_MODE;
+			dsi->status.vtype = DSI_VIDEO_TYPE_VIDEO_MODE;
+		}
+	}
+
+	tegra_dsi_writel(dsi, max_threshold, DSI_MAX_THRESHOLD);
+	tegra_dsi_writel(dsi, dcs_cmd, DSI_DCS_CMDS);
+	tegra_dsi_writel(dsi, dsi_control, DSI_CONTROL);
+	tegra_dsi_writel(dsi, host_dsi_control, DSI_HOST_DSI_CONTROL);
+}
+
+static void tegra_dsi_pad_calibration(struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+
+	val =	DSI_PAD_CONTROL_PAD_LPUPADJ(0x1) |
+		DSI_PAD_CONTROL_PAD_LPDNADJ(0x1) |
+		DSI_PAD_CONTROL_PAD_PREEMP_EN(0x1) |
+		DSI_PAD_CONTROL_PAD_SLEWDNADJ(0x6) |
+		DSI_PAD_CONTROL_PAD_SLEWUPADJ(0x6);
+	if (!dsi->ulpm) {
+		val |=	DSI_PAD_CONTROL_PAD_PDIO(0) |
+			DSI_PAD_CONTROL_PAD_PDIO_CLK(0) |
+			DSI_PAD_CONTROL_PAD_PULLDN_ENAB(TEGRA_DSI_DISABLE);
+	} else {
+		val |=	DSI_PAD_CONTROL_PAD_PDIO(0x3) |
+			DSI_PAD_CONTROL_PAD_PDIO_CLK(0x1) |
+			DSI_PAD_CONTROL_PAD_PULLDN_ENAB(TEGRA_DSI_ENABLE);
+	}
+	tegra_dsi_writel(dsi, val, DSI_PAD_CONTROL);
+
+	val = MIPI_CAL_TERMOSA(0x4);
+	tegra_vi_csi_writel(val, CSI_CILA_MIPI_CAL_CONFIG_0);
+
+	val = MIPI_CAL_TERMOSB(0x4);
+	tegra_vi_csi_writel(val, CSI_CILB_MIPI_CAL_CONFIG_0);
+
+	val = MIPI_CAL_HSPUOSD(0x3) | MIPI_CAL_HSPDOSD(0x4);
+	tegra_vi_csi_writel(val, CSI_DSI_MIPI_CAL_CONFIG);
+
+	val = PAD_DRIV_DN_REF(0x5) | PAD_DRIV_UP_REF(0x7);
+	tegra_vi_csi_writel(val, CSI_MIPIBIAS_PAD_CONFIG);
+
+	val = PAD_CIL_PDVREG(0x0);
+	tegra_vi_csi_writel(val, CSI_CIL_PAD_CONFIG);
+}
+
+static void tegra_dsi_panelB_enable(void)
+{
+	unsigned int val;
+	void __iomem *to_mem = ioremap(APB_MISC_GP_MIPI_PAD_CTRL_0, 32);
+
+	if (!to_mem)
+		return;
+
+	val = readl(to_mem);
+	val |= DSIB_MODE_ENABLE;
+	writel(val, to_mem);
+	iounmap(to_mem);
+}
+
+static int tegra_dsi_init_hw(struct tegra_dc *dc,
+						struct tegra_dc_dsi_data *dsi)
+{
+	u32 i;
+
+	tegra_dsi_writel(dsi,
+		DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE),
+		DSI_POWER_CONTROL);
+	/* stabilization delay */
+	udelay(300);
+
+	tegra_dsi_set_dsi_clk(dc, dsi, dsi->target_lp_clk_khz);
+	if (dsi->info.dsi_instance)
+		tegra_dsi_panelB_enable();
+
+	/* TODO: only need to change the timing for bta */
+	tegra_dsi_set_phy_timing(dsi, DSI_LPHS_IN_LP_MODE);
+
+	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+	/* Initializing DSI registers */
+	for (i = 0; i < ARRAY_SIZE(init_reg); i++)
+		tegra_dsi_writel(dsi, 0, init_reg[i]);
+
+	tegra_dsi_writel(dsi, dsi->dsi_control_val, DSI_CONTROL);
+
+	tegra_dsi_pad_calibration(dsi);
+
+	tegra_dsi_writel(dsi,
+		DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_ENABLE),
+		DSI_POWER_CONTROL);
+	/* stabilization delay */
+	udelay(300);
+
+	dsi->status.init = DSI_MODULE_INIT;
+	dsi->status.lphs = DSI_LPHS_NOT_INIT;
+	dsi->status.vtype = DSI_VIDEO_TYPE_NOT_INIT;
+	dsi->status.driven = DSI_DRIVEN_MODE_NOT_INIT;
+	dsi->status.clk_out = DSI_PHYCLK_OUT_DIS;
+	dsi->status.clk_mode = DSI_PHYCLK_NOT_INIT;
+	dsi->status.clk_burst = DSI_CLK_BURST_NOT_INIT;
+	dsi->status.dc_stream = DSI_DC_STREAM_DISABLE;
+	dsi->status.lp_op = DSI_LP_OP_NOT_INIT;
+
+	return 0;
+}
+
+static int tegra_dsi_set_to_lp_mode(struct tegra_dc *dc,
+			struct tegra_dc_dsi_data *dsi, u8 lp_op)
+{
+	int err;
+
+	if (dsi->status.init != DSI_MODULE_INIT) {
+		err = -EPERM;
+		goto fail;
+	}
+
+	if (dsi->status.lphs == DSI_LPHS_IN_LP_MODE &&
+			dsi->status.lp_op == lp_op)
+		goto success;
+
+	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+	/* disable/enable hs clk according to enable_hs_clock_on_lp_cmd_mode */
+	if ((dsi->status.clk_out == DSI_PHYCLK_OUT_EN) &&
+		(!dsi->info.enable_hs_clock_on_lp_cmd_mode))
+		tegra_dsi_hs_clk_out_disable(dc, dsi);
+
+	dsi->target_lp_clk_khz = tegra_dsi_get_lp_clk_rate(dsi, lp_op);
+	if (dsi->current_dsi_clk_khz != dsi->target_lp_clk_khz) {
+		tegra_dsi_set_dsi_clk(dc, dsi, dsi->target_lp_clk_khz);
+		tegra_dsi_set_timeout(dsi);
+	}
+
+	tegra_dsi_set_phy_timing(dsi, DSI_LPHS_IN_LP_MODE);
+
+	tegra_dsi_set_control_reg_lp(dsi);
+
+	if ((dsi->status.clk_out == DSI_PHYCLK_OUT_DIS) &&
+		(dsi->info.enable_hs_clock_on_lp_cmd_mode))
+		tegra_dsi_hs_clk_out_enable_in_lp(dsi);
+
+	dsi->status.lphs = DSI_LPHS_IN_LP_MODE;
+	dsi->status.lp_op = lp_op;
+	dsi->driven_mode = TEGRA_DSI_DRIVEN_BY_HOST;
+success:
+	err = 0;
+fail:
+	return err;
+}
+
+static int tegra_dsi_set_to_hs_mode(struct tegra_dc *dc,
+					struct tegra_dc_dsi_data *dsi,
+					u8 driven_mode)
+{
+	int err;
+
+	if (dsi->status.init != DSI_MODULE_INIT) {
+		err = -EPERM;
+		goto fail;
+	}
+
+	if (dsi->status.lphs == DSI_LPHS_IN_HS_MODE &&
+		dsi->driven_mode == driven_mode)
+		goto success;
+
+	dsi->driven_mode = driven_mode;
+
+	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+	if ((dsi->status.clk_out == DSI_PHYCLK_OUT_EN) &&
+		(!dsi->info.enable_hs_clock_on_lp_cmd_mode))
+		tegra_dsi_hs_clk_out_disable(dc, dsi);
+
+	if (dsi->current_dsi_clk_khz != dsi->target_hs_clk_khz) {
+		tegra_dsi_set_dsi_clk(dc, dsi, dsi->target_hs_clk_khz);
+		tegra_dsi_set_timeout(dsi);
+	}
+
+	tegra_dsi_set_phy_timing(dsi, DSI_LPHS_IN_HS_MODE);
+
+	if (driven_mode == TEGRA_DSI_DRIVEN_BY_DC) {
+		tegra_dsi_set_pkt_seq(dc, dsi);
+		tegra_dsi_set_pkt_length(dc, dsi);
+		tegra_dsi_set_sol_delay(dc, dsi);
+		tegra_dsi_set_dc_clk(dc, dsi);
+	}
+
+	tegra_dsi_set_control_reg_hs(dsi, driven_mode);
+
+	if (dsi->status.clk_out == DSI_PHYCLK_OUT_DIS ||
+		dsi->info.enable_hs_clock_on_lp_cmd_mode)
+		tegra_dsi_hs_clk_out_enable(dsi);
+
+	dsi->status.lphs = DSI_LPHS_IN_HS_MODE;
+success:
+	dsi->status.lp_op = DSI_LP_OP_NOT_INIT;
+	err = 0;
+fail:
+	return err;
+}
+
+static bool tegra_dsi_write_busy(struct tegra_dc_dsi_data *dsi)
+{
+	u32 timeout = 0;
+	bool retval = true;
+
+	while (timeout <= DSI_MAX_COMMAND_DELAY_USEC) {
+		if (!(DSI_TRIGGER_HOST_TRIGGER(0x1) &
+			tegra_dsi_readl(dsi, DSI_TRIGGER))) {
+			retval = false;
+			break;
+		}
+		udelay(DSI_COMMAND_DELAY_STEPS_USEC);
+		timeout += DSI_COMMAND_DELAY_STEPS_USEC;
+	}
+
+	return retval;
+}
+
+static bool tegra_dsi_read_busy(struct tegra_dc_dsi_data *dsi)
+{
+	u32 timeout = 0;
+	bool retval = true;
+
+	while (timeout < DSI_STATUS_POLLING_DURATION_USEC) {
+		if (!(DSI_HOST_DSI_CONTROL_IMM_BTA(0x1) &
+			tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL))) {
+			retval = false;
+			break;
+		}
+		udelay(DSI_STATUS_POLLING_DELAY_USEC);
+		timeout += DSI_STATUS_POLLING_DELAY_USEC;
+	}
+
+	return retval;
+}
+
+static bool tegra_dsi_host_busy(struct tegra_dc_dsi_data *dsi)
+{
+	int err = 0;
+
+	if (tegra_dsi_write_busy(dsi)) {
+		err = -EBUSY;
+		dev_err(&dsi->dc->ndev->dev,
+			"DSI trigger bit already set\n");
+		goto fail;
+	}
+
+	if (tegra_dsi_read_busy(dsi)) {
+		err = -EBUSY;
+		dev_err(&dsi->dc->ndev->dev,
+			"DSI immediate bta bit already set\n");
+		goto fail;
+	}
+fail:
+	return err < 0;
+}
+
+static void tegra_dsi_reset_read_count(struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+
+	val = tegra_dsi_readl(dsi, DSI_STATUS);
+	val &= DSI_STATUS_RD_FIFO_COUNT(0x1f);
+	if (val) {
+		dev_warn(&dsi->dc->ndev->dev,
+			"DSI read count not zero, resetting\n");
+		tegra_dsi_soft_reset(dsi);
+	}
+}
+
+static struct dsi_status *tegra_dsi_save_state_switch_to_host_cmd_mode(
+						struct tegra_dc_dsi_data *dsi,
+						struct tegra_dc *dc,
+						u8 lp_op)
+{
+	struct dsi_status *init_status = NULL;
+	int err;
+
+	if (dsi->status.init != DSI_MODULE_INIT ||
+		dsi->status.lphs == DSI_LPHS_NOT_INIT) {
+		err = -EPERM;
+		goto fail;
+	}
+
+	init_status = kzalloc(sizeof(*init_status), GFP_KERNEL);
+	if (!init_status)
+		return ERR_PTR(-ENOMEM);
+
+	*init_status = dsi->status;
+
+	if (dsi->info.hs_cmd_mode_supported) {
+		err = tegra_dsi_set_to_hs_mode(dc, dsi,
+				TEGRA_DSI_DRIVEN_BY_HOST);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+			"Switch to HS host mode failed\n");
+			goto fail;
+		}
+
+		goto success;
+	}
+
+	if (dsi->status.lp_op != lp_op) {
+		err = tegra_dsi_set_to_lp_mode(dc, dsi, lp_op);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+			"DSI failed to go to LP mode\n");
+			goto fail;
+		}
+	}
+success:
+	return init_status;
+fail:
+	kfree(init_status);
+	return ERR_PTR(err);
+}
+
+static struct dsi_status *tegra_dsi_prepare_host_transmission(
+				struct tegra_dc *dc,
+				struct tegra_dc_dsi_data *dsi,
+				u8 lp_op)
+{
+	int err = 0;
+	struct dsi_status *init_status;
+	bool restart_dc_stream = false;
+
+	if (dsi->status.init != DSI_MODULE_INIT ||
+		dsi->ulpm) {
+		err = -EPERM;
+		goto fail;
+	}
+
+	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE) {
+		restart_dc_stream = true;
+		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+	}
+
+	if (tegra_dsi_host_busy(dsi)) {
+		tegra_dsi_soft_reset(dsi);
+		if (tegra_dsi_host_busy(dsi)) {
+			err = -EBUSY;
+			dev_err(&dc->ndev->dev, "DSI host busy\n");
+			goto fail;
+		}
+	}
+
+	if (lp_op == DSI_LP_OP_READ)
+		tegra_dsi_reset_read_count(dsi);
+
+	if (dsi->status.lphs == DSI_LPHS_NOT_INIT) {
+		err = tegra_dsi_set_to_lp_mode(dc, dsi, lp_op);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev, "Failed to config LP write\n");
+			goto fail;
+		}
+	}
+
+	init_status = tegra_dsi_save_state_switch_to_host_cmd_mode
+					(dsi, dc, lp_op);
+	if (IS_ERR_OR_NULL(init_status)) {
+		err = PTR_ERR(init_status);
+		dev_err(&dc->ndev->dev, "DSI state saving failed\n");
+		goto fail;
+	}
+
+	if (restart_dc_stream)
+		init_status->dc_stream = DSI_DC_STREAM_ENABLE;
+
+	return init_status;
+fail:
+	return ERR_PTR(err);
+}
+
+static int tegra_dsi_restore_state(struct tegra_dc *dc,
+				struct tegra_dc_dsi_data *dsi,
+				struct dsi_status *init_status)
+{
+	int err = 0;
+
+	if (init_status->lphs == DSI_LPHS_IN_LP_MODE) {
+		err = tegra_dsi_set_to_lp_mode(dc, dsi, init_status->lp_op);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+				"Failed to config LP mode\n");
+			goto fail;
+		}
+		goto success;
+	}
+
+	if (init_status->lphs == DSI_LPHS_IN_HS_MODE) {
+		u8 driven = (init_status->driven == DSI_DRIVEN_MODE_DC) ?
+			TEGRA_DSI_DRIVEN_BY_DC : TEGRA_DSI_DRIVEN_BY_HOST;
+		err = tegra_dsi_set_to_hs_mode(dc, dsi, driven);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev, "Failed to config HS mode\n");
+			goto fail;
+		}
+	}
+
+	if (init_status->dc_stream == DSI_DC_STREAM_ENABLE)
+		tegra_dsi_start_dc_stream(dc, dsi);
+success:
+fail:
+	kfree(init_status);
+	return err;
+}
+
+static int tegra_dsi_host_trigger(struct tegra_dc_dsi_data *dsi)
+{
+	int status = 0;
+
+	if (tegra_dsi_readl(dsi, DSI_TRIGGER)) {
+		status = -EBUSY;
+		goto fail;
+	}
+
+	tegra_dsi_writel(dsi,
+		DSI_TRIGGER_HOST_TRIGGER(TEGRA_DSI_ENABLE), DSI_TRIGGER);
+
+#if DSI_USE_SYNC_POINTS
+	status = tegra_dsi_syncpt(dsi);
+	if (status < 0) {
+		dev_err(&dsi->dc->ndev->dev,
+			"DSI syncpt for host trigger failed\n");
+		goto fail;
+	}
+#else
+	if (tegra_dsi_write_busy(dsi)) {
+		status = -EBUSY;
+		dev_err(&dsi->dc->ndev->dev,
+			"Timeout waiting on write completion\n");
+	}
+#endif
+
+fail:
+	return status;
+}
+
+static int _tegra_dsi_write_data(struct tegra_dc_dsi_data *dsi,
+					u8 *pdata, u8 data_id, u16 data_len)
+{
+	u8 virtual_channel;
+	u32 val;
+	int err = 0;
+
+	virtual_channel = dsi->info.virtual_channel <<
+						DSI_VIR_CHANNEL_BIT_POSITION;
+
+	/* always use hw for ecc */
+	val = (virtual_channel | data_id) << 0 |
+			data_len << 8;
+	tegra_dsi_writel(dsi, val, DSI_WR_DATA);
+
+	/* if pdata != NULL, pkt type is long pkt */
+	if (pdata != NULL) {
+		while (data_len) {
+			if (data_len >= 4) {
+				val = ((u32 *) pdata)[0];
+				data_len -= 4;
+				pdata += 4;
+			} else {
+				val = 0;
+				memcpy(&val, pdata, data_len);
+				pdata += data_len;
+				data_len = 0;
+			}
+			tegra_dsi_writel(dsi, val, DSI_WR_DATA);
+		}
+	}
+
+	err = tegra_dsi_host_trigger(dsi);
+	if (err < 0)
+		dev_err(&dsi->dc->ndev->dev, "DSI host trigger failed\n");
+
+	return err;
+}
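+
+/*
+ * Illustrative note (not from the original driver): the first FIFO word
+ * is the packet header. Assuming the virtual channel bits live at
+ * DSI_VIR_CHANNEL_BIT_POSITION = 6, a DCS long write (data_id 0x39) of
+ * 5 payload bytes on virtual channel 0 writes 0x00000539: data ID in
+ * byte 0, little-endian word count in bytes 1-2, ECC filled in by HW.
+ */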
+
+static void tegra_dc_dsi_hold_host(struct tegra_dc *dc)
+{
+	struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_LP_MODE) {
+		spin_lock(&dsi->host_ref_lock);
+		dsi->host_ref++;
+		spin_unlock(&dsi->host_ref_lock);
+		tegra_dsi_host_resume(dc);
+
+		/*
+		 * Take an extra reference to account for the clk_disable in
+		 * tegra_dc_dsi_release_host.
+		 */
+		clk_prepare_enable(dc->clk);
+	}
+}
+
+static void tegra_dc_dsi_release_host(struct tegra_dc *dc)
+{
+	struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_LP_MODE) {
+		clk_disable_unprepare(dc->clk);
+		spin_lock(&dsi->host_ref_lock);
+		dsi->host_ref--;
+
+		if (!dsi->host_ref &&
+		    (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE))
+			schedule_delayed_work(&dsi->idle_work, dsi->idle_delay);
+
+		spin_unlock(&dsi->host_ref_lock);
+	}
+}
+
+static void tegra_dc_dsi_idle_work(struct work_struct *work)
+{
+	struct tegra_dc_dsi_data *dsi = container_of(
+		to_delayed_work(work), struct tegra_dc_dsi_data, idle_work);
+
+	if (dsi->dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_LP_MODE)
+		tegra_dsi_host_suspend(dsi->dc);
+}
+
+int tegra_dsi_write_data(struct tegra_dc *dc,
+			struct tegra_dc_dsi_data *dsi,
+			u8 *pdata, u8 data_id, u16 data_len)
+{
+	int err = 0;
+	struct dsi_status *init_status;
+
+	tegra_dc_io_start(dc);
+
+	init_status = tegra_dsi_prepare_host_transmission(
+				dc, dsi, DSI_LP_OP_WRITE);
+	if (IS_ERR(init_status)) {
+		err = PTR_ERR(init_status);
+		dev_err(&dc->ndev->dev, "DSI host config failed\n");
+		goto fail;
+	}
+
+	err = _tegra_dsi_write_data(dsi, pdata, data_id, data_len);
+
+	err = tegra_dsi_restore_state(dc, dsi, init_status);
+	if (err < 0)
+		dev_err(&dc->ndev->dev, "Failed to restore prev state\n");
+fail:
+	tegra_dc_io_end(dc);
+
+	return err;
+}
+EXPORT_SYMBOL(tegra_dsi_write_data);
+
+static int tegra_dsi_send_panel_cmd(struct tegra_dc *dc,
+					struct tegra_dc_dsi_data *dsi,
+					struct tegra_dsi_cmd *cmd,
+					u32 n_cmd)
+{
+	u32 i;
+	int err = 0;
+
+	for (i = 0; i < n_cmd; i++) {
+		struct tegra_dsi_cmd *cur_cmd;
+		cur_cmd = &cmd[i];
+
+		if (cur_cmd->cmd_type == TEGRA_DSI_DELAY_MS)
+			mdelay(cur_cmd->sp_len_dly.delay_ms);
+		else {
+			err = tegra_dsi_write_data(dc, dsi,
+						cur_cmd->pdata,
+						cur_cmd->data_id,
+						cur_cmd->sp_len_dly.data_len);
+			if (err < 0)
+				break;
+		}
+	}
+	return err;
+}
+
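+/*
+ * Compute the ECC byte for a 24-bit packet header, per the Hamming
+ * parity table in the MIPI DSI spec: every header bit that is set XORs
+ * its parity pattern into the result, e.g. a header with only bit 0
+ * set yields ecc_parity[0] = 0x07.
+ */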
+static u8 tegra_dsi_ecc(u32 header)
+{
+	static const u8 ecc_parity[24] = {
+		0x07, 0x0b, 0x0d, 0x0e, 0x13, 0x15, 0x16, 0x19,
+		0x1a, 0x1c, 0x23, 0x25, 0x26, 0x29, 0x2a, 0x2c,
+		0x31, 0x32, 0x34, 0x38, 0x1f, 0x2f, 0x37, 0x3b
+	};
+	u8 ecc_byte;
+	int i;
+
+	ecc_byte = 0;
+	for (i = 0; i < 24; i++)
+		ecc_byte ^= ((header >> i) & 1) ? ecc_parity[i] : 0x00;
+
+	return ecc_byte;
+}
+
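+/*
+ * Compute the 16-bit checksum of a long packet payload. This is the
+ * CRC-16/CCITT variant used by DSI: reflected polynomial 0x8408
+ * (x^16 + x^12 + x^5 + 1), initial value 0xFFFF, processed LSB first.
+ */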
+static u16 tegra_dsi_cs(char *pdata, u16 data_len)
+{
+	u16 byte_cnt;
+	u8 bit_cnt;
+	char curr_byte;
+	u16 crc = 0xFFFF;
+	u16 poly = 0x8408;
+
+	if (data_len > 0) {
+		for (byte_cnt = 0; byte_cnt < data_len; byte_cnt++) {
+			curr_byte = pdata[byte_cnt];
+			for (bit_cnt = 0; bit_cnt < 8; bit_cnt++) {
+				if (((crc & 0x0001) ^
+					(curr_byte & 0x0001)) > 0)
+					crc = ((crc >> 1) & 0x7FFF) ^ poly;
+				else
+					crc = (crc >> 1) & 0x7FFF;
+
+				curr_byte = (curr_byte >> 1) & 0x7F;
+			}
+		}
+	}
+	return crc;
+}
+
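+/*
+ * Load a complete DCS packet (header, payload and software-computed
+ * checksum) into the DSI_INIT_SEQ_DATA registers. Unlike the host FIFO
+ * path, this sequence is sent by hardware during vertical blanking once
+ * DSI_INIT_SEQ_CONTROL is armed, so the ECC and checksum must be filled
+ * in here rather than by the packet engine.
+ */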
+static int tegra_dsi_dcs_pkt_seq_ctrl_init(struct tegra_dc_dsi_data *dsi,
+						struct tegra_dsi_cmd *cmd)
+{
+	u8 virtual_channel;
+	u32 val;
+	u16 data_len = cmd->sp_len_dly.data_len;
+	u8 seq_ctrl_reg = 0;
+
+	virtual_channel = dsi->info.virtual_channel <<
+				DSI_VIR_CHANNEL_BIT_POSITION;
+
+	val = (virtual_channel | cmd->data_id) << 0 |
+		data_len << 8;
+
+	val |= tegra_dsi_ecc(val) << 24;
+
+	tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_0 + seq_ctrl_reg++);
+
+	/* if pdata != NULL, pkt type is long pkt */
+	if (cmd->pdata != NULL) {
+		u8 *pdata;
+		u8 *pdata_mem;
+		/* allocate memory for pdata + 2 bytes checksum */
+		pdata_mem = kzalloc(data_len + 2, GFP_KERNEL);
+		if (!pdata_mem) {
+			dev_err(&dsi->dc->ndev->dev, "dsi: memory err\n");
+			tegra_dsi_soft_reset(dsi);
+			return -ENOMEM;
+		}
+
+		memcpy(pdata_mem, cmd->pdata, data_len);
+		pdata = pdata_mem;
+		*((u16 *)(pdata + data_len)) = tegra_dsi_cs(pdata, data_len);
+
+		/* data_len = length of pdata + 2 byte checksum */
+		data_len += 2;
+
+		while (data_len) {
+			if (data_len >= 4) {
+				val = ((u32 *) pdata)[0];
+				data_len -= 4;
+				pdata += 4;
+			} else {
+				val = 0;
+				memcpy(&val, pdata, data_len);
+				pdata += data_len;
+				data_len = 0;
+			}
+			tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_0 +
+							seq_ctrl_reg++);
+		}
+		kfree(pdata_mem);
+	}
+
+	return 0;
+}
+
+int tegra_dsi_start_host_cmd_v_blank_dcs(struct tegra_dc_dsi_data *dsi,
+						struct tegra_dsi_cmd *cmd)
+{
+#define PKT_HEADER_LEN_BYTE	4
+#define CHECKSUM_LEN_BYTE	2
+
+	int err = 0;
+	u32 val;
+	u16 tot_pkt_len = PKT_HEADER_LEN_BYTE;
+	struct tegra_dc *dc = dsi->dc;
+
+	if (cmd->cmd_type != TEGRA_DSI_PACKET_CMD)
+		return -EINVAL;
+
+	mutex_lock(&dsi->lock);
+	tegra_dc_dsi_hold_host(dc);
+
+	tegra_dc_io_start(dc);
+
+	err = tegra_dsi_dcs_pkt_seq_ctrl_init(dsi, cmd);
+	if (err < 0) {
+		dev_err(&dsi->dc->ndev->dev,
+			"dsi: dcs pkt seq ctrl init failed\n");
+		goto fail;
+	}
+
+	if (cmd->pdata) {
+		u16 data_len = cmd->sp_len_dly.data_len;
+		tot_pkt_len += data_len + CHECKSUM_LEN_BYTE;
+	}
+
+	val = DSI_INIT_SEQ_CONTROL_DSI_FRAME_INIT_BYTE_COUNT(tot_pkt_len) |
+		DSI_INIT_SEQ_CONTROL_DSI_SEND_INIT_SEQUENCE(
+						TEGRA_DSI_ENABLE);
+	tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_CONTROL);
+
+fail:
+	tegra_dc_io_end(dc);
+	tegra_dc_dsi_release_host(dc);
+	mutex_unlock(&dsi->lock);
+	return err;
+
+#undef PKT_HEADER_LEN_BYTE
+#undef CHECKSUM_LEN_BYTE
+}
+EXPORT_SYMBOL(tegra_dsi_start_host_cmd_v_blank_dcs);
+
+void tegra_dsi_stop_host_cmd_v_blank_dcs(struct tegra_dc_dsi_data *dsi)
+{
+	struct tegra_dc *dc = dsi->dc;
+	u32 cnt;
+
+	mutex_lock(&dsi->lock);
+	tegra_dc_dsi_hold_host(dc);
+
+	tegra_dc_io_start(dc);
+
+	tegra_dsi_writel(dsi, TEGRA_DSI_DISABLE, DSI_INIT_SEQ_CONTROL);
+
+	/* clear seq data registers */
+	for (cnt = 0; cnt < 8; cnt++)
+		tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + cnt);
+
+	tegra_dc_io_end(dc);
+
+	tegra_dc_dsi_release_host(dc);
+	mutex_unlock(&dsi->lock);
+}
+EXPORT_SYMBOL(tegra_dsi_stop_host_cmd_v_blank_dcs);
+
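+/*
+ * Issue an immediate bus turnaround (BTA) so the peripheral can drive
+ * the bus and return read data. Completion is detected either by a
+ * sync point increment on OP_DONE or, without sync points, by polling
+ * the host busy status.
+ */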
+static int tegra_dsi_bta(struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+	u32 poll_time;
+	int err;
+
+	poll_time = 0;
+	err = 0;
+
+	val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+	val |= DSI_HOST_DSI_CONTROL_IMM_BTA(TEGRA_DSI_ENABLE);
+	tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+
+#if DSI_USE_SYNC_POINTS
+	dsi->syncpt_val = nvhost_syncpt_read_ext(dsi->dc->ndev,
+			dsi->syncpt_id);
+
+	val = DSI_INCR_SYNCPT_COND(OP_DONE) |
+		DSI_INCR_SYNCPT_INDX(dsi->syncpt_id);
+	tegra_dsi_writel(dsi, val, DSI_INCR_SYNCPT);
+
+	/* TODO: Use interrupt rather than polling */
+	err = nvhost_syncpt_wait_timeout_ext(dsi->dc->ndev, dsi->syncpt_id,
+		dsi->syncpt_val + 1, MAX_SCHEDULE_TIMEOUT, NULL);
+	if (err < 0)
+		dev_err(&dsi->dc->ndev->dev,
+			"DSI sync point failure\n");
+	else
+		(dsi->syncpt_val)++;
+#else
+	if (tegra_dsi_read_busy(dsi)) {
+		err = -EBUSY;
+		dev_err(&dsi->dc->ndev->dev,
+			"Timeout waiting on read completion\n");
+	}
+#endif
+
+	return err;
+}
+
+static int tegra_dsi_parse_read_response(struct tegra_dc *dc,
+					u32 rd_fifo_cnt, u8 *read_fifo)
+{
+	int err;
+	u32 payload_size;
+
+	payload_size = 0;
+	err = 0;
+
+	switch (read_fifo[0]) {
+	case DSI_ESCAPE_CMD:
+		dev_info(&dc->ndev->dev, "escape cmd[0x%x]\n", read_fifo[0]);
+		break;
+	case DSI_ACK_NO_ERR:
+		dev_info(&dc->ndev->dev,
+			"Panel ack, no err[0x%x]\n", read_fifo[0]);
+		return err;
+	default:
+		dev_info(&dc->ndev->dev, "Invalid read response\n");
+		break;
+	}
+
+	switch (read_fifo[4] & 0xff) {
+	case GEN_LONG_RD_RES:
+		/* Fall through */
+	case DCS_LONG_RD_RES:
+		payload_size = (read_fifo[5] |
+				(read_fifo[6] << 8)) & 0xFFFF;
+		dev_info(&dc->ndev->dev, "Long read response Packet\n"
+				"payload_size[0x%x]\n", payload_size);
+		break;
+	case GEN_1_BYTE_SHORT_RD_RES:
+		/* Fall through */
+	case DCS_1_BYTE_SHORT_RD_RES:
+		payload_size = 1;
+		dev_info(&dc->ndev->dev, "Short read response Packet\n"
+			"payload_size[0x%x]\n", payload_size);
+		break;
+	case GEN_2_BYTE_SHORT_RD_RES:
+		/* Fall through */
+	case DCS_2_BYTE_SHORT_RD_RES:
+		payload_size = 2;
+		dev_info(&dc->ndev->dev, "Short read response Packet\n"
+			"payload_size[0x%x]\n", payload_size);
+		break;
+	case ACK_ERR_RES:
+		payload_size = 2;
+		dev_info(&dc->ndev->dev, "Acknowledge error report response\n"
+			"Packet payload_size[0x%x]\n", payload_size);
+		break;
+	default:
+		dev_info(&dc->ndev->dev, "Invalid response packet\n");
+		err = -EINVAL;
+		break;
+	}
+	return err;
+}
+
+static int tegra_dsi_read_fifo(struct tegra_dc *dc,
+			struct tegra_dc_dsi_data *dsi,
+			u8 *read_fifo)
+{
+	u32 val;
+	u32 i;
+	u32 poll_time = 0;
+	u32 rd_fifo_cnt;
+	int err = 0;
+	u8 *read_fifo_cp = read_fifo;
+
+	while (poll_time < DSI_DELAY_FOR_READ_FIFO) {
+		mdelay(1);
+		val = tegra_dsi_readl(dsi, DSI_STATUS);
+		rd_fifo_cnt = val & DSI_STATUS_RD_FIFO_COUNT(0x1f);
+		if (rd_fifo_cnt << 2 > DSI_READ_FIFO_DEPTH) {
+			dev_err(&dc->ndev->dev,
+			"DSI RD_FIFO_CNT is greater than RD_FIFO_DEPTH\n");
+			break;
+		}
+		poll_time++;
+	}
+
+	if (rd_fifo_cnt == 0) {
+		dev_info(&dc->ndev->dev,
+			"DSI RD_FIFO_CNT is zero\n");
+		err = -EINVAL;
+		goto fail;
+	}
+
+	if (val & (DSI_STATUS_LB_UNDERFLOW(0x1) |
+		DSI_STATUS_LB_OVERFLOW(0x1))) {
+		dev_warn(&dc->ndev->dev,
+			"DSI overflow/underflow error\n");
+	}
+
+	/* Read data from FIFO */
+	for (i = 0; i < rd_fifo_cnt; i++) {
+		val = tegra_dsi_readl(dsi, DSI_RD_DATA);
+		if (enable_read_debug)
+			dev_info(&dc->ndev->dev,
+			"Read data[%d]: 0x%x\n", i, val);
+		memcpy(read_fifo, &val, 4);
+		read_fifo += 4;
+	}
+
+	/* Make sure all the data is read from the FIFO */
+	val = tegra_dsi_readl(dsi, DSI_STATUS);
+	val &= DSI_STATUS_RD_FIFO_COUNT(0x1f);
+	if (val)
+		dev_err(&dc->ndev->dev, "DSI FIFO_RD_CNT not zero"
+		" even after reading FIFO_RD_CNT words from read fifo\n");
+
+	if (enable_read_debug) {
+		err =
+		tegra_dsi_parse_read_response(dc, rd_fifo_cnt, read_fifo_cp);
+		if (err < 0)
+			dev_warn(&dc->ndev->dev, "Unexpected read data\n");
+	}
+fail:
+	return err;
+}
+
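+/*
+ * Read a panel register over DSI: cap the reply with a "set maximum
+ * return packet size" command, issue the DCS read, switch the host to
+ * LP read mode, perform a bus turnaround, then drain the read FIFO
+ * into read_data. A hypothetical caller reading two bytes of DCS
+ * register 0x0a would do:
+ *
+ *	u8 buf[DSI_READ_FIFO_DEPTH];
+ *
+ *	err = tegra_dsi_read_data(dc, dsi, 2, 0x0a, buf);
+ */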
+int tegra_dsi_read_data(struct tegra_dc *dc,
+				struct tegra_dc_dsi_data *dsi,
+				u32 max_ret_payload_size,
+				u32 panel_reg_addr, u8 *read_data)
+{
+	int err = 0;
+	struct dsi_status *init_status;
+
+	mutex_lock(&dsi->lock);
+	tegra_dc_io_start(dc);
+
+	init_status = tegra_dsi_prepare_host_transmission(
+				dc, dsi, DSI_LP_OP_WRITE);
+	if (IS_ERR(init_status)) {
+		err = PTR_ERR(init_status);
+		dev_err(&dc->ndev->dev, "DSI host config failed\n");
+		goto fail;
+	}
+
+	/* Set max return payload size in words */
+	err = _tegra_dsi_write_data(dsi, NULL,
+		dsi_command_max_return_pkt_size,
+		max_ret_payload_size);
+	if (err < 0) {
+		dev_err(&dc->ndev->dev,
+				"DSI write failed\n");
+		goto fail;
+	}
+
+	/* DCS to read given panel register */
+	err = _tegra_dsi_write_data(dsi, NULL,
+		dsi_command_dcs_read_with_no_params,
+		panel_reg_addr);
+	if (err < 0) {
+		dev_err(&dc->ndev->dev,
+				"DSI write failed\n");
+		goto fail;
+	}
+
+	tegra_dsi_reset_read_count(dsi);
+
+	if (dsi->status.lp_op == DSI_LP_OP_WRITE) {
+		err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_READ);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+			"DSI failed to go to LP read mode\n");
+			goto fail;
+		}
+	}
+
+	err = tegra_dsi_bta(dsi);
+	if (err < 0) {
+		dev_err(&dc->ndev->dev,
+			"DSI IMM BTA timeout\n");
+		goto fail;
+	}
+
+	err = tegra_dsi_read_fifo(dc, dsi, read_data);
+	if (err < 0) {
+		dev_err(&dc->ndev->dev, "DSI read fifo failure\n");
+		goto fail;
+	}
+fail:
+	if (!IS_ERR_OR_NULL(init_status) &&
+	    tegra_dsi_restore_state(dc, dsi, init_status) < 0)
+		dev_err(&dc->ndev->dev, "Failed to restore prev state\n");
+	tegra_dc_io_end(dc);
+	mutex_unlock(&dsi->lock);
+	return err;
+}
+EXPORT_SYMBOL(tegra_dsi_read_data);
+
+int tegra_dsi_panel_sanity_check(struct tegra_dc *dc,
+				struct tegra_dc_dsi_data *dsi)
+{
+	int err = 0;
+	u8 read_fifo[DSI_READ_FIFO_DEPTH];
+	struct dsi_status *init_status;
+	static struct tegra_dsi_cmd dsi_nop_cmd =
+			DSI_CMD_SHORT(0x05, 0x0, 0x0);
+
+	tegra_dc_io_start(dc);
+
+	init_status = tegra_dsi_prepare_host_transmission(
+					dc, dsi, DSI_LP_OP_WRITE);
+	if (IS_ERR_OR_NULL(init_status)) {
+		err = init_status ? PTR_ERR(init_status) : -EINVAL;
+		dev_err(&dc->ndev->dev, "DSI host config failed\n");
+		goto fail;
+	}
+
+	err = _tegra_dsi_write_data(dsi, NULL, dsi_nop_cmd.data_id, 0x0);
+	if (err < 0) {
+		dev_err(&dc->ndev->dev, "DSI nop write failed\n");
+		goto fail;
+	}
+
+	tegra_dsi_reset_read_count(dsi);
+
+	if (dsi->status.lp_op == DSI_LP_OP_WRITE) {
+		err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_READ);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+			"DSI failed to go to LP read mode\n");
+			goto fail;
+		}
+	}
+
+	err = tegra_dsi_bta(dsi);
+	if (err < 0) {
+		dev_err(&dc->ndev->dev, "DSI BTA failed\n");
+		goto fail;
+	}
+
+	err = tegra_dsi_read_fifo(dc, dsi, read_fifo);
+	if (err < 0) {
+		dev_err(&dc->ndev->dev, "DSI read fifo failure\n");
+		goto fail;
+	}
+
+	if (read_fifo[0] != DSI_ACK_NO_ERR) {
+		dev_warn(&dc->ndev->dev,
+			"Ack no error trigger message not received\n");
+		err = -EAGAIN;
+	}
+fail:
+	if (!IS_ERR_OR_NULL(init_status) &&
+	    tegra_dsi_restore_state(dc, dsi, init_status) < 0)
+		dev_err(&dc->ndev->dev, "Failed to restore prev state\n");
+	tegra_dc_io_end(dc);
+	return err;
+}
+EXPORT_SYMBOL(tegra_dsi_panel_sanity_check);
+
+static int tegra_dsi_enter_ulpm(struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+	int ret;
+
+	ret = 0;
+
+	val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+	val &= ~DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(3);
+	val |= DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(ENTER_ULPM);
+	tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+
+#if DSI_USE_SYNC_POINTS
+	ret = tegra_dsi_syncpt(dsi);
+	if (ret < 0) {
+		dev_err(&dsi->dc->ndev->dev,
+			"DSI syncpt for ulpm enter failed\n");
+		goto fail;
+	}
+#else
+	/* TODO: Find exact delay required */
+	mdelay(10);
+#endif
+	dsi->ulpm = true;
+fail:
+	return ret;
+}
+
+static int tegra_dsi_exit_ulpm(struct tegra_dc_dsi_data *dsi)
+{
+	u32 val;
+	int ret;
+
+	ret = 0;
+
+	val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+	val &= ~DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(3);
+	val |= DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(EXIT_ULPM);
+	tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+
+#if DSI_USE_SYNC_POINTS
+	ret = tegra_dsi_syncpt(dsi);
+	if (ret < 0) {
+		dev_err(&dsi->dc->ndev->dev,
+			"DSI syncpt for ulpm exit failed\n");
+		goto fail;
+	}
+#else
+	/* TODO: Find exact delay required */
+	mdelay(10);
+#endif
+	dsi->ulpm = false;
+
+	val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
+	val &= ~DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(0x3);
+	val |= DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(NORMAL);
+	tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
+fail:
+	return ret;
+}
+
+static void tegra_dsi_send_dc_frames(struct tegra_dc *dc,
+				     struct tegra_dc_dsi_data *dsi,
+				     int no_of_frames)
+{
+	int err;
+	u32 frame_period = DIV_ROUND_UP(S_TO_MS(1), dsi->info.refresh_rate);
+	u8 lp_op = dsi->status.lp_op;
+	bool switch_to_lp = (dsi->status.lphs == DSI_LPHS_IN_LP_MODE);
+
+	if (dsi->status.lphs != DSI_LPHS_IN_HS_MODE) {
+		err = tegra_dsi_set_to_hs_mode(dc, dsi,
+				TEGRA_DSI_DRIVEN_BY_DC);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+				"Switch to HS host mode failed\n");
+			return;
+		}
+	}
+
+	/*
+	 * Some panels need DC frames to be sent under certain
+	 * conditions. We are working on the right fix for this
+	 * requirement, while using this current fix.
+	 */
+	tegra_dsi_start_dc_stream(dc, dsi);
+
+	/*
+	 * Send frames in Continuous or One-shot mode.
+	 */
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) {
+		while (no_of_frames--) {
+			tegra_dc_writel(dc, GENERAL_ACT_REQ | NC_HOST_TRIG,
+					DC_CMD_STATE_CONTROL);
+			mdelay(frame_period);
+		}
+	} else
+		mdelay(no_of_frames * frame_period);
+
+	tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+	if (switch_to_lp) {
+		err = tegra_dsi_set_to_lp_mode(dc, dsi, lp_op);
+		if (err < 0)
+			dev_err(&dc->ndev->dev,
+				"DSI failed to go to LP mode\n");
+	}
+}
+
+static void tegra_dc_dsi_enable(struct tegra_dc *dc)
+{
+	struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+	int err;
+	u32 val;
+
+	mutex_lock(&dsi->lock);
+	tegra_dc_dsi_hold_host(dc);
+
+	tegra_dc_io_start(dc);
+	/*
+	 * Stop the DC stream before configuring DSI registers to avoid
+	 * visible glitches on the panel during the transition from
+	 * bootloader to kernel driver.
+	 */
+	tegra_dsi_stop_dc_stream(dc, dsi);
+
+	if (dsi->enabled) {
+		if (dsi->ulpm) {
+			if (tegra_dsi_exit_ulpm(dsi) < 0) {
+				dev_err(&dc->ndev->dev,
+					"DSI failed to exit ulpm\n");
+				goto fail;
+			}
+		}
+
+		if (dsi->info.panel_reset) {
+			/*
+			 * Certain panels need DC frames to be sent before
+			 * waking the panel.
+			 */
+			if (dsi->info.panel_send_dc_frames)
+				tegra_dsi_send_dc_frames(dc, dsi, 2);
+
+			err = tegra_dsi_send_panel_cmd(dc, dsi,
+							dsi->info.dsi_init_cmd,
+							dsi->info.n_init_cmd);
+			if (err < 0) {
+				dev_err(&dc->ndev->dev,
+				"dsi: error sending dsi init cmd\n");
+				goto fail;
+			}
+		} else if (dsi->info.dsi_late_resume_cmd) {
+			err = tegra_dsi_send_panel_cmd(dc, dsi,
+						dsi->info.dsi_late_resume_cmd,
+						dsi->info.n_late_resume_cmd);
+			if (err < 0) {
+				dev_err(&dc->ndev->dev,
+				"dsi: error sending late resume cmd\n");
+				goto fail;
+			}
+		}
+	} else {
+		err = tegra_dsi_init_hw(dc, dsi);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+				"dsi: not able to init dsi hardware\n");
+			goto fail;
+		}
+
+		if (dsi->ulpm) {
+			if (tegra_dsi_enter_ulpm(dsi) < 0) {
+				dev_err(&dc->ndev->dev,
+					"DSI failed to enter ulpm\n");
+				goto fail;
+			}
+
+			val = tegra_dsi_readl(dsi, DSI_PAD_CONTROL);
+
+			/* erase bits we're about to set */
+			val &= ~(DSI_PAD_CONTROL_PAD_PDIO(0x3) |
+				DSI_PAD_CONTROL_PAD_PDIO_CLK(0x1) |
+				DSI_PAD_CONTROL_PAD_PULLDN_ENAB(0x1));
+
+			val |= (DSI_PAD_CONTROL_PAD_PDIO(0) |
+				DSI_PAD_CONTROL_PAD_PDIO_CLK(0) |
+				DSI_PAD_CONTROL_PAD_PULLDN_ENAB
+						(TEGRA_DSI_DISABLE));
+
+			tegra_dsi_writel(dsi, val, DSI_PAD_CONTROL);
+			if (tegra_dsi_exit_ulpm(dsi) < 0) {
+				dev_err(&dc->ndev->dev,
+					"DSI failed to exit ulpm\n");
+				goto fail;
+			}
+		}
+
+		/*
+		 * Certain panels need DC frames to be sent before
+		 * waking the panel.
+		 */
+		if (dsi->info.panel_send_dc_frames)
+			tegra_dsi_send_dc_frames(dc, dsi, 2);
+
+		err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_WRITE);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+				"dsi: not able to set to lp mode\n");
+			goto fail;
+		}
+
+		err = tegra_dsi_send_panel_cmd(dc, dsi, dsi->info.dsi_init_cmd,
+						dsi->info.n_init_cmd);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+				"dsi: error while sending dsi init cmd\n");
+			goto fail;
+		}
+
+		err = tegra_dsi_set_to_hs_mode(dc, dsi,
+				TEGRA_DSI_DRIVEN_BY_DC);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+				"dsi: not able to set to hs mode\n");
+			goto fail;
+		}
+
+		dsi->enabled = true;
+	}
+
+	if (dsi->status.driven == DSI_DRIVEN_MODE_DC)
+		tegra_dsi_start_dc_stream(dc, dsi);
+fail:
+	tegra_dc_io_end(dc);
+	tegra_dc_dsi_release_host(dc);
+	mutex_unlock(&dsi->lock);
+}
+
+static void _tegra_dc_dsi_init(struct tegra_dc *dc)
+{
+	struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+
+	tegra_dc_dsi_debug_create(dsi);
+	tegra_dsi_init_sw(dc, dsi);
+	/* TODO: Configure the CSI pad configuration */
+}
+
+static int tegra_dc_dsi_cp_p_cmd(struct tegra_dsi_cmd *src,
+					struct tegra_dsi_cmd *dst, u16 n_cmd)
+{
+	int i;
+	u16 len;
+
+	memcpy(dst, src, sizeof(*dst) * n_cmd);
+
+	for (i = 0; i < n_cmd; i++)
+		if (src[i].pdata) {
+			len = sizeof(*src[i].pdata) *
+					src[i].sp_len_dly.data_len;
+			dst[i].pdata = kzalloc(len, GFP_KERNEL);
+			if (!dst[i].pdata)
+				goto free_cmd_pdata;
+			memcpy(dst[i].pdata, src[i].pdata, len);
+		}
+
+	return 0;
+
+free_cmd_pdata:
+	for (--i; i >= 0; i--)
+		kfree(dst[i].pdata);
+	return -ENOMEM;
+}
+
+static int tegra_dc_dsi_cp_info(struct tegra_dc_dsi_data *dsi,
+					struct tegra_dsi_out *p_dsi)
+{
+	struct tegra_dsi_cmd *p_init_cmd;
+	struct tegra_dsi_cmd *p_early_suspend_cmd = NULL;
+	struct tegra_dsi_cmd *p_late_resume_cmd = NULL;
+	struct tegra_dsi_cmd *p_suspend_cmd;
+	int err;
+
+	if (p_dsi->n_data_lanes > MAX_DSI_DATA_LANES)
+		return -EINVAL;
+
+	p_init_cmd = kzalloc(sizeof(*p_init_cmd) *
+				p_dsi->n_init_cmd, GFP_KERNEL);
+	if (!p_init_cmd)
+		return -ENOMEM;
+
+	if (p_dsi->dsi_early_suspend_cmd) {
+		p_early_suspend_cmd = kzalloc(sizeof(*p_early_suspend_cmd) *
+					p_dsi->n_early_suspend_cmd,
+					GFP_KERNEL);
+		if (!p_early_suspend_cmd) {
+			err = -ENOMEM;
+			goto err_free_init_cmd;
+		}
+	}
+
+	if (p_dsi->dsi_late_resume_cmd) {
+		p_late_resume_cmd = kzalloc(sizeof(*p_late_resume_cmd) *
+					p_dsi->n_late_resume_cmd,
+					GFP_KERNEL);
+		if (!p_late_resume_cmd) {
+			err = -ENOMEM;
+			goto err_free_p_early_suspend_cmd;
+		}
+	}
+
+	p_suspend_cmd = kzalloc(sizeof(*p_suspend_cmd) * p_dsi->n_suspend_cmd,
+				GFP_KERNEL);
+	if (!p_suspend_cmd) {
+		err = -ENOMEM;
+		goto err_free_p_late_resume_cmd;
+	}
+
+	memcpy(&dsi->info, p_dsi, sizeof(dsi->info));
+
+	/* Copy panel init cmd */
+	err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_init_cmd,
+						p_init_cmd, p_dsi->n_init_cmd);
+	if (err < 0)
+		goto err_free;
+	dsi->info.dsi_init_cmd = p_init_cmd;
+
+	/* Copy panel early suspend cmd */
+	if (p_dsi->dsi_early_suspend_cmd) {
+		err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_early_suspend_cmd,
+					p_early_suspend_cmd,
+					p_dsi->n_early_suspend_cmd);
+		if (err < 0)
+			goto err_free;
+		dsi->info.dsi_early_suspend_cmd = p_early_suspend_cmd;
+	}
+
+	/* Copy panel late resume cmd */
+	if (p_dsi->dsi_late_resume_cmd) {
+		err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_late_resume_cmd,
+						p_late_resume_cmd,
+						p_dsi->n_late_resume_cmd);
+		if (err < 0)
+			goto err_free;
+		dsi->info.dsi_late_resume_cmd = p_late_resume_cmd;
+	}
+
+	/* Copy panel suspend cmd */
+	err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_suspend_cmd, p_suspend_cmd,
+					p_dsi->n_suspend_cmd);
+	if (err < 0)
+		goto err_free;
+	dsi->info.dsi_suspend_cmd = p_suspend_cmd;
+
+	if (!dsi->info.panel_reset_timeout_msec)
+		dsi->info.panel_reset_timeout_msec =
+						DEFAULT_PANEL_RESET_TIMEOUT;
+
+	if (!dsi->info.panel_buffer_size_byte)
+		dsi->info.panel_buffer_size_byte = DEFAULT_PANEL_BUFFER_BYTE;
+
+	if (!dsi->info.max_panel_freq_khz) {
+		dsi->info.max_panel_freq_khz = DEFAULT_MAX_DSI_PHY_CLK_KHZ;
+
+		if (dsi->info.video_burst_mode >
+				TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END){
+			dev_err(&dsi->dc->ndev->dev, "DSI: max_panel_freq_khz "
+					"is not set for DSI burst mode.\n");
+			dsi->info.video_burst_mode =
+				TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED;
+		}
+	}
+
+	if (!dsi->info.lp_cmd_mode_freq_khz)
+		dsi->info.lp_cmd_mode_freq_khz = DEFAULT_LP_CMD_MODE_CLK_KHZ;
+
+	if (!dsi->info.chip_id || !dsi->info.chip_rev)
+		dev_warn(&dsi->dc->ndev->dev,
+			"DSI: Failed to get chip info\n");
+
+	if (!dsi->info.lp_read_cmd_mode_freq_khz)
+		dsi->info.lp_read_cmd_mode_freq_khz =
+			dsi->info.lp_cmd_mode_freq_khz;
+
+	/* host mode is for testing only */
+	dsi->driven_mode = TEGRA_DSI_DRIVEN_BY_DC;
+	return 0;
+
+err_free:
+	kfree(p_suspend_cmd);
+err_free_p_late_resume_cmd:
+	kfree(p_late_resume_cmd);
+err_free_p_early_suspend_cmd:
+	kfree(p_early_suspend_cmd);
+err_free_init_cmd:
+	kfree(p_init_cmd);
+	return err;
+}
+
+static int tegra_dc_dsi_init(struct tegra_dc *dc)
+{
+	struct tegra_dc_dsi_data *dsi;
+	struct resource *res;
+	struct resource *base_res;
+	void __iomem *base;
+	struct clk *dc_clk = NULL;
+	struct clk *dsi_clk = NULL;
+	struct clk *dsi_fixed_clk = NULL;
+	struct tegra_dsi_out *dsi_pdata;
+	int err;
+
+	err = 0;
+
+	dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
+	if (!dsi)
+		return -ENOMEM;
+
+	res = nvhost_get_resource_byname(dc->ndev, IORESOURCE_MEM,
+					"dsi_regs");
+	if (!res) {
+		dev_err(&dc->ndev->dev, "dsi: no mem resource\n");
+		err = -ENOENT;
+		goto err_free_dsi;
+	}
+
+	base_res = request_mem_region(res->start, resource_size(res),
+				dc->ndev->name);
+	if (!base_res) {
+		dev_err(&dc->ndev->dev, "dsi: request_mem_region failed\n");
+		err = -EBUSY;
+		goto err_free_dsi;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		dev_err(&dc->ndev->dev, "dsi: registers can't be mapped\n");
+		err = -EBUSY;
+		goto err_release_regs;
+	}
+
+	dsi_pdata = dc->pdata->default_out->dsi;
+	if (!dsi_pdata) {
+		dev_err(&dc->ndev->dev, "dsi: dsi data not available\n");
+		err = -ENOENT;
+		goto err_iounmap;
+	}
+
+	if (dsi_pdata->dsi_instance)
+		dsi_clk = clk_get(&dc->ndev->dev, "dsib");
+	else
+		dsi_clk = clk_get(&dc->ndev->dev, "dsia");
+	dsi_fixed_clk = clk_get(&dc->ndev->dev, "dsi-fixed");
+
+	if (IS_ERR_OR_NULL(dsi_clk) || IS_ERR_OR_NULL(dsi_fixed_clk)) {
+		dev_err(&dc->ndev->dev, "dsi: can't get clock\n");
+		err = -EBUSY;
+		goto err_iounmap;
+	}
+
+	dc_clk = clk_get_sys(dev_name(&dc->ndev->dev), NULL);
+	if (IS_ERR_OR_NULL(dc_clk)) {
+		dev_err(&dc->ndev->dev, "dsi: dc clock %s unavailable\n",
+			dev_name(&dc->ndev->dev));
+		err = -EBUSY;
+		goto err_clk_put;
+	}
+
+	mutex_init(&dsi->lock);
+	dsi->dc = dc;
+	dsi->base = base;
+	dsi->base_res = base_res;
+	dsi->dc_clk = dc_clk;
+	dsi->dsi_clk = dsi_clk;
+	dsi->dsi_fixed_clk = dsi_fixed_clk;
+
+	err = tegra_dc_dsi_cp_info(dsi, dsi_pdata);
+	if (err < 0)
+		goto err_dsi_data;
+
+	tegra_dc_set_outdata(dc, dsi);
+	_tegra_dc_dsi_init(dc);
+
+	return 0;
+
+err_dsi_data:
+	clk_put(dc_clk);
+err_clk_put:
+	clk_put(dsi_clk);
+	clk_put(dsi_fixed_clk);
+err_iounmap:
+	iounmap(base);
+err_release_regs:
+	release_resource(base_res);
+err_free_dsi:
+	kfree(dsi);
+
+	return err;
+}
+
+static void tegra_dc_dsi_destroy(struct tegra_dc *dc)
+{
+	struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+	u16 i;
+	u32 val;
+
+	mutex_lock(&dsi->lock);
+
+	/* free up the pdata */
+	for (i = 0; i < dsi->info.n_init_cmd; i++) {
+		if (dsi->info.dsi_init_cmd[i].pdata)
+			kfree(dsi->info.dsi_init_cmd[i].pdata);
+	}
+	kfree(dsi->info.dsi_init_cmd);
+
+	/* Disable dc stream */
+	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+	/* Disable dsi phy clock */
+	if (dsi->status.clk_out == DSI_PHYCLK_OUT_EN)
+		tegra_dsi_hs_clk_out_disable(dc, dsi);
+
+	val = DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE);
+	tegra_dsi_writel(dsi, val, DSI_POWER_CONTROL);
+
+	iounmap(dsi->base);
+	release_resource(dsi->base_res);
+
+	clk_put(dsi->dc_clk);
+	clk_put(dsi->dsi_clk);
+	clk_put(dsi->dsi_fixed_clk);
+
+	mutex_unlock(&dsi->lock);
+
+	mutex_destroy(&dsi->lock);
+	kfree(dsi);
+}
+
+static void tegra_dsi_config_phy_clk(struct tegra_dc_dsi_data *dsi,
+							u32 settings)
+{
+	struct clk *parent_clk = NULL;
+	struct clk *base_clk = NULL;
+
+	/* Disable dsi fast and slow clock */
+	parent_clk = clk_get_parent(dsi->dsi_clk);
+	base_clk = clk_get_parent(parent_clk);
+	if (dsi->info.dsi_instance)
+		tegra_clk_cfg_ex(base_clk,
+				TEGRA_CLK_PLLD_CSI_OUT_ENB,
+				settings);
+	else
+		tegra_clk_cfg_ex(base_clk,
+				TEGRA_CLK_PLLD_DSI_OUT_ENB,
+				settings);
+}
+
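+/*
+ * Enter a suspend state of the requested aggressiveness. The switch
+ * below intentionally falls through so that each level also performs
+ * everything a lighter level would: FULL additionally suspends the
+ * panel itself, LV2 parks the pads in ULPM and powers down the core
+ * logic, LV1 gates the PHY clocks and LV0 only gates the source clock.
+ */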
+static int tegra_dsi_deep_sleep(struct tegra_dc *dc,
+				struct tegra_dc_dsi_data *dsi, u32 suspend_aggr)
+{
+	int val = 0;
+	int err = 0;
+
+	if (!dsi->enabled) {
+		err = -EPERM;
+		goto fail;
+	}
+
+	switch (suspend_aggr) {
+	case DSI_SUSPEND_FULL:
+		/* Suspend DSI panel */
+		err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_WRITE);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+			"DSI failed to go to LP mode\n");
+			goto fail;
+		}
+
+		err = tegra_dsi_send_panel_cmd(dc, dsi,
+				dsi->info.dsi_suspend_cmd,
+				dsi->info.n_suspend_cmd);
+		/*
+		 * Certain panels need DC frames to be sent after
+		 * putting the panel to sleep.
+		 */
+		if (dsi->info.panel_send_dc_frames)
+			tegra_dsi_send_dc_frames(dc, dsi, 2);
+
+		if (err < 0) {
+			dev_err(&dc->ndev->dev,
+				"dsi: Error sending suspend cmd\n");
+			goto fail;
+		}
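+		/* fall through */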
+	case DSI_HOST_SUSPEND_LV2:
+		/*
+		 * Set DSI to ULPM and suspend the pads. DSI is put into its
+		 * lowest power state at this level.
+		 */
+		if (!dsi->ulpm) {
+			err = tegra_dsi_enter_ulpm(dsi);
+			if (err < 0) {
+				dev_err(&dc->ndev->dev,
+					"DSI failed to enter ulpm\n");
+				goto fail;
+			}
+		}
+
+		val = DSI_PAD_CONTROL_PAD_PDIO(0x3) |
+			DSI_PAD_CONTROL_PAD_PDIO_CLK(0x1) |
+			DSI_PAD_CONTROL_PAD_PULLDN_ENAB(TEGRA_DSI_ENABLE);
+		tegra_dsi_writel(dsi, val, DSI_PAD_CONTROL);
+
+		/* Suspend core-logic */
+		val = DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE);
+		tegra_dsi_writel(dsi, val, DSI_POWER_CONTROL);
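+		/* fall through */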
+	case DSI_HOST_SUSPEND_LV1:
+		/* Disable dsi fast and slow clock */
+		tegra_dsi_config_phy_clk(dsi, TEGRA_DSI_DISABLE);
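+		/* fall through */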
+	case DSI_HOST_SUSPEND_LV0:
+		/* Disable dsi source clock */
+		tegra_dsi_clk_disable(dsi);
+		break;
+	case DSI_NO_SUSPEND:
+		break;
+	default:
+		dev_err(&dc->ndev->dev, "DSI suspend aggressiveness "
+						"is not supported.\n");
+	}
+
+	dsi->enabled = false;
+
+	return 0;
+fail:
+	return err;
+}
+
+static int tegra_dsi_host_suspend(struct tegra_dc *dc)
+{
+	int err = 0;
+	struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+
+	if (dsi->host_suspended)
+		return 0;
+
+	BUG_ON(!tegra_is_clk_enabled(dc->clk));
+	tegra_dc_io_start(dc);
+	dsi->host_suspended = true;
+
+	tegra_dsi_stop_dc_stream(dc, dsi);
+
+	err = tegra_dsi_deep_sleep(dc, dsi, dsi->info.suspend_aggr);
+	if (err < 0)
+		dev_err(&dc->ndev->dev,
+			"DSI failed to enter deep sleep\n");
+
+	tegra_dc_clk_disable(dc);
+
+	tegra_dc_io_end(dc);
+	return err;
+}
+
+static int tegra_dsi_host_resume(struct tegra_dc *dc)
+{
+	int val = 0;
+	int err = 0;
+	struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+
+	mutex_lock(&dsi->host_resume_lock);
+	cancel_delayed_work_sync(&dsi->idle_work);
+	if (!dsi->host_suspended) {
+		mutex_unlock(&dsi->host_resume_lock);
+		return 0;
+	}
+
+	tegra_dc_clk_enable(dc);
+	switch (dsi->info.suspend_aggr) {
+	case DSI_HOST_SUSPEND_LV0:
+		tegra_dsi_clk_enable(dsi);
+		break;
+	case DSI_HOST_SUSPEND_LV1:
+		tegra_dsi_config_phy_clk(dsi, TEGRA_DSI_ENABLE);
+		tegra_dsi_clk_enable(dsi);
+		break;
+	case DSI_HOST_SUSPEND_LV2:
+		tegra_dsi_config_phy_clk(dsi, TEGRA_DSI_ENABLE);
+		tegra_dsi_clk_enable(dsi);
+
+		tegra_dsi_writel(dsi,
+			DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_ENABLE),
+			DSI_POWER_CONTROL);
+
+		if (dsi->ulpm) {
+			err = tegra_dsi_enter_ulpm(dsi);
+			if (err < 0) {
+				dev_err(&dc->ndev->dev,
+					"DSI failed to enter ulpm\n");
+				goto fail;
+			}
+
+			val = tegra_dsi_readl(dsi, DSI_PAD_CONTROL);
+			val &= ~(DSI_PAD_CONTROL_PAD_PDIO(0x3) |
+				DSI_PAD_CONTROL_PAD_PDIO_CLK(0x1) |
+				DSI_PAD_CONTROL_PAD_PULLDN_ENAB(0x1));
+			tegra_dsi_writel(dsi, val, DSI_PAD_CONTROL);
+
+			if (tegra_dsi_exit_ulpm(dsi) < 0) {
+				dev_err(&dc->ndev->dev,
+					"DSI failed to exit ulpm\n");
+				goto fail;
+			}
+		}
+		break;
+	case DSI_NO_SUSPEND:
+		break;
+	default:
+		dev_err(&dc->ndev->dev, "DSI suspend aggressiveness "
+						"is not supported.\n");
+	}
+
+	tegra_dsi_start_dc_stream(dc, dsi);
+
+	dsi->enabled = true;
+	dsi->host_suspended = false;
+fail:
+	mutex_unlock(&dsi->host_resume_lock);
+	return err;
+}
+
+static void tegra_dc_dsi_disable(struct tegra_dc *dc)
+{
+	int err;
+	struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
+
+	tegra_dc_io_start(dc);
+	mutex_lock(&dsi->lock);
+
+	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
+		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
+
+	if (dsi->info.power_saving_suspend) {
+		if (tegra_dsi_deep_sleep(dc, dsi, DSI_SUSPEND_FULL) < 0) {
+			dev_err(&dc->ndev->dev,
+				"DSI failed to enter deep sleep\n");
+			goto fail;
+		}
+	} else {
+		if (dsi->info.dsi_early_suspend_cmd) {
+			err = tegra_dsi_send_panel_cmd(dc, dsi,
+				dsi->info.dsi_early_suspend_cmd,
+				dsi->info.n_early_suspend_cmd);
+			if (err < 0) {
+				dev_err(&dc->ndev->dev,
+				"dsi: Error sending early suspend cmd\n");
+				goto fail;
+			}
+		}
+
+		if (!dsi->ulpm) {
+			if (tegra_dsi_enter_ulpm(dsi) < 0) {
+				dev_err(&dc->ndev->dev,
+					"DSI failed to enter ulpm\n");
+				goto fail;
+			}
+		}
+	}
+fail:
+	mutex_unlock(&dsi->lock);
+	tegra_dc_io_end(dc);
+}
+
+#ifdef CONFIG_PM
+static void tegra_dc_dsi_suspend(struct tegra_dc *dc)
+{
+	struct tegra_dc_dsi_data *dsi;
+
+	dsi = tegra_dc_get_outdata(dc);
+
+	if (!dsi->enabled)
+		return;
+
+	tegra_dc_io_start(dc);
+	mutex_lock(&dsi->lock);
+
+	if (!dsi->info.power_saving_suspend) {
+		if (dsi->ulpm) {
+			if (tegra_dsi_exit_ulpm(dsi) < 0) {
+				dev_err(&dc->ndev->dev,
+					"DSI failed to exit ulpm");
+				goto fail;
+			}
+		}
+
+		if (tegra_dsi_deep_sleep(dc, dsi, DSI_SUSPEND_FULL) < 0) {
+			dev_err(&dc->ndev->dev,
+				"DSI failed to enter deep sleep\n");
+			goto fail;
+		}
+	}
+fail:
+	mutex_unlock(&dsi->lock);
+	tegra_dc_io_end(dc);
+}
+
+static void tegra_dc_dsi_resume(struct tegra_dc *dc)
+{
+	/*
+	 * Not required: tegra_dc_dsi_enable() reconfigures the controller
+	 * from scratch.
+	 */
+}
+#endif
+
+struct tegra_dc_out_ops tegra_dc_dsi_ops = {
+	.init = tegra_dc_dsi_init,
+	.destroy = tegra_dc_dsi_destroy,
+	.enable = tegra_dc_dsi_enable,
+	.disable = tegra_dc_dsi_disable,
+	.hold = tegra_dc_dsi_hold_host,
+	.release = tegra_dc_dsi_release_host,
+#ifdef CONFIG_PM
+	.suspend = tegra_dc_dsi_suspend,
+	.resume = tegra_dc_dsi_resume,
+#endif
+};
diff --git a/drivers/staging/tegra/video/dc/dsi.h b/drivers/staging/tegra/video/dc/dsi.h
new file mode 100644
index 000000000000..bf54913a1794
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/dsi.h
@@ -0,0 +1,375 @@
+/*
+ * drivers/video/tegra/dc/dsi.h
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DSI_H__
+#define __DRIVERS_VIDEO_TEGRA_DC_DSI_H__
+
+/* source of video data */
+enum {
+	TEGRA_DSI_VIDEO_DRIVEN_BY_DC,
+	TEGRA_DSI_VIDEO_DRIVEN_BY_HOST,
+};
+
+/* Max number of data lanes supported */
+#define MAX_DSI_DATA_LANES	2
+/* Default Peripheral reset timeout */
+#define DSI_PR_TO_VALUE		0x2000
+
+/* DCS commands for command mode */
+#define DSI_ENTER_PARTIAL_MODE	0x12
+#define DSI_SET_PIXEL_FORMAT	0x3A
+#define DSI_AREA_COLOR_MODE	0x4C
+#define DSI_SET_PARTIAL_AREA	0x30
+#define DSI_SET_PAGE_ADDRESS	0x2B
+#define DSI_SET_ADDRESS_MODE	0x36
+#define DSI_SET_COLUMN_ADDRESS	0x2A
+#define DSI_WRITE_MEMORY_START	0x2C
+#define DSI_WRITE_MEMORY_CONTINUE	0x3C
+#define DSI_MAX_COMMAND_DELAY_USEC	250000
+#define DSI_COMMAND_DELAY_STEPS_USEC	10
+
+/* Trigger message */
+#define DSI_ESCAPE_CMD	0x87
+#define DSI_ACK_NO_ERR	0x84
+
+/* DSI return packet types */
+#define GEN_LONG_RD_RES 0x1A
+#define DCS_LONG_RD_RES 0x1C
+#define GEN_1_BYTE_SHORT_RD_RES 0x11
+#define DCS_1_BYTE_SHORT_RD_RES 0x21
+#define GEN_2_BYTE_SHORT_RD_RES 0x12
+#define DCS_2_BYTE_SHORT_RD_RES 0x22
+#define ACK_ERR_RES 0x02
+
+/* End of Transmit command for HS mode */
+#define DSI_CMD_HS_EOT_PACKAGE          0x000F0F08
+
+/* Delay required after issuing the trigger */
+#define DSI_COMMAND_COMPLETION_DELAY_USEC   5
+
+#define DSI_DELAY_FOR_READ_FIFO 5
+
+/* Dsi virtual channel bit position, refer to the DSI specs */
+#define DSI_VIR_CHANNEL_BIT_POSITION	6
+
+/* DSI packet commands from Host to peripherals */
+enum {
+	dsi_command_v_sync_start = 0x01,
+	dsi_command_v_sync_end = 0x11,
+	dsi_command_h_sync_start = 0x21,
+	dsi_command_h_sync_end = 0x31,
+	dsi_command_end_of_transaction = 0x08,
+	dsi_command_blanking = 0x19,
+	dsi_command_null_packet = 0x09,
+	dsi_command_h_active_length_16bpp = 0x0E,
+	dsi_command_h_active_length_18bpp = 0x1E,
+	dsi_command_h_active_length_18bpp_np = 0x2E,
+	dsi_command_h_active_length_24bpp = 0x3E,
+	dsi_command_h_sync_active = dsi_command_blanking,
+	dsi_command_h_back_porch = dsi_command_blanking,
+	dsi_command_h_front_porch = dsi_command_blanking,
+	dsi_command_writ_no_param = 0x05,
+	dsi_command_long_write = 0x39,
+	dsi_command_max_return_pkt_size = 0x37,
+	dsi_command_generic_read_request_with_2_param = 0x24,
+	dsi_command_dcs_read_with_no_params = 0x06,
+};
+
+/* Maximum polling time for reading the dsi status register */
+#define DSI_STATUS_POLLING_DURATION_USEC    100000
+#define DSI_STATUS_POLLING_DELAY_USEC       100
+
+/*
+ * Horizontal Sync Blank Packet Overhead
+ * DSI_overhead = size_of(HS packet header)
+ *             + size_of(BLANK packet header) + size_of(checksum)
+ * DSI_overhead = 4 + 4 + 2 = 10
+ */
+#define DSI_HSYNC_BLNK_PKT_OVERHEAD  10
+
+/*
+ * Horizontal Front Porch Packet Overhead
+ * DSI_overhead = size_of(checksum)
+ *            + size_of(BLANK packet header) + size_of(checksum)
+ * DSI_overhead = 2 + 4 + 2 = 8
+ */
+#define DSI_HFRONT_PORCH_PKT_OVERHEAD 8
+
+/*
+ * Horizontal Back Porch Packet
+ * DSI_overhead = size_of(HE packet header)
+ *            + size_of(BLANK packet header) + size_of(checksum)
+ *            + size_of(RGB packet header)
+ * DSI_overhead = 4 + 4 + 2 + 4 = 14
+ */
+#define DSI_HBACK_PORCH_PKT_OVERHEAD  14
+
+/* Additional Hs TX timeout margin */
+#define DSI_HTX_TO_MARGIN   720
+
+#define DSI_CYCLE_COUNTER_VALUE     512
+
+#define DSI_LRXH_TO_VALUE   0x2000
+
+/* Turn around timeout terminal count */
+#define DSI_TA_TO_VALUE     0x2000
+
+/* Turn around timeout tally */
+#define DSI_TA_TALLY_VALUE      0x0
+/* LP Rx timeout tally */
+#define DSI_LRXH_TALLY_VALUE    0x0
+/* HS Tx Timeout tally */
+#define DSI_HTX_TALLY_VALUE     0x0
+
+/* DSI Power control settle time 10 micro seconds */
+#define DSI_POWER_CONTROL_SETTLE_TIME_US    10
+
+#define DSI_HOST_FIFO_DEPTH     64
+#define DSI_VIDEO_FIFO_DEPTH    480
+#define DSI_READ_FIFO_DEPTH	(32 << 2)
+
+#define NUMOF_BIT_PER_BYTE			8
+#define DEFAULT_LP_CMD_MODE_CLK_KHZ		10000
+#define DEFAULT_MAX_DSI_PHY_CLK_KHZ		(500*1000)
+#define DEFAULT_PANEL_RESET_TIMEOUT		2
+#define DEFAULT_PANEL_BUFFER_BYTE		512
+
+/*
+ * TODO: are DSI_HOST_DSI_CONTROL_CRC_RESET(RESET_CRC) and
+ * DSI_HOST_DSI_CONTROL_HOST_TX_TRIG_SRC(IMMEDIATE) required for everyone?
+ */
+#define HOST_DSI_CTRL_COMMON \
+			(DSI_HOST_DSI_CONTROL_PHY_CLK_DIV(DSI_PHY_CLK_DIV1) | \
+			DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(NORMAL) | \
+			DSI_HOST_DSI_CONTROL_PERIPH_RESET(TEGRA_DSI_DISABLE) | \
+			DSI_HOST_DSI_CONTROL_RAW_DATA(TEGRA_DSI_DISABLE) | \
+			DSI_HOST_DSI_CONTROL_IMM_BTA(TEGRA_DSI_DISABLE) | \
+			DSI_HOST_DSI_CONTROL_PKT_BTA(TEGRA_DSI_DISABLE) | \
+			DSI_HOST_DSI_CONTROL_CS_ENABLE(TEGRA_DSI_ENABLE) | \
+			DSI_HOST_DSI_CONTROL_ECC_ENABLE(TEGRA_DSI_ENABLE) | \
+			DSI_HOST_DSI_CONTROL_PKT_WR_FIFO_SEL(HOST_ONLY))
+
+#define HOST_DSI_CTRL_HOST_DRIVEN \
+			(DSI_HOST_DSI_CONTROL_CRC_RESET(RESET_CRC) | \
+			DSI_HOST_DSI_CONTROL_HOST_TX_TRIG_SRC(IMMEDIATE))
+
+#define HOST_DSI_CTRL_DC_DRIVEN 0
+
+#define DSI_CTRL_HOST_DRIVEN	(DSI_CONTROL_VID_ENABLE(TEGRA_DSI_DISABLE) | \
+				DSI_CONTROL_HOST_ENABLE(TEGRA_DSI_ENABLE))
+
+#define DSI_CTRL_DC_DRIVEN	(DSI_CONTROL_VID_TX_TRIG_SRC(SOL) | \
+				DSI_CONTROL_VID_ENABLE(TEGRA_DSI_ENABLE) | \
+				DSI_CONTROL_HOST_ENABLE(TEGRA_DSI_DISABLE))
+
+#define DSI_CTRL_CMD_MODE	(DSI_CONTROL_VID_DCS_ENABLE(TEGRA_DSI_ENABLE))
+
+#define DSI_CTRL_VIDEO_MODE	(DSI_CONTROL_VID_DCS_ENABLE(TEGRA_DSI_DISABLE))
+
+
+enum {
+	CMD_VS		= 0x01,
+	CMD_VE		= 0x11,
+
+	CMD_HS		= 0x21,
+	CMD_HE		= 0x31,
+
+	CMD_EOT		= 0x08,
+	CMD_NULL	= 0x09,
+	CMD_SHORTW	= 0x15,
+	CMD_BLNK	= 0x19,
+	CMD_LONGW	= 0x39,
+
+	CMD_RGB	= 0x00,
+	CMD_RGB_16BPP	= 0x0E,
+	CMD_RGB_18BPP	= 0x1E,
+	CMD_RGB_18BPPNP = 0x2E,
+	CMD_RGB_24BPP	= 0x3E,
+};
+
+#define PKT_ID0(id)		(DSI_PKT_SEQ_0_LO_PKT_00_ID(id) | \
+				DSI_PKT_SEQ_1_LO_PKT_10_EN(TEGRA_DSI_ENABLE))
+#define PKT_LEN0(len)	(DSI_PKT_SEQ_0_LO_PKT_00_SIZE(len))
+
+#define PKT_ID1(id)		(DSI_PKT_SEQ_0_LO_PKT_01_ID(id) | \
+				DSI_PKT_SEQ_1_LO_PKT_11_EN(TEGRA_DSI_ENABLE))
+#define PKT_LEN1(len)	(DSI_PKT_SEQ_0_LO_PKT_01_SIZE(len))
+
+#define PKT_ID2(id)		(DSI_PKT_SEQ_0_LO_PKT_02_ID(id) | \
+				DSI_PKT_SEQ_1_LO_PKT_12_EN(TEGRA_DSI_ENABLE))
+#define PKT_LEN2(len)	(DSI_PKT_SEQ_0_LO_PKT_02_SIZE(len))
+
+#define PKT_ID3(id)		(DSI_PKT_SEQ_0_HI_PKT_03_ID(id) | \
+				DSI_PKT_SEQ_1_HI_PKT_13_EN(TEGRA_DSI_ENABLE))
+#define PKT_LEN3(len)	(DSI_PKT_SEQ_0_HI_PKT_03_SIZE(len))
+
+#define PKT_ID4(id)		(DSI_PKT_SEQ_0_HI_PKT_04_ID(id) | \
+				DSI_PKT_SEQ_1_HI_PKT_14_EN(TEGRA_DSI_ENABLE))
+#define PKT_LEN4(len)	(DSI_PKT_SEQ_0_HI_PKT_04_SIZE(len))
+
+#define PKT_ID5(id)		(DSI_PKT_SEQ_0_HI_PKT_05_ID(id) | \
+				DSI_PKT_SEQ_1_HI_PKT_15_EN(TEGRA_DSI_ENABLE))
+#define PKT_LEN5(len)	(DSI_PKT_SEQ_0_HI_PKT_05_SIZE(len))
+
+#define PKT_LP		(DSI_PKT_SEQ_0_LO_SEQ_0_FORCE_LP(TEGRA_DSI_ENABLE))
+
+#define NUMOF_PKT_SEQ	12
+
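+/*
+ * Each DSI_PKT_SEQ_x_LO/HI pair describes up to six packets sent per
+ * line. As an illustrative (hypothetical) sequence word, an h-sync
+ * packet followed by one blanking packet and an end-of-transmission
+ * packet, all forced to LP mode, could be encoded as:
+ *
+ *	PKT_ID0(CMD_HS) | PKT_LEN0(0) |
+ *	PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+ *	PKT_ID2(CMD_EOT) | PKT_LEN2(0) | PKT_LP
+ */
+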
+/* Mipi v1.00.00 phy timing range */
+#define NOT_DEFINED			-1
+#define MIPI_T_HSEXIT_NS_MIN		100
+#define MIPI_T_HSEXIT_NS_MAX		NOT_DEFINED
+#define	MIPI_T_HSTRAIL_NS_MIN(clk_ns)	max((8 * (clk_ns)), (60 + 4 * (clk_ns)))
+#define MIPI_T_HSTRAIL_NS_MAX		NOT_DEFINED
+#define MIPI_T_HSZERO_NS_MIN		NOT_DEFINED
+#define MIPI_T_HSZERO_NS_MAX		NOT_DEFINED
+#define MIPI_T_HSPREPARE_NS_MIN(clk_ns)	(40 + 4 * (clk_ns))
+#define MIPI_T_HSPREPARE_NS_MAX(clk_ns)	(85 + 6 * (clk_ns))
+#define MIPI_T_CLKTRAIL_NS_MIN		60
+#define MIPI_T_CLKTRAIL_NS_MAX		NOT_DEFINED
+#define	MIPI_T_CLKPOST_NS_MIN(clk_ns)	(60 + 52 * (clk_ns))
+#define MIPI_T_CLKPOST_NS_MAX		NOT_DEFINED
+#define	MIPI_T_CLKZERO_NS_MIN		NOT_DEFINED
+#define MIPI_T_CLKZERO_NS_MAX		NOT_DEFINED
+#define MIPI_T_TLPX_NS_MIN		50
+#define MIPI_T_TLPX_NS_MAX		NOT_DEFINED
+#define MIPI_T_CLKPREPARE_NS_MIN	38
+#define MIPI_T_CLKPREPARE_NS_MAX	95
+#define MIPI_T_CLKPRE_NS_MIN		8
+#define MIPI_T_CLKPRE_NS_MAX		NOT_DEFINED
+#define	MIPI_T_WAKEUP_NS_MIN		1
+#define MIPI_T_WAKEUP_NS_MAX		NOT_DEFINED
+#define MIPI_T_TASURE_NS_MIN(tlpx_ns)	(tlpx_ns)
+#define MIPI_T_TASURE_NS_MAX(tlpx_ns)	(2 * (tlpx_ns))
+#define MIPI_T_HSPREPARE_ADD_HSZERO_NS_MIN(clk_ns)	(145 + 10 * (clk_ns))
+#define MIPI_T_HSPREPARE_ADD_HSZERO_NS_MAX		NOT_DEFINED
+#define	MIPI_T_CLKPREPARE_ADD_CLKZERO_NS_MIN		300
+#define MIPI_T_CLKPREPARE_ADD_CLKZERO_NS_MAX		NOT_DEFINED
+
+#define DSI_TBYTE(clk_ns)	((clk_ns) * (BITS_PER_BYTE))
+#define DSI_CONVERT_T_PHY_NS_TO_T_PHY(t_phy_ns, clk_ns, hw_inc) \
+				((int)((DIV_ROUND_CLOSEST((t_phy_ns), \
+				(DSI_TBYTE(clk_ns)))) - (hw_inc)))
+
+#define DSI_CONVERT_T_PHY_TO_T_PHY_NS(t_phy, clk_ns, hw_inc) \
+				(((t_phy) + (hw_inc)) * (DSI_TBYTE(clk_ns)))
+
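+/*
+ * These macros convert between a phy timing in ns and the register
+ * value, in units of one byte clock (8 bit clocks). Rough worked
+ * example: with a 2 ns bit clock the byte time is 16 ns, so
+ * t_phy_ns = 100 with hw_inc = 1 gives
+ * DIV_ROUND_CLOSEST(100, 16) - 1 = 5.
+ */
+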
+/* Default phy timing in ns */
+#define T_HSEXIT_NS_DEFAULT		120
+#define T_HSTRAIL_NS_DEFAULT(clk_ns) \
+			max((8 * (clk_ns)), (60 + 4 * (clk_ns)))
+
+#define T_DATZERO_NS_DEFAULT(clk_ns)	(145 + 5 * (clk_ns))
+#define T_HSPREPARE_NS_DEFAULT(clk_ns)	(65 + 5 * (clk_ns))
+#define T_CLKTRAIL_NS_DEFAULT		80
+#define T_CLKPOST_NS_DEFAULT(clk_ns)	(70 + 52 * (clk_ns))
+#define T_CLKZERO_NS_DEFAULT		260
+#define T_TLPX_NS_DEFAULT		60
+#define T_CLKPREPARE_NS_DEFAULT		65
+#define T_TAGO_NS_DEFAULT		(4 * (T_TLPX_NS_DEFAULT))
+#define T_TASURE_NS_DEFAULT		(2 * (T_TLPX_NS_DEFAULT))
+#define T_TAGET_NS_DEFAULT		(5 * (T_TLPX_NS_DEFAULT))
+
+/* HW increment to phy register values */
+#define T_HSEXIT_HW_INC		1
+#define T_HSTRAIL_HW_INC	0
+#define T_DATZERO_HW_INC	3
+#define T_HSPREPARE_HW_INC	1
+#define T_CLKTRAIL_HW_INC	1
+#define T_CLKPOST_HW_INC	1
+#define T_CLKZERO_HW_INC	1
+#define T_TLPX_HW_INC		1
+#define T_CLKPREPARE_HW_INC	1
+#define T_TAGO_HW_INC		1
+#define T_TASURE_HW_INC		1
+#define T_TAGET_HW_INC		1
+#define T_CLKPRE_HW_INC		1
+#define T_WAKEUP_HW_INC		1
+
+/* Default phy timing reg values */
+#define T_HSEXIT_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_HSEXIT_NS_DEFAULT, clk_ns, T_HSEXIT_HW_INC))
+
+#define T_HSTRAIL_DEFAULT(clk_ns) \
+(3 + (DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_HSTRAIL_NS_DEFAULT(clk_ns), clk_ns, T_HSTRAIL_HW_INC)))
+
+#define T_DATZERO_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_DATZERO_NS_DEFAULT(clk_ns), clk_ns, T_DATZERO_HW_INC))
+
+#define T_HSPREPARE_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_HSPREPARE_NS_DEFAULT(clk_ns), clk_ns, T_HSPREPARE_HW_INC))
+
+#define T_CLKTRAIL_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_CLKTRAIL_NS_DEFAULT, clk_ns, T_CLKTRAIL_HW_INC))
+
+#define T_CLKPOST_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_CLKPOST_NS_DEFAULT(clk_ns), clk_ns, T_CLKPOST_HW_INC))
+
+#define T_CLKZERO_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_CLKZERO_NS_DEFAULT, clk_ns, T_CLKZERO_HW_INC))
+
+#define T_TLPX_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_TLPX_NS_DEFAULT, clk_ns, T_TLPX_HW_INC))
+
+#define T_CLKPREPARE_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_CLKPREPARE_NS_DEFAULT, clk_ns, T_CLKPREPARE_HW_INC))
+
+#define T_CLKPRE_DEFAULT	0x1
+#define T_WAKEUP_DEFAULT	0x7f
+
+#define T_TAGO_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_TAGO_NS_DEFAULT, clk_ns, T_TAGO_HW_INC))
+
+#define T_TASURE_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_TASURE_NS_DEFAULT, clk_ns, T_TASURE_HW_INC))
+
+#define T_TAGET_DEFAULT(clk_ns) \
+(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
+T_TAGET_NS_DEFAULT, clk_ns, T_TAGET_HW_INC))
+
+/* Defines the DSI phy timing parameters */
+struct dsi_phy_timing_inclk {
+	unsigned	t_hsdexit;
+	unsigned	t_hstrail;
+	unsigned	t_hsprepare;
+	unsigned	t_datzero;
+
+	unsigned	t_clktrail;
+	unsigned	t_clkpost;
+	unsigned	t_clkzero;
+	unsigned	t_tlpx;
+
+	unsigned	t_clkpre;
+	unsigned	t_clkprepare;
+	unsigned	t_wakeup;
+
+	unsigned	t_taget;
+	unsigned	t_tasure;
+	unsigned	t_tago;
+};
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/dsi_regs.h b/drivers/staging/tegra/video/dc/dsi_regs.h
new file mode 100644
index 000000000000..71045fcec29e
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/dsi_regs.h
@@ -0,0 +1,351 @@
+/*
+ * drivers/video/tegra/dc/dsi_regs.h
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DSI_REG_H__
+#define __DRIVERS_VIDEO_TEGRA_DC_DSI_REG_H__
+
+enum {
+	TEGRA_DSI_DISABLE,
+	TEGRA_DSI_ENABLE,
+};
+
+/* These are word offsets from base (not byte offsets) */
+enum {
+	OP_DONE = 1,
+};
+#define DSI_INCR_SYNCPT 0x00
+#define DSI_INCR_SYNCPT_COND(x)		(((x) & 0xff) << 8)
+#define DSI_INCR_SYNCPT_INDX(x)		(((x) & 0xff) << 0)
+
+#define DSI_INCR_SYNCPT_CNTRL 0x01
+#define DSI_INCR_SYNCPT_ERROR 0x02
+#define DSI_CTXSW 0x08
+#define DSI_RD_DATA 0x09
+#define DSI_WR_DATA 0x0a
+
+#define DSI_POWER_CONTROL 0x0b
+#define   DSI_POWER_CONTROL_LEG_DSI_ENABLE(x)		(((x) & 0x1) << 0)
+
+#define DSI_INT_ENABLE 0x0c
+#define DSI_INT_STATUS 0x0d
+#define DSI_INT_MASK 0x0e
+
+#define DSI_HOST_DSI_CONTROL 0x0f
+enum {
+	RESET_CRC = 1,
+};
+#define   DSI_HOST_CONTROL_FIFO_STAT_RESET(x)		(((x) & 0x1) << 21)
+#define   DSI_HOST_DSI_CONTROL_CRC_RESET(x)		(((x) & 0x1) << 20)
+enum {
+	DSI_PHY_CLK_DIV1,
+	DSI_PHY_CLK_DIV2,
+};
+#define   DSI_HOST_DSI_CONTROL_PHY_CLK_DIV(x)		(((x) & 0x7) << 16)
+enum {
+	SOL,
+	FIFO_LEVEL,
+	IMMEDIATE,
+};
+#define   DSI_HOST_DSI_CONTROL_HOST_TX_TRIG_SRC(x)	(((x) & 0x3) << 12)
+enum {
+	NORMAL,
+	ENTER_ULPM,
+	EXIT_ULPM,
+};
+#define   DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(x)	(((x) & 0x3) << 8)
+#define   DSI_HOST_DSI_CONTROL_PERIPH_RESET(x)		(((x) & 0x1) << 7)
+#define   DSI_HOST_DSI_CONTROL_RAW_DATA(x)		(((x) & 0x1) << 6)
+enum {
+	TEGRA_DSI_LOW,
+	TEGRA_DSI_HIGH,
+};
+#define   DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(x)	(((x) & 0x1) << 5)
+enum {
+	HOST_ONLY,
+	VIDEO_HOST,
+};
+#define   DSI_HOST_DSI_CONTROL_PKT_WR_FIFO_SEL(x)	(((x) & 0x1) << 4)
+#define   DSI_HOST_DSI_CONTROL_IMM_BTA(x)		(((x) & 0x1) << 3)
+#define   DSI_HOST_DSI_CONTROL_PKT_BTA(x)		(((x) & 0x1) << 2)
+#define   DSI_HOST_DSI_CONTROL_CS_ENABLE(x)		(((x) & 0x1) << 1)
+#define   DSI_HOST_DSI_CONTROL_ECC_ENABLE(x)		(((x) & 0x1) << 0)
+
+#define DSI_CONTROL 0x10
+#define   DSI_CONTROL_DBG_ENABLE(x)			(((x) & 0x1) << 31)
+enum {
+	CONTINUOUS,
+	TX_ONLY,
+};
+#define   DSI_CONTROL_HS_CLK_CTRL(x)			(((x) & 0x1) << 20)
+#define   DSI_CONTROL_VIRTUAL_CHANNEL(x)		(((x) & 0x3) << 16)
+#define   DSI_CONTROL_DATA_FORMAT(x)			(((x) & 0x3) << 12)
+#define   DSI_CONTROL_VID_TX_TRIG_SRC(x)		(((x) & 0x3) << 8)
+#define   DSI_CONTROL_NUM_DATA_LANES(x)			(((x) & 0x3) << 4)
+#define   DSI_CONTROL_VID_DCS_ENABLE(x)			(((x) & 0x1) << 3)
+#define   DSI_CONTROL_VID_SOURCE(x)			(((x) & 0x1) << 2)
+#define   DSI_CONTROL_VID_ENABLE(x)			(((x) & 0x1) << 1)
+#define   DSI_CONTROL_HOST_ENABLE(x)			(((x) & 0x1) << 0)
+
+#define DSI_SOL_DELAY 0x11
+#define DSI_SOL_DELAY_SOL_DELAY(x)			(((x) & 0xffff) << 0)
+
+#define DSI_MAX_THRESHOLD 0x12
+#define DSI_MAX_THRESHOLD_MAX_THRESHOLD(x)		(((x) & 0xffff) << 0)
+
+#define DSI_TRIGGER 0x13
+#define DSI_TRIGGER_HOST_TRIGGER(x)			(((x) & 0x1) << 1)
+#define DSI_TRIGGER_VID_TRIGGER(x)			(((x) & 0x1) << 0)
+
+#define DSI_TX_CRC 0x14
+#define DSI_TX_CRC_TX_CRC(x)			(((x) & 0xffffffff) << 0)
+
+#define DSI_STATUS 0x15
+#define DSI_STATUS_IDLE(x)			(((x) & 0x1) << 10)
+#define DSI_STATUS_LB_UNDERFLOW(x)		(((x) & 0x1) << 9)
+#define DSI_STATUS_LB_OVERFLOW(x)		(((x) & 0x1) << 8)
+#define DSI_STATUS_RD_FIFO_COUNT(x)		(((x) & 0x1f) << 0)
+
+#define DSI_INIT_SEQ_CONTROL 0x1a
+#define   DSI_INIT_SEQ_CONTROL_DSI_FRAME_INIT_BYTE_COUNT(x) \
+				(((x) & 0x3f) << 8)
+#define   DSI_INIT_SEQ_CONTROL_DSI_SEND_INIT_SEQUENCE(x) \
+				(((x) & 0xff) << 0)
+
+#define DSI_INIT_SEQ_DATA_0 0x1b
+#define DSI_INIT_SEQ_DATA_1 0x1c
+#define DSI_INIT_SEQ_DATA_2 0x1d
+#define DSI_INIT_SEQ_DATA_3 0x1e
+#define DSI_INIT_SEQ_DATA_4 0x1f
+#define DSI_INIT_SEQ_DATA_5 0x20
+#define DSI_INIT_SEQ_DATA_6 0x21
+#define DSI_INIT_SEQ_DATA_7 0x22
+
+#define DSI_PKT_SEQ_0_LO 0x23
+#define   DSI_PKT_SEQ_0_LO_SEQ_0_FORCE_LP(x)	(((x) & 0x1) << 30)
+#define   DSI_PKT_SEQ_0_LO_PKT_02_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_0_LO_PKT_02_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_0_LO_PKT_02_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_0_LO_PKT_01_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_0_LO_PKT_01_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_0_LO_PKT_01_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_0_LO_PKT_00_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_0_LO_PKT_00_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_0_LO_PKT_00_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_0_HI 0x24
+#define   DSI_PKT_SEQ_0_HI_PKT_05_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_0_HI_PKT_05_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_0_HI_PKT_05_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_0_HI_PKT_04_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_0_HI_PKT_04_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_0_HI_PKT_04_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_0_HI_PKT_03_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_0_HI_PKT_03_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_0_HI_PKT_03_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_1_LO 0x25
+#define   DSI_PKT_SEQ_1_LO_SEQ_1_FORCE_LP(x)	(((x) & 0x1) << 30)
+#define   DSI_PKT_SEQ_1_LO_PKT_12_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_1_LO_PKT_12_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_1_LO_PKT_12_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_1_LO_PKT_11_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_1_LO_PKT_11_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_1_LO_PKT_11_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_1_LO_PKT_10_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_1_LO_PKT_10_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_1_LO_PKT_10_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_1_HI 0x26
+#define   DSI_PKT_SEQ_1_HI_PKT_15_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_1_HI_PKT_15_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_1_HI_PKT_15_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_1_HI_PKT_14_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_1_HI_PKT_14_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_1_HI_PKT_14_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_1_HI_PKT_13_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_1_HI_PKT_13_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_1_HI_PKT_13_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_2_LO 0x27
+#define   DSI_PKT_SEQ_2_LO_SEQ_2_FORCE_LP(x)	(((x) & 0x1) << 30)
+#define   DSI_PKT_SEQ_2_LO_PKT_22_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_2_LO_PKT_22_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_2_LO_PKT_22_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_2_LO_PKT_21_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_2_LO_PKT_21_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_2_LO_PKT_21_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_2_LO_PKT_20_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_2_LO_PKT_20_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_2_LO_PKT_20_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_2_HI 0x28
+#define   DSI_PKT_SEQ_2_HI_PKT_25_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_2_HI_PKT_25_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_2_HI_PKT_25_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_2_HI_PKT_24_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_2_HI_PKT_24_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_2_HI_PKT_24_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_2_HI_PKT_23_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_2_HI_PKT_23_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_2_HI_PKT_23_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_3_LO 0x29
+#define   DSI_PKT_SEQ_3_LO_SEQ_3_FORCE_LP(x)	(((x) & 0x1) << 30)
+#define   DSI_PKT_SEQ_3_LO_PKT_32_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_3_LO_PKT_32_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_3_LO_PKT_32_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_3_LO_PKT_31_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_3_LO_PKT_31_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_3_LO_PKT_31_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_3_LO_PKT_30_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_3_LO_PKT_30_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_3_LO_PKT_30_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_3_HI 0x2a
+#define   DSI_PKT_SEQ_3_HI_PKT_35_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_3_HI_PKT_35_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_3_HI_PKT_35_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_3_HI_PKT_34_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_3_HI_PKT_34_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_3_HI_PKT_34_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_3_HI_PKT_33_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_3_HI_PKT_33_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_3_HI_PKT_33_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_4_LO 0x2b
+#define   DSI_PKT_SEQ_4_LO_SEQ_4_FORCE_LP(x)	(((x) & 0x1) << 30)
+#define   DSI_PKT_SEQ_4_LO_PKT_42_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_4_LO_PKT_42_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_4_LO_PKT_42_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_4_LO_PKT_41_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_4_LO_PKT_41_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_4_LO_PKT_41_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_4_LO_PKT_40_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_4_LO_PKT_40_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_4_LO_PKT_40_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_4_HI 0x2c
+#define   DSI_PKT_SEQ_4_HI_PKT_45_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_4_HI_PKT_45_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_4_HI_PKT_45_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_4_HI_PKT_44_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_4_HI_PKT_44_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_4_HI_PKT_44_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_4_HI_PKT_43_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_4_HI_PKT_43_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_4_HI_PKT_43_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_5_LO 0x2d
+#define   DSI_PKT_SEQ_5_LO_SEQ_5_FORCE_LP(x)	(((x) & 0x1) << 30)
+#define   DSI_PKT_SEQ_5_LO_PKT_52_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_5_LO_PKT_52_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_5_LO_PKT_52_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_5_LO_PKT_51_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_5_LO_PKT_51_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_5_LO_PKT_51_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_5_LO_PKT_50_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_5_LO_PKT_50_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_5_LO_PKT_50_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_PKT_SEQ_5_HI 0x2e
+#define   DSI_PKT_SEQ_5_HI_PKT_55_EN(x)		(((x) & 0x1) << 29)
+#define   DSI_PKT_SEQ_5_HI_PKT_55_ID(x)		(((x) & 0x3f) << 23)
+#define   DSI_PKT_SEQ_5_HI_PKT_55_SIZE(x)	(((x) & 0x7) << 20)
+#define   DSI_PKT_SEQ_5_HI_PKT_54_EN(x)		(((x) & 0x1) << 19)
+#define   DSI_PKT_SEQ_5_HI_PKT_54_ID(x)		(((x) & 0x3f) << 13)
+#define   DSI_PKT_SEQ_5_HI_PKT_54_SIZE(x)	(((x) & 0x7) << 10)
+#define   DSI_PKT_SEQ_5_HI_PKT_53_EN(x)		(((x) & 0x1) << 9)
+#define   DSI_PKT_SEQ_5_HI_PKT_53_ID(x)		(((x) & 0x3f) << 3)
+#define   DSI_PKT_SEQ_5_HI_PKT_53_SIZE(x)	(((x) & 0x7) << 0)
+
+#define DSI_DCS_CMDS 0x33
+#define   DSI_DCS_CMDS_LT5_DCS_CMD(x)		(((x) & 0xff) << 8)
+#define   DSI_DCS_CMDS_LT3_DCS_CMD(x)		(((x) & 0xff) << 0)
+
+#define DSI_PKT_LEN_0_1 0x34
+#define   DSI_PKT_LEN_0_1_LENGTH_1(x)		(((x) & 0xffff) << 16)
+#define   DSI_PKT_LEN_0_1_LENGTH_0(x)		(((x) & 0xffff) << 0)
+
+#define DSI_PKT_LEN_2_3 0x35
+#define   DSI_PKT_LEN_2_3_LENGTH_3(x)		(((x) & 0xffff) << 16)
+#define   DSI_PKT_LEN_2_3_LENGTH_2(x)		(((x) & 0xffff) << 0)
+
+
+#define DSI_PKT_LEN_4_5 0x36
+#define   DSI_PKT_LEN_4_5_LENGTH_5(x)		(((x) & 0xffff) << 16)
+#define   DSI_PKT_LEN_4_5_LENGTH_4(x)		(((x) & 0xffff) << 0)
+
+#define DSI_PKT_LEN_6_7 0x37
+#define   DSI_PKT_LEN_6_7_LENGTH_7(x)		(((x) & 0xffff) << 16)
+#define   DSI_PKT_LEN_6_7_LENGTH_6(x)		(((x) & 0xffff) << 0)
+
+#define DSI_PHY_TIMING_0 0x3c
+#define   DSI_PHY_TIMING_0_THSDEXIT(x)		(((x) & 0xff) << 24)
+#define   DSI_PHY_TIMING_0_THSTRAIL(x)		(((x) & 0xff) << 16)
+#define   DSI_PHY_TIMING_0_TDATZERO(x)		(((x) & 0xff) << 8)
+#define   DSI_PHY_TIMING_0_THSPREPR(x)		(((x) & 0xff) << 0)
+
+#define DSI_PHY_TIMING_1 0x3d
+#define   DSI_PHY_TIMING_1_TCLKTRAIL(x)		(((x) & 0xff) << 24)
+#define   DSI_PHY_TIMING_1_TCLKPOST(x)		(((x) & 0xff) << 16)
+#define   DSI_PHY_TIMING_1_TCLKZERO(x)		(((x) & 0xff) << 8)
+#define   DSI_PHY_TIMING_1_TTLPX(x)		(((x) & 0xff) << 0)
+
+#define DSI_PHY_TIMING_2 0x3e
+#define   DSI_PHY_TIMING_2_TCLKPREPARE(x)	(((x) & 0xff) << 16)
+#define   DSI_PHY_TIMING_2_TCLKPRE(x)		(((x) & 0xff) << 8)
+#define   DSI_PHY_TIMING_2_TWAKEUP(x)		(((x) & 0xff) << 0)
+
+#define DSI_BTA_TIMING 0x3f
+#define   DSI_BTA_TIMING_TTAGET(x)		(((x) & 0xff) << 16)
+#define   DSI_BTA_TIMING_TTASURE(x)		(((x) & 0xff) << 8)
+#define   DSI_BTA_TIMING_TTAGO(x)		(((x) & 0xff) << 0)
+
+#define DSI_TIMEOUT_0 0x44
+#define   DSI_TIMEOUT_0_LRXH_TO(x)		(((x) & 0xffff) << 16)
+#define   DSI_TIMEOUT_0_HTX_TO(x)		(((x) & 0xffff) << 0)
+
+#define DSI_TIMEOUT_1 0x45
+#define   DSI_TIMEOUT_1_PR_TO(x)		(((x) & 0xffff) << 16)
+#define   DSI_TIMEOUT_1_TA_TO(x)		(((x) & 0xffff) << 0)
+
+#define DSI_TO_TALLY 0x46
+/* values for DSI_TO_TALLY_P_RESET_STATUS */
+enum {
+	IN_RESET,
+	READY,
+};
+#define   DSI_TO_TALLY_P_RESET_STATUS(x)	(((x) & 0x1) << 24)
+#define   DSI_TO_TALLY_TA_TALLY(x)		(((x) & 0xff) << 16)
+#define   DSI_TO_TALLY_LRXH_TALLY(x)		(((x) & 0xff) << 8)
+#define   DSI_TO_TALLY_HTX_TALLY(x)		(((x) & 0xff) << 0)
+
+#define DSI_PAD_CONTROL 0x4b
+#define   DSI_PAD_CONTROL_PAD_PULLDN_ENAB(x)	(((x) & 0x1) << 28)
+#define   DSI_PAD_CONTROL_PAD_SLEWUPADJ(x)	(((x) & 0x7) << 24)
+#define   DSI_PAD_CONTROL_PAD_SLEWDNADJ(x)	(((x) & 0x7) << 20)
+#define   DSI_PAD_CONTROL_PAD_PREEMP_EN(x)	(((x) & 0x1) << 19)
+#define   DSI_PAD_CONTROL_PAD_PDIO_CLK(x)	(((x) & 0x1) << 18)
+#define   DSI_PAD_CONTROL_PAD_PDIO(x)		(((x) & 0x3) << 16)
+#define   DSI_PAD_CONTROL_PAD_LPUPADJ(x)	(((x) & 0x3) << 14)
+#define   DSI_PAD_CONTROL_PAD_LPDNADJ(x)	(((x) & 0x3) << 12)
+
+#define DSI_PAD_CONTROL_CD 0x4c
+#define DSI_PAD_CD_STATUS 0x4d
+#define DSI_VID_MODE_CONTROL 0x4e
+
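+/*
+ * Note: each field macro above masks a raw value and shifts it into its
+ * register position, so a full register word is composed by OR-ing
+ * fields together.  Illustrative sketch (the values are arbitrary, not
+ * taken from a real configuration):
+ *
+ *	u32 val = DSI_PKT_LEN_0_1_LENGTH_1(0x20) |
+ *		  DSI_PKT_LEN_0_1_LENGTH_0(0x10);
+ */
+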
+#endif
+
diff --git a/drivers/staging/tegra/video/dc/edid.c b/drivers/staging/tegra/video/dc/edid.c
new file mode 100644
index 000000000000..4d132e90e1e1
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/edid.c
@@ -0,0 +1,605 @@
+/*
+ * drivers/video/tegra/dc/edid.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/fb.h>
+#include <linux/i2c.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+#include "edid.h"
+
+#if defined(DEBUG) || defined(CONFIG_DEBUG_FS)
+static int tegra_edid_show(struct seq_file *s, void *unused)
+{
+	struct tegra_edid *edid = s->private;
+	struct tegra_dc_edid *data;
+	u8 *buf;
+	int i;
+
+	data = tegra_edid_get_data(edid);
+	if (!data) {
+		seq_printf(s, "No EDID\n");
+		return 0;
+	}
+
+	buf = data->buf;
+
+	for (i = 0; i < data->len; i++) {
+		if (i % 16 == 0)
+			seq_printf(s, "edid[%03x] =", i);
+
+		seq_printf(s, " %02x", buf[i]);
+
+		if (i % 16 == 15)
+			seq_printf(s, "\n");
+	}
+
+	tegra_edid_put_data(data);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_edid_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, tegra_edid_show, inode->i_private);
+}
+
+static const struct file_operations tegra_edid_debug_fops = {
+	.open		= tegra_edid_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+void tegra_edid_debug_add(struct tegra_edid *edid)
+{
+	char name[] = "edidX";
+
+	snprintf(name, sizeof(name), "edid%1d", edid->bus);
+	debugfs_create_file(name, S_IRUGO, NULL, edid, &tegra_edid_debug_fops);
+}
+#else
+void tegra_edid_debug_add(struct tegra_edid *edid)
+{
+}
+#endif
+
+#ifdef DEBUG
+static char tegra_edid_dump_buff[16 * 1024];
+
+static void tegra_edid_dump(struct tegra_edid *edid)
+{
+	struct seq_file s;
+	int i;
+	char c;
+
+	memset(&s, 0x0, sizeof(s));
+
+	s.buf = tegra_edid_dump_buff;
+	s.size = sizeof(tegra_edid_dump_buff);
+	s.private = edid;
+
+	tegra_edid_show(&s, NULL);
+
+	i = 0;
+	while (i < s.count) {
+		if ((s.count - i) > 256) {
+			c = s.buf[i + 256];
+			s.buf[i + 256] = 0;
+			printk("%s", s.buf + i);
+			s.buf[i + 256] = c;
+		} else {
+			printk("%s", s.buf + i);
+		}
+		i += 256;
+	}
+}
+#else
+static void tegra_edid_dump(struct tegra_edid *edid)
+{
+}
+#endif
+
+int tegra_edid_read_block(struct tegra_edid *edid, int block, u8 *data)
+{
+	u8 block_buf[] = {block >> 1};
+	u8 cmd_buf[] = {(block & 0x1) * 128};
+	int status;
+	struct i2c_msg msg[] = {
+		{
+			.addr = 0x30,
+			.flags = 0,
+			.len = 1,
+			.buf = block_buf,
+		},
+		{
+			.addr = 0x50,
+			.flags = 0,
+			.len = 1,
+			.buf = cmd_buf,
+		},
+		{
+			.addr = 0x50,
+			.flags = I2C_M_RD,
+			.len = 128,
+			.buf = data,
+		}};
+	struct i2c_msg *m;
+	int msg_len;
+
+	if (block > 1) {
+		msg_len = 3;
+		m = msg;
+	} else {
+		msg_len = 2;
+		m = &msg[1];
+	}
+
+	status = i2c_transfer(edid->client->adapter, m, msg_len);
+
+	if (status < 0)
+		return status;
+
+	if (status != msg_len)
+		return -EIO;
+
+	return 0;
+}
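+
+/*
+ * Usage sketch (illustrative; the caller shown here is hypothetical):
+ * read the base EDID block into a caller-owned 128-byte buffer.  For
+ * blocks above 1, the E-DDC segment pointer at i2c address 0x30 is
+ * programmed first.
+ *
+ *	u8 buf[128];
+ *	int err = tegra_edid_read_block(edid, 0, buf);
+ *	if (err)
+ *		return err;
+ */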
+
+int tegra_edid_parse_ext_block(const u8 *raw, int idx,
+			       struct tegra_edid_pvt *edid, u8 *vsdb)
+{
+	const u8 *ptr;
+	u8 tmp;
+	u8 code;
+	int len;
+	int i;
+	bool basic_audio = false;
+
+	ptr = &raw[0];
+
+	/* if this is a CEA 861 extension block, fill in ELD info */
+	if (edid && ptr) {
+		if (*ptr <= 3)
+			edid->eld.eld_ver = 0x02;
+		edid->eld.cea_edid_ver = ptr[1];
+
+		/* check for basic audio support in CEA 861 block */
+		if (raw[3] & (1 << 6)) {
+			/* For basic audio, set spk_alloc to Left+Right.
+			 * If there is a Speaker Alloc block, this will
+			 * be overwritten with that value. */
+			basic_audio = true;
+		}
+	}
+
+	if (raw[3] & 0x80)
+		edid->support_underscan = 1;
+	else
+		edid->support_underscan = 0;
+
+	ptr = &raw[4];
+
+	while (ptr < &raw[idx]) {
+		tmp = *ptr;
+		len = tmp & 0x1f;
+
+		/* HDMI Specification v1.4a, section 8.3.2:
+		 * see Table 8-16 for HDMI VSDB format.
+		 * data blocks have tags in top 3 bits:
+		 * tag code 2: video data block
+		 * tag code 3: vendor specific data block
+		 */
+		code = (tmp >> 5) & 0x7;
+		switch (code) {
+		case 1:
+		{
+			edid->eld.sad_count = len;
+			edid->eld.conn_type = 0x00;
+			edid->eld.support_hdcp = 0x00;
+			for (i = 0; (i < len) && (i < ELD_MAX_SAD); i++)
+				edid->eld.sad[i] = ptr[i + 1];
+			len++;
+			ptr += len; /* adding the header */
+			/* Got an audio data block so enable audio */
+			if (basic_audio)
+				edid->eld.spk_alloc = 1;
+			break;
+		}
+		/* case 2 (video data block) is not handled for now */
+		case 3:
+		{
+			int j = 0;
+
+			if ((ptr[1] == 0x03) &&
+				(ptr[2] == 0x0c) &&
+				(ptr[3] == 0)) {
+				edid->eld.port_id[0] = ptr[4];
+				edid->eld.port_id[1] = ptr[5];
+			}
+			if ((len >= 8) &&
+				(ptr[1] == 0x03) &&
+				(ptr[2] == 0x0c) &&
+				(ptr[3] == 0)) {
+				edid->eld.vsdb = 1;
+				*vsdb = 1;
+				j = 8;
+				tmp = ptr[j++];
+				/* HDMI_Video_present? */
+				if (tmp & 0x20) {
+					/* Latency_Fields_present? */
+					if (tmp & 0x80)
+						j += 2;
+					/* I_Latency_Fields_present? */
+					if (tmp & 0x40)
+						j += 2;
+					/* 3D_present? */
+					if (j <= len && (ptr[j] & 0x80))
+						edid->support_stereo = 1;
+				}
+			}
+			if ((len > 5) &&
+				(ptr[1] == 0x03) &&
+				(ptr[2] == 0x0c) &&
+				(ptr[3] == 0)) {
+
+				edid->eld.support_ai = (ptr[6] & 0x80);
+			}
+
+			if ((len > 9) &&
+				(ptr[1] == 0x03) &&
+				(ptr[2] == 0x0c) &&
+				(ptr[3] == 0)) {
+
+				edid->eld.aud_synch_delay = ptr[10];
+			}
+			len++;
+			ptr += len; /* adding the header */
+			break;
+		}
+		case 4:
+		{
+			edid->eld.spk_alloc = ptr[1];
+			len++;
+			ptr += len; /* adding the header */
+			break;
+		}
+		default:
+			len++; /* len does not include header */
+			ptr += len;
+			break;
+		}
+	}
+
+	return 0;
+}
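+
+/*
+ * Worked example for the header decode above: a data block that starts
+ * with the byte 0x23 has tag code (0x23 >> 5) & 0x7 == 1 (audio) and
+ * payload length 0x23 & 0x1f == 3, i.e. a single 3-byte Short Audio
+ * Descriptor follows the header byte.
+ */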
+
+int tegra_edid_mode_support_stereo(struct fb_videomode *mode)
+{
+	if (!mode)
+		return 0;
+
+	if (mode->xres == 1280 &&
+		mode->yres == 720 &&
+		((mode->refresh == 60) || (mode->refresh == 50)))
+		return 1;
+
+	if (mode->xres == 1920 && mode->yres == 1080 && mode->refresh == 24)
+		return 1;
+
+	return 0;
+}
+
+static void data_release(struct kref *ref)
+{
+	struct tegra_edid_pvt *data =
+		container_of(ref, struct tegra_edid_pvt, refcnt);
+	vfree(data);
+}
+
+int tegra_edid_get_monspecs_test(struct tegra_edid *edid,
+			struct fb_monspecs *specs, unsigned char *edid_ptr)
+{
+	int i, j, ret;
+	int extension_blocks;
+	struct tegra_edid_pvt *new_data, *old_data;
+	u8 *data;
+	u8 vsdb = 0;
+
+	new_data = vmalloc(SZ_32K + sizeof(struct tegra_edid_pvt));
+	if (!new_data)
+		return -ENOMEM;
+
+	kref_init(&new_data->refcnt);
+
+	new_data->support_stereo = 0;
+	new_data->support_underscan = 0;
+
+	data = new_data->dc_edid.buf;
+	memcpy(data, edid_ptr, 128);
+
+	memset(specs, 0x0, sizeof(struct fb_monspecs));
+	memset(&new_data->eld, 0x0, sizeof(new_data->eld));
+	fb_edid_to_monspecs(data, specs);
+	if (specs->modedb == NULL) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	memcpy(new_data->eld.monitor_name, specs->monitor,
+					sizeof(specs->monitor));
+
+	new_data->eld.mnl = strlen(new_data->eld.monitor_name) + 1;
+	new_data->eld.product_id[0] = data[0x8];
+	new_data->eld.product_id[1] = data[0x9];
+	new_data->eld.manufacture_id[0] = data[0xA];
+	new_data->eld.manufacture_id[1] = data[0xB];
+
+	extension_blocks = data[0x7e];
+	for (i = 1; i <= extension_blocks; i++) {
+		memcpy(data + i * 128, edid_ptr + i * 128, 128);
+
+		if (data[i * 128] == 0x2) {
+			fb_edid_add_monspecs(data + i * 128, specs);
+
+			tegra_edid_parse_ext_block(data + i * 128,
+					data[i * 128 + 2], new_data, &vsdb);
+
+			if (new_data->support_stereo) {
+				for (j = 0; j < specs->modedb_len; j++) {
+					if (tegra_edid_mode_support_stereo(
+						&specs->modedb[j]))
+						specs->modedb[j].vmode |=
+#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
+						FB_VMODE_STEREO_FRAME_PACK;
+#else
+						FB_VMODE_STEREO_LEFT_RIGHT;
+#endif
+				}
+			}
+		}
+	}
+
+	new_data->dc_edid.len = i * 128;
+
+	mutex_lock(&edid->lock);
+	old_data = edid->data;
+	edid->data = new_data;
+	mutex_unlock(&edid->lock);
+
+	if (old_data)
+		kref_put(&old_data->refcnt, data_release);
+
+	tegra_edid_dump(edid);
+	return 0;
+fail:
+	vfree(new_data);
+	return ret;
+}
+
+int tegra_edid_get_monspecs(struct tegra_edid *edid, struct fb_monspecs *specs)
+{
+	int i;
+	int j;
+	int ret;
+	int extension_blocks;
+	struct tegra_edid_pvt *new_data, *old_data;
+	u8 *data;
+	u8 vsdb = 0;
+
+	new_data = vmalloc(SZ_32K + sizeof(struct tegra_edid_pvt));
+	if (!new_data)
+		return -ENOMEM;
+
+	kref_init(&new_data->refcnt);
+
+	new_data->support_stereo = 0;
+
+	data = new_data->dc_edid.buf;
+
+	ret = tegra_edid_read_block(edid, 0, data);
+	if (ret)
+		goto fail;
+
+	memset(specs, 0x0, sizeof(struct fb_monspecs));
+	memset(&new_data->eld, 0x0, sizeof(new_data->eld));
+	fb_edid_to_monspecs(data, specs);
+	if (specs->modedb == NULL) {
+		ret = -EINVAL;
+		goto fail;
+	}
+	memcpy(new_data->eld.monitor_name, specs->monitor,
+					sizeof(specs->monitor));
+	new_data->eld.mnl = strlen(new_data->eld.monitor_name) + 1;
+	new_data->eld.product_id[0] = data[0x8];
+	new_data->eld.product_id[1] = data[0x9];
+	new_data->eld.manufacture_id[0] = data[0xA];
+	new_data->eld.manufacture_id[1] = data[0xB];
+
+	extension_blocks = data[0x7e];
+
+	for (i = 1; i <= extension_blocks; i++) {
+		ret = tegra_edid_read_block(edid, i, data + i * 128);
+		if (ret < 0)
+			break;
+
+		if (data[i * 128] == 0x2) {
+			fb_edid_add_monspecs(data + i * 128, specs);
+
+			tegra_edid_parse_ext_block(data + i * 128,
+					data[i * 128 + 2], new_data, &vsdb);
+
+			if (new_data->support_stereo) {
+				for (j = 0; j < specs->modedb_len; j++) {
+					if (tegra_edid_mode_support_stereo(
+						&specs->modedb[j]))
+						specs->modedb[j].vmode |=
+#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
+						FB_VMODE_STEREO_FRAME_PACK;
+#else
+						FB_VMODE_STEREO_LEFT_RIGHT;
+#endif
+				}
+			}
+		}
+	}
+
+	new_data->dc_edid.len = i * 128;
+
+	mutex_lock(&edid->lock);
+	old_data = edid->data;
+	edid->data = new_data;
+	edid->vsdb = vsdb;
+
+	mutex_unlock(&edid->lock);
+
+	if (old_data)
+		kref_put(&old_data->refcnt, data_release);
+
+	tegra_edid_dump(edid);
+	return 0;
+
+fail:
+	vfree(new_data);
+	return ret;
+}
+
+int tegra_edid_underscan_supported(struct tegra_edid *edid)
+{
+	if ((!edid) || (!edid->data))
+		return 0;
+
+	return edid->data->support_underscan;
+}
+
+int tegra_edid_get_eld(struct tegra_edid *edid, struct tegra_edid_hdmi_eld *elddata)
+{
+	if (!elddata || !edid->data)
+		return -EFAULT;
+
+	memcpy(elddata, &edid->data->eld,
+	       sizeof(struct tegra_edid_hdmi_eld));
+
+	return 0;
+}
+
+struct tegra_edid *tegra_edid_create(int bus)
+{
+	struct tegra_edid *edid;
+	struct i2c_adapter *adapter;
+	int err;
+
+	edid = kzalloc(sizeof(struct tegra_edid), GFP_KERNEL);
+	if (!edid)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&edid->lock);
+	strlcpy(edid->info.type, "tegra_edid", sizeof(edid->info.type));
+	edid->bus = bus;
+	edid->info.addr = 0x50;
+	edid->info.platform_data = edid;
+
+	adapter = i2c_get_adapter(bus);
+	if (!adapter) {
+		pr_err("can't get adpater for bus %d\n", bus);
+		err = -EBUSY;
+		goto free_edid;
+	}
+
+	edid->client = i2c_new_device(adapter, &edid->info);
+	i2c_put_adapter(adapter);
+
+	if (!edid->client) {
+		pr_err("can't create new device\n");
+		err = -EBUSY;
+		goto free_edid;
+	}
+
+	tegra_edid_debug_add(edid);
+
+	return edid;
+
+free_edid:
+	kfree(edid);
+
+	return ERR_PTR(err);
+}
+
+void tegra_edid_destroy(struct tegra_edid *edid)
+{
+	i2c_release_client(edid->client);
+	if (edid->data)
+		kref_put(&edid->data->refcnt, data_release);
+	kfree(edid);
+}
+
+struct tegra_dc_edid *tegra_edid_get_data(struct tegra_edid *edid)
+{
+	struct tegra_edid_pvt *data;
+
+	mutex_lock(&edid->lock);
+	data = edid->data;
+	if (data)
+		kref_get(&data->refcnt);
+	mutex_unlock(&edid->lock);
+
+	return data ? &data->dc_edid : NULL;
+}
+
+void tegra_edid_put_data(struct tegra_dc_edid *data)
+{
+	struct tegra_edid_pvt *pvt;
+
+	if (!data)
+		return;
+
+	pvt = container_of(data, struct tegra_edid_pvt, dc_edid);
+
+	kref_put(&pvt->refcnt, data_release);
+}
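+
+/*
+ * tegra_edid_get_data()/tegra_edid_put_data() are a refcounted pair and
+ * must be balanced by the caller, as in this sketch (use_edid() is
+ * hypothetical; tegra_edid_put_data() tolerates a NULL argument):
+ *
+ *	struct tegra_dc_edid *data = tegra_edid_get_data(edid);
+ *	if (data)
+ *		use_edid(data->buf, data->len);
+ *	tegra_edid_put_data(data);
+ */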
+
+static const struct i2c_device_id tegra_edid_id[] = {
+	{ "tegra_edid", 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, tegra_edid_id);
+
+static struct i2c_driver tegra_edid_driver = {
+	.id_table = tegra_edid_id,
+	.driver = {
+		.name = "tegra_edid",
+	},
+};
+
+static int __init tegra_edid_init(void)
+{
+	return i2c_add_driver(&tegra_edid_driver);
+}
+
+static void __exit tegra_edid_exit(void)
+{
+	i2c_del_driver(&tegra_edid_driver);
+}
+
+module_init(tegra_edid_init);
+module_exit(tegra_edid_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/tegra/video/dc/edid.h b/drivers/staging/tegra/video/dc/edid.h
new file mode 100644
index 000000000000..d9c1a3efdf43
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/edid.h
@@ -0,0 +1,86 @@
+/*
+ * drivers/video/tegra/dc/edid.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_EDID_H
+#define __DRIVERS_VIDEO_TEGRA_DC_EDID_H
+
+#include <linux/i2c.h>
+#include <linux/wait.h>
+
+#include "dc.h"
+
+#define ELD_MAX_MNL	16
+#define ELD_MAX_SAD	16
+struct tegra_edid;
+
+/*
+ * ELD: EDID Like Data
+ */
+struct tegra_edid_hdmi_eld {
+	u8	baseline_len;
+	u8	eld_ver;
+	u8	cea_edid_ver;
+	char	monitor_name[ELD_MAX_MNL + 1];
+	u8	mnl;
+	u8	manufacture_id[2];
+	u8	product_id[2];
+	u8	port_id[8];
+	u8	support_hdcp;
+	u8	support_ai;
+	u8	conn_type;
+	u8	aud_synch_delay;
+	u8	spk_alloc;
+	u8	sad_count;
+	u8	vsdb;
+	u8	sad[ELD_MAX_SAD];
+};
+
+// The struct "tegra_edid_pvt" come from file "edid.c".
+struct tegra_edid_pvt {
+	struct kref					refcnt;
+	struct tegra_edid_hdmi_eld	eld;
+	bool						support_stereo;
+	bool						support_underscan;
+	/* Note: dc_edid must remain the last member */
+	struct tegra_dc_edid		dc_edid;
+};
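+
+/*
+ * dc_edid ends in a variable-length buffer, which is why it must remain
+ * the last member: edid.c sizes the whole object with one allocation,
+ * roughly
+ *
+ *	pvt = vmalloc(SZ_32K + sizeof(struct tegra_edid_pvt));
+ *
+ * so the raw EDID bytes spill into the space after the struct.
+ */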
+
+// The struct "tegra_edid" come from file "edid.c".
+struct tegra_edid {
+	struct i2c_client	*client;
+	struct i2c_board_info	info;
+	int			bus;
+
+	struct tegra_edid_pvt	*data;
+
+	struct mutex		lock;
+	u8			vsdb;
+};
+
+struct tegra_edid *tegra_edid_create(int bus);
+void tegra_edid_destroy(struct tegra_edid *edid);
+
+int tegra_edid_get_monspecs_test(struct tegra_edid *edid,
+				struct fb_monspecs *specs, u8 *edid_ptr);
+int tegra_edid_get_monspecs(struct tegra_edid *edid, struct fb_monspecs *specs);
+int tegra_edid_get_eld(struct tegra_edid *edid, struct tegra_edid_hdmi_eld *elddata);
+
+struct tegra_dc_edid *tegra_edid_get_data(struct tegra_edid *edid);
+void tegra_edid_put_data(struct tegra_dc_edid *data);
+
+int tegra_edid_underscan_supported(struct tegra_edid *edid);
+#endif
diff --git a/drivers/staging/tegra/video/dc/ext/Makefile b/drivers/staging/tegra/video/dc/ext/Makefile
new file mode 100644
index 000000000000..a7eb3b01aa1a
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/ext/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+obj-y += dev.o
+obj-y += util.o
+obj-y += cursor.o
+obj-y += events.o
+obj-y += control.o
diff --git a/drivers/staging/tegra/video/dc/ext/control.c b/drivers/staging/tegra/video/dc/ext/control.c
new file mode 100644
index 000000000000..ed812be2eab5
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/ext/control.c
@@ -0,0 +1,276 @@
+/*
+ * drivers/video/tegra/dc/ext/control.c
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "tegra_dc_ext_priv.h"
+
+static struct tegra_dc_ext_control g_control;
+
+int tegra_dc_ext_process_hotplug(int output)
+{
+	return tegra_dc_ext_queue_hotplug(&g_control, output);
+}
+
+static int
+get_output_properties(struct tegra_dc_ext_control_output_properties *properties)
+{
+	struct tegra_dc *dc;
+
+	/* TODO: this should be more dynamic */
+	if (properties->handle > 2)
+		return -EINVAL;
+
+	switch (properties->handle) {
+	case 0:
+		properties->type = TEGRA_DC_EXT_LVDS;
+		break;
+	case 1:
+		properties->type = TEGRA_DC_EXT_HDMI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	properties->associated_head = properties->handle;
+	properties->head_mask = (1 << properties->associated_head);
+
+	dc = tegra_dc_get_dc(properties->associated_head);
+	properties->connected = tegra_dc_get_connected(dc);
+
+	return 0;
+}
+
+static int get_output_edid(struct tegra_dc_ext_control_output_edid *edid)
+{
+	struct tegra_dc *dc;
+	size_t user_size = edid->size;
+	struct tegra_dc_edid *dc_edid = NULL;
+	int ret = 0;
+
+	/* TODO: this should be more dynamic */
+	if (edid->handle > 2)
+		return -EINVAL;
+
+	dc = tegra_dc_get_dc(edid->handle);
+
+	dc_edid = tegra_dc_get_edid(dc);
+	if (IS_ERR(dc_edid))
+		return PTR_ERR(dc_edid);
+
+	if (!dc_edid) {
+		edid->size = 0;
+	} else {
+		edid->size = dc_edid->len;
+
+		if (user_size < edid->size) {
+			ret = -EFBIG;
+			goto done;
+		}
+
+		if (copy_to_user(edid->data, dc_edid->buf, edid->size)) {
+			ret = -EFAULT;
+			goto done;
+		}
+
+	}
+
+done:
+	if (dc_edid)
+		tegra_dc_put_edid(dc_edid);
+
+	return ret;
+}
+
+static int set_event_mask(struct tegra_dc_ext_control_user *user, u32 mask)
+{
+	struct list_head *list, *tmp;
+
+	if (mask & ~TEGRA_DC_EXT_EVENT_MASK_ALL)
+		return -EINVAL;
+
+	mutex_lock(&user->lock);
+
+	user->event_mask = mask;
+
+	list_for_each_safe(list, tmp, &user->event_list) {
+		struct tegra_dc_ext_event_list *ev_list;
+		ev_list = list_entry(list, struct tegra_dc_ext_event_list,
+			list);
+		if (!(mask & ev_list->event.type)) {
+			list_del(list);
+			kfree(ev_list);
+		}
+	}
+	mutex_unlock(&user->lock);
+
+	return 0;
+}
+
+static int get_capabilities(struct tegra_dc_ext_control_capabilities *caps)
+{
+	caps->caps = TEGRA_DC_EXT_CAPABILITIES;
+	return 0;
+}
+
+static long tegra_dc_ext_control_ioctl(struct file *filp, unsigned int cmd,
+				       unsigned long arg)
+{
+	void __user *user_arg = (void __user *)arg;
+	struct tegra_dc_ext_control_user *user = filp->private_data;
+
+	switch (cmd) {
+	case TEGRA_DC_EXT_CONTROL_GET_NUM_OUTPUTS:
+	{
+		u32 num = tegra_dc_ext_get_num_outputs();
+
+		if (copy_to_user(user_arg, &num, sizeof(num)))
+			return -EFAULT;
+
+		return 0;
+	}
+	case TEGRA_DC_EXT_CONTROL_GET_OUTPUT_PROPERTIES:
+	{
+		struct tegra_dc_ext_control_output_properties args;
+		int ret;
+
+		if (copy_from_user(&args, user_arg, sizeof(args)))
+			return -EFAULT;
+
+		ret = get_output_properties(&args);
+
+		if (copy_to_user(user_arg, &args, sizeof(args)))
+			return -EFAULT;
+
+		return ret;
+	}
+	case TEGRA_DC_EXT_CONTROL_GET_OUTPUT_EDID:
+	{
+		struct tegra_dc_ext_control_output_edid args;
+		int ret;
+
+		if (copy_from_user(&args, user_arg, sizeof(args)))
+			return -EFAULT;
+
+		ret = get_output_edid(&args);
+
+		if (copy_to_user(user_arg, &args, sizeof(args)))
+			return -EFAULT;
+
+		return ret;
+	}
+	case TEGRA_DC_EXT_CONTROL_SET_EVENT_MASK:
+		return set_event_mask(user, (u32) arg);
+	case TEGRA_DC_EXT_CONTROL_GET_CAPABILITIES:
+	{
+		struct tegra_dc_ext_control_capabilities args;
+		int ret;
+
+		ret = get_capabilities(&args);
+
+		if (copy_to_user(user_arg, &args, sizeof(args)))
+			return -EFAULT;
+
+		return ret;
+	}
+	default:
+		return -EINVAL;
+	}
+}
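+
+/*
+ * Userspace sketch (illustrative; assumes the usual /dev naming for the
+ * "tegra_dc_ctrl" node created in tegra_dc_ext_control_init() below):
+ *
+ *	int fd = open("/dev/tegra_dc_ctrl", O_RDWR);
+ *	__u32 num;
+ *	ioctl(fd, TEGRA_DC_EXT_CONTROL_GET_NUM_OUTPUTS, &num);
+ */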
+
+static int tegra_dc_ext_control_open(struct inode *inode, struct file *filp)
+{
+	struct tegra_dc_ext_control_user *user;
+	struct tegra_dc_ext_control *control;
+
+	user = kzalloc(sizeof(*user), GFP_KERNEL);
+	if (!user)
+		return -ENOMEM;
+
+	control = container_of(inode->i_cdev, struct tegra_dc_ext_control,
+		cdev);
+	user->control = control;
+
+	INIT_LIST_HEAD(&user->event_list);
+	mutex_init(&user->lock);
+
+	filp->private_data = user;
+
+	mutex_lock(&control->lock);
+	list_add(&user->list, &control->users);
+	mutex_unlock(&control->lock);
+
+	return 0;
+}
+
+static int tegra_dc_ext_control_release(struct inode *inode, struct file *filp)
+{
+	struct tegra_dc_ext_control_user *user = filp->private_data;
+	struct tegra_dc_ext_control *control = user->control;
+
+	/* This will free any pending events for this user */
+	set_event_mask(user, 0);
+
+	mutex_lock(&control->lock);
+	list_del(&user->list);
+	mutex_unlock(&control->lock);
+
+	kfree(user);
+
+	return 0;
+}
+
+static const struct file_operations tegra_dc_ext_event_devops = {
+	.owner =		THIS_MODULE,
+	.open =			tegra_dc_ext_control_open,
+	.release =		tegra_dc_ext_control_release,
+	.read =			tegra_dc_ext_event_read,
+	.poll =			tegra_dc_ext_event_poll,
+	.unlocked_ioctl =	tegra_dc_ext_control_ioctl,
+};
+
+int tegra_dc_ext_control_init(void)
+{
+	struct tegra_dc_ext_control *control = &g_control;
+	int ret;
+
+	cdev_init(&control->cdev, &tegra_dc_ext_event_devops);
+	control->cdev.owner = THIS_MODULE;
+	ret = cdev_add(&control->cdev, tegra_dc_ext_devno, 1);
+	if (ret)
+		return ret;
+
+	control->dev = device_create(tegra_dc_ext_class,
+	     NULL, tegra_dc_ext_devno, NULL, "tegra_dc_ctrl");
+	if (IS_ERR(control->dev)) {
+		ret = PTR_ERR(control->dev);
+		cdev_del(&control->cdev);
+	}
+
+	mutex_init(&control->lock);
+
+	INIT_LIST_HEAD(&control->users);
+
+	return ret;
+}
diff --git a/drivers/staging/tegra/video/dc/ext/cursor.c b/drivers/staging/tegra/video/dc/ext/cursor.c
new file mode 100644
index 000000000000..970f38f5ac09
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/ext/cursor.c
@@ -0,0 +1,203 @@
+/*
+ * drivers/video/tegra/dc/ext/cursor.c
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <video/tegra_dc_ext.h>
+
+#include "tegra_dc_ext_priv.h"
+
+/* ugh */
+#include "../dc_priv.h"
+#include "../dc_reg.h"
+
+int tegra_dc_ext_get_cursor(struct tegra_dc_ext_user *user)
+{
+	struct tegra_dc_ext *ext = user->ext;
+	int ret = 0;
+
+	mutex_lock(&ext->cursor.lock);
+
+	if (!ext->cursor.user)
+		ext->cursor.user = user;
+	else if (ext->cursor.user != user)
+		ret = -EBUSY;
+
+	mutex_unlock(&ext->cursor.lock);
+
+	return ret;
+}
+
+int tegra_dc_ext_put_cursor(struct tegra_dc_ext_user *user)
+{
+	struct tegra_dc_ext *ext = user->ext;
+	int ret = 0;
+
+	mutex_lock(&ext->cursor.lock);
+
+	if (ext->cursor.user == user)
+		ext->cursor.user = NULL;
+	else
+		ret = -EACCES;
+
+	mutex_unlock(&ext->cursor.lock);
+
+	return ret;
+}
+
+static void set_cursor_image_hw(struct tegra_dc *dc,
+				struct tegra_dc_ext_cursor_image *args,
+				dma_addr_t phys_addr)
+{
+	tegra_dc_writel(dc,
+		CURSOR_COLOR(args->foreground.r,
+			     args->foreground.g,
+			     args->foreground.b),
+		DC_DISP_CURSOR_FOREGROUND);
+	tegra_dc_writel(dc,
+		CURSOR_COLOR(args->background.r,
+			     args->background.g,
+			     args->background.b),
+		DC_DISP_CURSOR_BACKGROUND);
+
+	BUG_ON(phys_addr & ~CURSOR_START_ADDR_MASK);
+
+	tegra_dc_writel(dc,
+		CURSOR_START_ADDR(((unsigned long) phys_addr)) |
+		((args->flags & TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64) ?
+			CURSOR_SIZE_64 : 0),
+		DC_DISP_CURSOR_START_ADDR);
+}
+
+int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
+				  struct tegra_dc_ext_cursor_image *args)
+{
+	struct tegra_dc_ext *ext = user->ext;
+	struct tegra_dc *dc = ext->dc;
+	struct nvmap_handle_ref *handle, *old_handle;
+	dma_addr_t phys_addr;
+	u32 size;
+	int ret;
+
+	if (!user->nvmap)
+		return -EFAULT;
+
+	size = args->flags & (TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 |
+			      TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64);
+
+	if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
+	    size !=  TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64)
+		return -EINVAL;
+
+	mutex_lock(&ext->cursor.lock);
+
+	if (ext->cursor.user != user) {
+		ret = -EACCES;
+		goto unlock;
+	}
+
+	if (!ext->enabled) {
+		ret = -ENXIO;
+		goto unlock;
+	}
+
+	old_handle = ext->cursor.cur_handle;
+
+	ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
+	if (ret)
+		goto unlock;
+
+	ext->cursor.cur_handle = handle;
+
+	mutex_lock(&dc->lock);
+
+	set_cursor_image_hw(dc, args, phys_addr);
+
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	/* XXX sync here? */
+
+	mutex_unlock(&dc->lock);
+
+	mutex_unlock(&ext->cursor.lock);
+
+	if (old_handle) {
+		nvmap_unpin(ext->nvmap, old_handle);
+		nvmap_free(ext->nvmap, old_handle);
+	}
+
+	return 0;
+
+unlock:
+	mutex_unlock(&ext->cursor.lock);
+
+	return ret;
+}
+
+int tegra_dc_ext_set_cursor(struct tegra_dc_ext_user *user,
+			    struct tegra_dc_ext_cursor *args)
+{
+	struct tegra_dc_ext *ext = user->ext;
+	struct tegra_dc *dc = ext->dc;
+	u32 win_options;
+	bool enable;
+	int ret;
+
+	mutex_lock(&ext->cursor.lock);
+
+	if (ext->cursor.user != user) {
+		ret = -EACCES;
+		goto unlock;
+	}
+
+	if (!ext->enabled) {
+		ret = -ENXIO;
+		goto unlock;
+	}
+
+	enable = !!(args->flags & TEGRA_DC_EXT_CURSOR_FLAGS_VISIBLE);
+
+	mutex_lock(&dc->lock);
+
+	win_options = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	if (!!(win_options & CURSOR_ENABLE) != enable) {
+		win_options &= ~CURSOR_ENABLE;
+		if (enable)
+			win_options |= CURSOR_ENABLE;
+		tegra_dc_writel(dc, win_options, DC_DISP_DISP_WIN_OPTIONS);
+	}
+
+	tegra_dc_writel(dc, CURSOR_POSITION(args->x, args->y),
+		DC_DISP_CURSOR_POSITION);
+
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	/* TODO: need to sync here?  hopefully can avoid this, but need to
+	 * figure out interaction w/ rest of GENERAL_ACT_REQ */
+
+	mutex_unlock(&dc->lock);
+
+	mutex_unlock(&ext->cursor.lock);
+
+	return 0;
+
+unlock:
+	mutex_unlock(&ext->cursor.lock);
+
+	return ret;
+}
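+
+/*
+ * Expected call order from userspace (per the checks above): a client
+ * first claims the cursor with TEGRA_DC_EXT_GET_CURSOR, may then issue
+ * TEGRA_DC_EXT_SET_CURSOR_IMAGE / TEGRA_DC_EXT_SET_CURSOR, and any
+ * other client gets -EACCES until TEGRA_DC_EXT_PUT_CURSOR releases it.
+ */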
diff --git a/drivers/staging/tegra/video/dc/ext/dev.c b/drivers/staging/tegra/video/dc/ext/dev.c
new file mode 100644
index 000000000000..fcb795b4d4e8
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/ext/dev.c
@@ -0,0 +1,1140 @@
+/*
+ * drivers/video/tegra/dc/dev.c
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ * Some code based on fbdev extensions written by:
+ *	Erik Gilling <konkers@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <video/tegra_dc_ext.h>
+
+#include <linux/nvmap.h>
+
+/* XXX ew */
+#include "../tegra_dc_ext.h"
+#include "../dc.h"
+#include "../dc_priv.h"
+#include "../dc_config.h"
+/* XXX ew 2 */
+#include "../../host/dev.h"
+/* XXX ew 3 */
+#include "../../nvmap/nvmap.h"
+#include "tegra_dc_ext_priv.h"
+
+int tegra_dc_ext_devno;
+struct class *tegra_dc_ext_class;
+static int head_count;
+
+struct tegra_dc_ext_flip_win {
+	struct tegra_dc_ext_flip_windowattr	attr;
+	struct nvmap_handle_ref			*handle[TEGRA_DC_NUM_PLANES];
+	dma_addr_t				phys_addr;
+	dma_addr_t				phys_addr_u;
+	dma_addr_t				phys_addr_v;
+	u32					syncpt_max;
+};
+
+struct tegra_dc_ext_flip_data {
+	struct tegra_dc_ext		*ext;
+	struct work_struct		work;
+	struct tegra_dc_ext_flip_win	win[DC_N_WINDOWS];
+	struct list_head		timestamp_node;
+};
+
+int tegra_dc_ext_get_num_outputs(void)
+{
+	/* TODO: decouple output count from head count */
+	return head_count;
+}
+
+static int tegra_dc_ext_set_nvmap_fd(struct tegra_dc_ext_user *user,
+				     int fd)
+{
+	struct nvmap_client *nvmap = NULL;
+
+	if (fd >= 0) {
+		nvmap = nvmap_client_get_file(fd);
+		if (IS_ERR(nvmap))
+			return PTR_ERR(nvmap);
+	}
+
+	if (user->nvmap)
+		nvmap_client_put(user->nvmap);
+
+	user->nvmap = nvmap;
+
+	return 0;
+}
+
+static int tegra_dc_ext_get_window(struct tegra_dc_ext_user *user,
+				   unsigned int n)
+{
+	struct tegra_dc_ext *ext = user->ext;
+	struct tegra_dc_ext_win *win;
+	int ret = 0;
+
+	if (n >= DC_N_WINDOWS)
+		return -EINVAL;
+
+	win = &ext->win[n];
+
+	mutex_lock(&win->lock);
+
+	if (!win->user)
+		win->user = user;
+	else if (win->user != user)
+		ret = -EBUSY;
+
+	mutex_unlock(&win->lock);
+
+	return ret;
+}
+
+static int tegra_dc_ext_put_window(struct tegra_dc_ext_user *user,
+				   unsigned int n)
+{
+	struct tegra_dc_ext *ext = user->ext;
+	struct tegra_dc_ext_win *win;
+	int ret = 0;
+
+	if (n >= DC_N_WINDOWS)
+		return -EINVAL;
+
+	win = &ext->win[n];
+
+	mutex_lock(&win->lock);
+
+	if (win->user == user) {
+		flush_workqueue(win->flip_wq);
+		win->user = NULL;
+	} else {
+		ret = -EACCES;
+	}
+
+	mutex_unlock(&win->lock);
+
+	return ret;
+}
+
+static void set_enable(struct tegra_dc_ext *ext, bool en)
+{
+	int i;
+
+	/*
+	 * Take all locks to make sure any flip requests or cursor moves are
+	 * out of their critical sections
+	 */
+	for (i = 0; i < ext->dc->n_windows; i++)
+		mutex_lock(&ext->win[i].lock);
+	mutex_lock(&ext->cursor.lock);
+
+	ext->enabled = en;
+
+	mutex_unlock(&ext->cursor.lock);
+	for (i = ext->dc->n_windows - 1; i >= 0 ; i--)
+		mutex_unlock(&ext->win[i].lock);
+}
+
+void tegra_dc_ext_enable(struct tegra_dc_ext *ext)
+{
+	set_enable(ext, true);
+}
+
+void tegra_dc_ext_disable(struct tegra_dc_ext *ext)
+{
+	int i;
+	set_enable(ext, false);
+
+	/*
+	 * Flush the flip queue -- note that this must be called with dc->lock
+	 * unlocked or else it will hang.
+	 */
+	for (i = 0; i < ext->dc->n_windows; i++) {
+		struct tegra_dc_ext_win *win = &ext->win[i];
+
+		flush_workqueue(win->flip_wq);
+	}
+}
+
+int tegra_dc_ext_check_windowattr(struct tegra_dc_ext *ext,
+						struct tegra_dc_win *win)
+{
+	long *addr;
+	struct tegra_dc *dc = ext->dc;
+
+	/* Check the window format */
+	addr = tegra_dc_parse_feature(dc, win->idx, GET_WIN_FORMATS);
+	if (!test_bit(win->fmt, addr)) {
+		dev_err(&dc->ndev->dev, "Color format of window %d is"
+						" invalid.\n", win->idx);
+		goto fail;
+	}
+
+	/* Check window size */
+	addr = tegra_dc_parse_feature(dc, win->idx, GET_WIN_SIZE);
+	if (CHECK_SIZE(win->out_w, addr[MIN_WIDTH], addr[MAX_WIDTH]) ||
+		CHECK_SIZE(win->out_h, addr[MIN_HEIGHT], addr[MAX_HEIGHT])) {
+		dev_err(&dc->ndev->dev, "Size of window %d is"
+						" invalid.\n", win->idx);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int tegra_dc_ext_set_windowattr(struct tegra_dc_ext *ext,
+			       struct tegra_dc_win *win,
+			       const struct tegra_dc_ext_flip_win *flip_win)
+{
+	int err = 0;
+	struct tegra_dc_ext_win *ext_win = &ext->win[win->idx];
+	s64 timestamp_ns;
+
+	if (flip_win->handle[TEGRA_DC_Y] == NULL) {
+		win->flags = 0;
+		memset(ext_win->cur_handle, 0, sizeof(ext_win->cur_handle));
+		return 0;
+	}
+
+	win->flags = TEGRA_WIN_FLAG_ENABLED;
+	if (flip_win->attr.blend == TEGRA_DC_EXT_BLEND_PREMULT)
+		win->flags |= TEGRA_WIN_FLAG_BLEND_PREMULT;
+	else if (flip_win->attr.blend == TEGRA_DC_EXT_BLEND_COVERAGE)
+		win->flags |= TEGRA_WIN_FLAG_BLEND_COVERAGE;
+	if (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_TILED)
+		win->flags |= TEGRA_WIN_FLAG_TILED;
+	if (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_INVERT_H)
+		win->flags |= TEGRA_WIN_FLAG_INVERT_H;
+	if (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_INVERT_V)
+		win->flags |= TEGRA_WIN_FLAG_INVERT_V;
+	if (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_GLOBAL_ALPHA)
+		win->global_alpha = flip_win->attr.global_alpha;
+	else
+		win->global_alpha = 255;
+	win->fmt = flip_win->attr.pixformat;
+	win->x.full = flip_win->attr.x;
+	win->y.full = flip_win->attr.y;
+	win->w.full = flip_win->attr.w;
+	win->h.full = flip_win->attr.h;
+	/* XXX verify that this doesn't go outside display's active region */
+	win->out_x = flip_win->attr.out_x;
+	win->out_y = flip_win->attr.out_y;
+	win->out_w = flip_win->attr.out_w;
+	win->out_h = flip_win->attr.out_h;
+	win->z = flip_win->attr.z;
+	memcpy(ext_win->cur_handle, flip_win->handle,
+	       sizeof(ext_win->cur_handle));
+
+	/* XXX verify that this won't read outside of the surface */
+	win->phys_addr = flip_win->phys_addr + flip_win->attr.offset;
+
+	win->phys_addr_u = flip_win->handle[TEGRA_DC_U] ?
+		flip_win->phys_addr_u : flip_win->phys_addr;
+	win->phys_addr_u += flip_win->attr.offset_u;
+
+	win->phys_addr_v = flip_win->handle[TEGRA_DC_V] ?
+		flip_win->phys_addr_v : flip_win->phys_addr;
+	win->phys_addr_v += flip_win->attr.offset_v;
+
+	win->stride = flip_win->attr.stride;
+	win->stride_uv = flip_win->attr.stride_uv;
+
+	err = tegra_dc_ext_check_windowattr(ext, win);
+	if (err < 0)
+		dev_err(&ext->dc->ndev->dev,
+				"Window atrributes are invalid.\n");
+
+	if ((s32)flip_win->attr.pre_syncpt_id >= 0) {
+		nvhost_syncpt_wait_timeout(
+				&nvhost_get_host(ext->dc->ndev)->syncpt,
+				flip_win->attr.pre_syncpt_id,
+				flip_win->attr.pre_syncpt_val,
+				msecs_to_jiffies(500), NULL);
+	}
+
+	timestamp_ns = timespec_to_ns(&flip_win->attr.timestamp);
+
+	if (timestamp_ns) {
+		/* XXX: should timestamping be overridden by the "no_vsync" flag? */
+		tegra_dc_config_frame_end_intr(win->dc, true);
+		trace_printk("%s:Before timestamp wait\n", win->dc->ndev->name);
+		err = wait_event_interruptible(win->dc->timestamp_wq,
+				tegra_dc_is_within_n_vsync(win->dc, timestamp_ns));
+		trace_printk("%s:After timestamp wait\n", win->dc->ndev->name);
+		tegra_dc_config_frame_end_intr(win->dc, false);
+	}
+
+	return err;
+}
+
+static void (*flip_callback)(void);
+static spinlock_t flip_callback_lock;
+static bool init_tegra_dc_flip_callback_called;
+
+static int __init init_tegra_dc_flip_callback(void)
+{
+	spin_lock_init(&flip_callback_lock);
+	init_tegra_dc_flip_callback_called = true;
+	return 0;
+}
+
+pure_initcall(init_tegra_dc_flip_callback);
+
+int tegra_dc_set_flip_callback(void (*callback)(void))
+{
+	WARN_ON(!init_tegra_dc_flip_callback_called);
+
+	spin_lock(&flip_callback_lock);
+	flip_callback = callback;
+	spin_unlock(&flip_callback_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_dc_set_flip_callback);
+
+int tegra_dc_unset_flip_callback(void)
+{
+	spin_lock(&flip_callback_lock);
+	flip_callback = NULL;
+	spin_unlock(&flip_callback_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_dc_unset_flip_callback);
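+
+/*
+ * Sketch of how the exported pair above is meant to be used (the caller
+ * and its callback are hypothetical):
+ *
+ *	static void my_flip_notify(void) { ... }
+ *
+ *	tegra_dc_set_flip_callback(my_flip_notify);
+ *	...
+ *	tegra_dc_unset_flip_callback();
+ *
+ * The callback is invoked under flip_callback_lock from the flip
+ * worker, so it must not sleep.
+ */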
+
+static void tegra_dc_ext_flip_worker(struct work_struct *work)
+{
+	struct tegra_dc_ext_flip_data *data =
+		container_of(work, struct tegra_dc_ext_flip_data, work);
+	struct tegra_dc_ext *ext = data->ext;
+	struct tegra_dc_win *wins[DC_N_WINDOWS];
+	struct nvmap_handle_ref *unpin_handles[DC_N_WINDOWS *
+					       TEGRA_DC_NUM_PLANES];
+	struct nvmap_handle_ref *old_handle;
+	int i, nr_unpin = 0, nr_win = 0;
+	bool skip_flip = false;
+
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
+		int j = 0, index = flip_win->attr.index;
+		struct tegra_dc_win *win;
+		struct tegra_dc_ext_win *ext_win;
+		struct tegra_dc_ext_flip_data *temp = NULL;
+		s64 head_timestamp = 0;
+
+		if (index < 0)
+			continue;
+
+		win = tegra_dc_get_window(ext->dc, index);
+		ext_win = &ext->win[index];
+
+		if (!(atomic_dec_and_test(&ext_win->nr_pending_flips)) &&
+			(flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_CURSOR))
+			skip_flip = true;
+
+		mutex_lock(&ext_win->queue_lock);
+		list_for_each_entry(temp, &ext_win->timestamp_queue,
+				timestamp_node) {
+			if (j == 0) {
+				if (unlikely(temp != data))
+					dev_err(&win->dc->ndev->dev,
+							"work queue did NOT dequeue head!!!");
+				else
+					head_timestamp =
+						timespec_to_ns(&flip_win->attr.timestamp);
+			} else {
+				s64 timestamp =
+					timespec_to_ns(&temp->win[i].attr.timestamp);
+
+				skip_flip = !tegra_dc_does_vsync_separate(ext->dc,
+						timestamp, head_timestamp);
+				/* Look ahead only one flip */
+				break;
+			}
+			j++;
+		}
+		if (!list_empty(&ext_win->timestamp_queue))
+			list_del(&data->timestamp_node);
+		mutex_unlock(&ext_win->queue_lock);
+
+		if (win->flags & TEGRA_WIN_FLAG_ENABLED) {
+			int j;
+			for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
+				if (skip_flip)
+					old_handle = flip_win->handle[j];
+				else
+					old_handle = ext_win->cur_handle[j];
+
+				if (!old_handle)
+					continue;
+
+				unpin_handles[nr_unpin++] = old_handle;
+			}
+		}
+
+		if (!skip_flip)
+			tegra_dc_ext_set_windowattr(ext, win, &data->win[i]);
+
+		wins[nr_win++] = win;
+	}
+
+	if (!skip_flip) {
+		tegra_dc_update_windows(wins, nr_win);
+		/* TODO: implement swapinterval here */
+		tegra_dc_sync_windows(wins, nr_win);
+		if (!tegra_dc_has_multiple_dc()) {
+			spin_lock(&flip_callback_lock);
+			if (flip_callback)
+				flip_callback();
+			spin_unlock(&flip_callback_lock);
+		}
+
+		for (i = 0; i < DC_N_WINDOWS; i++) {
+			struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
+			int index = flip_win->attr.index;
+
+			if (index < 0)
+				continue;
+
+			tegra_dc_incr_syncpt_min(ext->dc, index,
+					flip_win->syncpt_max);
+		}
+	}
+
+	/* unpin and deref previous front buffers */
+	for (i = 0; i < nr_unpin; i++) {
+		nvmap_unpin(ext->nvmap, unpin_handles[i]);
+		nvmap_free(ext->nvmap, unpin_handles[i]);
+	}
+
+	kfree(data);
+}
+
+static int lock_windows_for_flip(struct tegra_dc_ext_user *user,
+				 struct tegra_dc_ext_flip *args)
+{
+	struct tegra_dc_ext *ext = user->ext;
+	u8 idx_mask = 0;
+	int i;
+
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		int index = args->win[i].index;
+
+		if (index < 0)
+			continue;
+
+		idx_mask |= BIT(index);
+	}
+
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		struct tegra_dc_ext_win *win;
+
+		if (!(idx_mask & BIT(i)))
+			continue;
+
+		win = &ext->win[i];
+
+		mutex_lock(&win->lock);
+
+		if (win->user != user)
+			goto fail_unlock;
+	}
+
+	return 0;
+
+fail_unlock:
+	do {
+		if (!(idx_mask & BIT(i)))
+			continue;
+
+		mutex_unlock(&ext->win[i].lock);
+	} while (i--);
+
+	return -EACCES;
+}
+
+static void unlock_windows_for_flip(struct tegra_dc_ext_user *user,
+				    struct tegra_dc_ext_flip *args)
+{
+	struct tegra_dc_ext *ext = user->ext;
+	u8 idx_mask = 0;
+	int i;
+
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		int index = args->win[i].index;
+
+		if (index < 0)
+			continue;
+
+		idx_mask |= BIT(index);
+	}
+
+	for (i = DC_N_WINDOWS - 1; i >= 0; i--) {
+		if (!(idx_mask & BIT(i)))
+			continue;
+
+		mutex_unlock(&ext->win[i].lock);
+	}
+}
+
+static int sanitize_flip_args(struct tegra_dc_ext_user *user,
+			      struct tegra_dc_ext_flip *args)
+{
+	int i, used_windows = 0;
+
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		int index = args->win[i].index;
+
+		if (index < 0)
+			continue;
+
+		if (index >= DC_N_WINDOWS)
+			return -EINVAL;
+
+		if (used_windows & BIT(index))
+			return -EINVAL;
+
+		used_windows |= BIT(index);
+	}
+
+	if (!used_windows)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int tegra_dc_ext_flip(struct tegra_dc_ext_user *user,
+			     struct tegra_dc_ext_flip *args)
+{
+	struct tegra_dc_ext *ext = user->ext;
+	struct tegra_dc_ext_flip_data *data;
+	int work_index = -1;
+	int i, ret = 0;
+	bool has_timestamp = false;
+
+#ifdef CONFIG_ANDROID
+	int index_check[DC_N_WINDOWS] = {0, };
+	int zero_index_id = 0;
+#endif
+
+	if (!user->nvmap)
+		return -EFAULT;
+
+	ret = sanitize_flip_args(user, args);
+	if (ret)
+		return ret;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	INIT_WORK(&data->work, tegra_dc_ext_flip_worker);
+	data->ext = ext;
+
+#ifdef CONFIG_ANDROID
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		index_check[i] = args->win[i].index;
+		if (index_check[i] == 0)
+			zero_index_id = i;
+	}
+
+	if (index_check[DC_N_WINDOWS - 1] != 0) {
+		struct tegra_dc_ext_flip_windowattr win_temp;
+		win_temp = args->win[DC_N_WINDOWS - 1];
+		args->win[DC_N_WINDOWS - 1] = args->win[zero_index_id];
+		args->win[zero_index_id] = win_temp;
+	}
+#endif
+
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
+		int index = args->win[i].index;
+
+		memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));
+		if (timespec_to_ns(&flip_win->attr.timestamp))
+			has_timestamp = true;
+
+		if (index < 0)
+			continue;
+
+		ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id,
+					      &flip_win->handle[TEGRA_DC_Y],
+					      &flip_win->phys_addr);
+		if (ret)
+			goto fail_pin;
+
+		if (flip_win->attr.buff_id_u) {
+			ret = tegra_dc_ext_pin_window(user,
+					      flip_win->attr.buff_id_u,
+					      &flip_win->handle[TEGRA_DC_U],
+					      &flip_win->phys_addr_u);
+			if (ret)
+				goto fail_pin;
+		} else {
+			flip_win->handle[TEGRA_DC_U] = NULL;
+			flip_win->phys_addr_u = 0;
+		}
+
+		if (flip_win->attr.buff_id_v) {
+			ret = tegra_dc_ext_pin_window(user,
+					      flip_win->attr.buff_id_v,
+					      &flip_win->handle[TEGRA_DC_V],
+					      &flip_win->phys_addr_v);
+			if (ret)
+				goto fail_pin;
+		} else {
+			flip_win->handle[TEGRA_DC_V] = NULL;
+			flip_win->phys_addr_v = 0;
+		}
+	}
+
+	ret = lock_windows_for_flip(user, args);
+	if (ret)
+		goto fail_pin;
+
+	if (!ext->enabled) {
+		ret = -ENXIO;
+		goto unlock;
+	}
+
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		u32 syncpt_max;
+		int index = args->win[i].index;
+		struct tegra_dc_win *win;
+		struct tegra_dc_ext_win *ext_win;
+
+		if (index < 0)
+			continue;
+
+		win = tegra_dc_get_window(ext->dc, index);
+		ext_win = &ext->win[index];
+
+		syncpt_max = tegra_dc_incr_syncpt_max(ext->dc, index);
+
+		data->win[i].syncpt_max = syncpt_max;
+
+		/*
+		 * Any of these windows' syncpoints should be equivalent for
+		 * the client, so we just send back an arbitrary one of them
+		 */
+		args->post_syncpt_val = syncpt_max;
+		args->post_syncpt_id = tegra_dc_get_syncpt_id(ext->dc, index);
+		work_index = index;
+
+		atomic_inc(&ext->win[work_index].nr_pending_flips);
+	}
+	if (work_index < 0) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (has_timestamp) {
+		mutex_lock(&ext->win[work_index].queue_lock);
+		list_add_tail(&data->timestamp_node,
+			      &ext->win[work_index].timestamp_queue);
+		mutex_unlock(&ext->win[work_index].queue_lock);
+	}
+	queue_work(ext->win[work_index].flip_wq, &data->work);
+
+	unlock_windows_for_flip(user, args);
+
+	return 0;
+
+unlock:
+	unlock_windows_for_flip(user, args);
+
+fail_pin:
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		int j;
+		for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
+			if (!data->win[i].handle[j])
+				continue;
+
+			nvmap_unpin(ext->nvmap, data->win[i].handle[j]);
+			nvmap_free(ext->nvmap, data->win[i].handle[j]);
+		}
+	}
+	kfree(data);
+
+	return ret;
+}
+
+static int tegra_dc_ext_set_csc(struct tegra_dc_ext_user *user,
+				struct tegra_dc_ext_csc *new_csc)
+{
+	unsigned int index = new_csc->win_index;
+	struct tegra_dc *dc = user->ext->dc;
+	struct tegra_dc_ext_win *ext_win;
+	struct tegra_dc_csc *csc;
+
+	if (index >= DC_N_WINDOWS)
+		return -EINVAL;
+
+	ext_win = &user->ext->win[index];
+	csc = &dc->windows[index].csc;
+
+	mutex_lock(&ext_win->lock);
+
+	if (ext_win->user != user) {
+		mutex_unlock(&ext_win->lock);
+		return -EACCES;
+	}
+
+	csc->yof =   new_csc->yof;
+	csc->kyrgb = new_csc->kyrgb;
+	csc->kur =   new_csc->kur;
+	csc->kvr =   new_csc->kvr;
+	csc->kug =   new_csc->kug;
+	csc->kvg =   new_csc->kvg;
+	csc->kub =   new_csc->kub;
+	csc->kvb =   new_csc->kvb;
+
+	tegra_dc_update_csc(dc, index);
+
+	mutex_unlock(&ext_win->lock);
+
+	return 0;
+}
+
+static int set_lut_channel(u16 *channel_from_user,
+			   u8 *channel_to,
+			   u32 start,
+			   u32 len)
+{
+	int i;
+	u16 lut16bpp[256];
+
+	if (channel_from_user) {
+		if (copy_from_user(lut16bpp, channel_from_user, len<<1))
+			return 1;
+
+		for (i = 0; i < len; i++)
+			channel_to[start+i] = lut16bpp[i]>>8;
+	} else {
+		for (i = 0; i < len; i++)
+			channel_to[start+i] = start+i;
+	}
+
+	return 0;
+}
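+
+/*
+ * Worked example for the conversion above: user LUT values are 16-bit
+ * while the hardware LUT is 8-bit, so 0xffff >> 8 == 0xff and
+ * 0x8000 >> 8 == 0x80.  With no user channel, the identity ramp
+ * channel_to[i] = i is written instead.
+ */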
+
+static int tegra_dc_ext_set_lut(struct tegra_dc_ext_user *user,
+				struct tegra_dc_ext_lut *new_lut)
+{
+	int err;
+	unsigned int index = new_lut->win_index;
+	u32 start = new_lut->start;
+	u32 len = new_lut->len;
+
+	struct tegra_dc *dc = user->ext->dc;
+	struct tegra_dc_ext_win *ext_win;
+	struct tegra_dc_lut *lut;
+
+	if (index >= DC_N_WINDOWS)
+		return -EINVAL;
+
+	if ((start >= 256) || (len > 256) || ((start + len) > 256))
+		return -EINVAL;
+
+	ext_win = &user->ext->win[index];
+	lut = &dc->windows[index].lut;
+
+	mutex_lock(&ext_win->lock);
+
+	if (ext_win->user != user) {
+		mutex_unlock(&ext_win->lock);
+		return -EACCES;
+	}
+
+	err = set_lut_channel(new_lut->r, lut->r, start, len) |
+	      set_lut_channel(new_lut->g, lut->g, start, len) |
+	      set_lut_channel(new_lut->b, lut->b, start, len);
+
+	if (err) {
+		mutex_unlock(&ext_win->lock);
+		return -EFAULT;
+	}
+
+	tegra_dc_update_lut(dc, index,
+			new_lut->flags & TEGRA_DC_EXT_LUT_FLAGS_FBOVERRIDE);
+
+	mutex_unlock(&ext_win->lock);
+
+	return 0;
+}
+
+static u32 tegra_dc_ext_get_vblank_syncpt(struct tegra_dc_ext_user *user)
+{
+	struct tegra_dc *dc = user->ext->dc;
+
+	return dc->vblank_syncpt;
+}
+
+static int tegra_dc_ext_get_status(struct tegra_dc_ext_user *user,
+				   struct tegra_dc_ext_status *status)
+{
+	struct tegra_dc *dc = user->ext->dc;
+
+	memset(status, 0, sizeof(*status));
+
+	if (dc->enabled)
+		status->flags |= TEGRA_DC_EXT_FLAGS_ENABLED;
+
+	return 0;
+}
+
+static int tegra_dc_ext_get_feature(struct tegra_dc_ext_user *user,
+				   struct tegra_dc_ext_feature *feature)
+{
+	struct tegra_dc *dc = user->ext->dc;
+	struct tegra_dc_feature *table = dc->feature;
+
+	if (dc->enabled && feature->entries) {
+		feature->length = table->num_entries;
+		memcpy(feature->entries, table->entries, table->num_entries *
+					sizeof(struct tegra_dc_feature_entry));
+	}
+
+	return 0;
+}
+
+static long tegra_dc_ioctl(struct file *filp, unsigned int cmd,
+			   unsigned long arg)
+{
+	void __user *user_arg = (void __user *)arg;
+	struct tegra_dc_ext_user *user = filp->private_data;
+
+	switch (cmd) {
+	case TEGRA_DC_EXT_SET_NVMAP_FD:
+		return tegra_dc_ext_set_nvmap_fd(user, arg);
+
+	case TEGRA_DC_EXT_GET_WINDOW:
+		return tegra_dc_ext_get_window(user, arg);
+	case TEGRA_DC_EXT_PUT_WINDOW:
+		return tegra_dc_ext_put_window(user, arg);
+
+	case TEGRA_DC_EXT_FLIP:
+	{
+		struct tegra_dc_ext_flip args;
+		int ret;
+
+		if (copy_from_user(&args, user_arg, sizeof(args)))
+			return -EFAULT;
+
+		ret = tegra_dc_ext_flip(user, &args);
+
+		if (copy_to_user(user_arg, &args, sizeof(args)))
+			return -EFAULT;
+
+		return ret;
+	}
+
+	case TEGRA_DC_EXT_GET_CURSOR:
+		return tegra_dc_ext_get_cursor(user);
+	case TEGRA_DC_EXT_PUT_CURSOR:
+		return tegra_dc_ext_put_cursor(user);
+	case TEGRA_DC_EXT_SET_CURSOR_IMAGE:
+	{
+		struct tegra_dc_ext_cursor_image args;
+
+		if (copy_from_user(&args, user_arg, sizeof(args)))
+			return -EFAULT;
+
+		return tegra_dc_ext_set_cursor_image(user, &args);
+	}
+	case TEGRA_DC_EXT_SET_CURSOR:
+	{
+		struct tegra_dc_ext_cursor args;
+
+		if (copy_from_user(&args, user_arg, sizeof(args)))
+			return -EFAULT;
+
+		return tegra_dc_ext_set_cursor(user, &args);
+	}
+
+	case TEGRA_DC_EXT_SET_CSC:
+	{
+		struct tegra_dc_ext_csc args;
+
+		if (copy_from_user(&args, user_arg, sizeof(args)))
+			return -EFAULT;
+
+		return tegra_dc_ext_set_csc(user, &args);
+	}
+
+	case TEGRA_DC_EXT_GET_VBLANK_SYNCPT:
+	{
+		u32 syncpt = tegra_dc_ext_get_vblank_syncpt(user);
+
+		if (copy_to_user(user_arg, &syncpt, sizeof(syncpt)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	case TEGRA_DC_EXT_GET_STATUS:
+	{
+		struct tegra_dc_ext_status args;
+		int ret;
+
+		ret = tegra_dc_ext_get_status(user, &args);
+
+		if (copy_to_user(user_arg, &args, sizeof(args)))
+			return -EFAULT;
+
+		return ret;
+	}
+
+	case TEGRA_DC_EXT_SET_LUT:
+	{
+		struct tegra_dc_ext_lut args;
+
+		if (copy_from_user(&args, user_arg, sizeof(args)))
+			return -EFAULT;
+
+		return tegra_dc_ext_set_lut(user, &args);
+	}
+
+	case TEGRA_DC_EXT_GET_FEATURES:
+	{
+		struct tegra_dc_ext_feature args;
+		int ret;
+
+		if (copy_from_user(&args, user_arg, sizeof(args)))
+			return -EFAULT;
+
+		ret = tegra_dc_ext_get_feature(user, &args);
+
+		if (copy_to_user(user_arg, &args, sizeof(args)))
+			return -EFAULT;
+
+		return ret;
+	}
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int tegra_dc_open(struct inode *inode, struct file *filp)
+{
+	struct tegra_dc_ext_user *user;
+	struct tegra_dc_ext *ext;
+
+	user = kzalloc(sizeof(*user), GFP_KERNEL);
+	if (!user)
+		return -ENOMEM;
+
+	ext = container_of(inode->i_cdev, struct tegra_dc_ext, cdev);
+	user->ext = ext;
+
+	filp->private_data = user;
+
+	return 0;
+}
+
+static int tegra_dc_release(struct inode *inode, struct file *filp)
+{
+	struct tegra_dc_ext_user *user = filp->private_data;
+	struct tegra_dc_ext *ext = user->ext;
+	unsigned int i;
+
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		if (ext->win[i].user == user)
+			tegra_dc_ext_put_window(user, i);
+	}
+	if (ext->cursor.user == user)
+		tegra_dc_ext_put_cursor(user);
+
+	if (user->nvmap)
+		nvmap_client_put(user->nvmap);
+
+	kfree(user);
+
+	return 0;
+}
+
+static int tegra_dc_ext_setup_windows(struct tegra_dc_ext *ext)
+{
+	int i, ret;
+
+	for (i = 0; i < ext->dc->n_windows; i++) {
+		struct tegra_dc_ext_win *win = &ext->win[i];
+		char name[32];
+
+		win->ext = ext;
+		win->idx = i;
+
+		snprintf(name, sizeof(name), "tegradc.%d/%c",
+			 ext->dc->ndev->id, 'a' + i);
+		win->flip_wq = create_singlethread_workqueue(name);
+		if (!win->flip_wq) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+
+		mutex_init(&win->lock);
+		mutex_init(&win->queue_lock);
+		INIT_LIST_HEAD(&win->timestamp_queue);
+	}
+
+	return 0;
+
+cleanup:
+	while (i--) {
+		struct tegra_dc_ext_win *win = &ext->win[i];
+		destroy_workqueue(win->flip_wq);
+	}
+
+	return ret;
+}
+
+static const struct file_operations tegra_dc_devops = {
+	.owner =		THIS_MODULE,
+	.open =			tegra_dc_open,
+	.release =		tegra_dc_release,
+	.unlocked_ioctl =	tegra_dc_ioctl,
+};
+
+struct tegra_dc_ext *tegra_dc_ext_register(struct nvhost_device *ndev,
+					   struct tegra_dc *dc)
+{
+	int ret;
+	struct tegra_dc_ext *ext;
+	int devno;
+
+	ext = kzalloc(sizeof(*ext), GFP_KERNEL);
+	if (!ext)
+		return ERR_PTR(-ENOMEM);
+
+	BUG_ON(!tegra_dc_ext_devno);
+	devno = tegra_dc_ext_devno + head_count + 1;
+
+	cdev_init(&ext->cdev, &tegra_dc_devops);
+	ext->cdev.owner = THIS_MODULE;
+	ret = cdev_add(&ext->cdev, devno, 1);
+	if (ret) {
+		dev_err(&ndev->dev, "Failed to create character device\n");
+		goto cleanup_alloc;
+	}
+
+	ext->dev = device_create(tegra_dc_ext_class,
+				 &ndev->dev,
+				 devno,
+				 NULL,
+				 "tegra_dc_%d",
+				 ndev->id);
+
+	if (IS_ERR(ext->dev)) {
+		ret = PTR_ERR(ext->dev);
+		goto cleanup_cdev;
+	}
+
+	ext->dc = dc;
+
+	ext->nvmap = nvmap_create_client(nvmap_dev, "tegra_dc_ext");
+	if (!ext->nvmap) {
+		ret = -ENOMEM;
+		goto cleanup_device;
+	}
+
+	ret = tegra_dc_ext_setup_windows(ext);
+	if (ret)
+		goto cleanup_nvmap;
+
+	mutex_init(&ext->cursor.lock);
+
+	head_count++;
+
+	return ext;
+
+cleanup_nvmap:
+	nvmap_client_put(ext->nvmap);
+
+cleanup_device:
+	device_del(ext->dev);
+
+cleanup_cdev:
+	cdev_del(&ext->cdev);
+
+cleanup_alloc:
+	kfree(ext);
+
+	return ERR_PTR(ret);
+}
+
+void tegra_dc_ext_unregister(struct tegra_dc_ext *ext)
+{
+	int i;
+
+	for (i = 0; i < ext->dc->n_windows; i++) {
+		struct tegra_dc_ext_win *win = &ext->win[i];
+
+		flush_workqueue(win->flip_wq);
+		destroy_workqueue(win->flip_wq);
+	}
+
+	nvmap_client_put(ext->nvmap);
+	device_del(ext->dev);
+	cdev_del(&ext->cdev);
+
+	kfree(ext);
+
+	head_count--;
+}
+
+int __init tegra_dc_ext_module_init(void)
+{
+	int ret;
+
+	tegra_dc_ext_class = class_create(THIS_MODULE, "tegra_dc_ext");
+	if (IS_ERR(tegra_dc_ext_class)) {
+		printk(KERN_ERR "tegra_dc_ext: failed to create class\n");
+		return PTR_ERR(tegra_dc_ext_class);
+	}
+
+	/* Reserve one character device per head, plus the control device */
+	ret = alloc_chrdev_region(&tegra_dc_ext_devno,
+				  0, TEGRA_MAX_DC + 1,
+				  "tegra_dc_ext");
+	if (ret)
+		goto cleanup_class;
+
+	ret = tegra_dc_ext_control_init();
+	if (ret)
+		goto cleanup_region;
+
+	return 0;
+
+cleanup_region:
+	unregister_chrdev_region(tegra_dc_ext_devno, TEGRA_MAX_DC + 1);
+
+cleanup_class:
+	class_destroy(tegra_dc_ext_class);
+
+	return ret;
+}
+
+void __exit tegra_dc_ext_module_exit(void)
+{
+	unregister_chrdev_region(tegra_dc_ext_devno, TEGRA_MAX_DC + 1);
+	class_destroy(tegra_dc_ext_class);
+}
diff --git a/drivers/staging/tegra/video/dc/ext/events.c b/drivers/staging/tegra/video/dc/ext/events.c
new file mode 100644
index 000000000000..577d056e2436
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/ext/events.c
@@ -0,0 +1,197 @@
+/*
+ * drivers/video/tegra/dc/ext/events.c
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "tegra_dc_ext_priv.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(event_wait);
+
+unsigned int tegra_dc_ext_event_poll(struct file *filp, poll_table *wait)
+{
+	struct tegra_dc_ext_control_user *user = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &event_wait, wait);
+
+	if (atomic_read(&user->num_events))
+		mask |= POLLIN;
+
+	return mask;
+}
+
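+/*
+ * Dequeue the next event for this user.  Returns 1 if an event was
+ * dequeued, 0 if non-blocking and none is pending, or a negative errno
+ * (a signal while blocking is reported as -EAGAIN).
+ */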
+static int get_next_event(struct tegra_dc_ext_control_user *user,
+			  struct tegra_dc_ext_event_list *event,
+			  bool block)
+{
+	struct list_head *list = &user->event_list;
+	struct tegra_dc_ext_event_list *next_event;
+	int ret;
+
+	if (block) {
+		ret = wait_event_interruptible(event_wait,
+			atomic_read(&user->num_events));
+
+		if (unlikely(ret)) {
+			if (ret == -ERESTARTSYS)
+				return -EAGAIN;
+			return ret;
+		}
+	} else {
+		if (!atomic_read(&user->num_events))
+			return 0;
+	}
+
+	mutex_lock(&user->lock);
+
+	BUG_ON(list_empty(list));
+	next_event = list_first_entry(list, struct tegra_dc_ext_event_list,
+			list);
+	*event = *next_event;
+	list_del(&next_event->list);
+	kfree(next_event);
+
+	atomic_dec(&user->num_events);
+
+	mutex_unlock(&user->lock);
+
+	return 1;
+}
+
+ssize_t tegra_dc_ext_event_read(struct file *filp, char __user *buf,
+				size_t size, loff_t *ppos)
+{
+	struct tegra_dc_ext_control_user *user = filp->private_data;
+	struct tegra_dc_ext_event_list event_elem;
+	struct tegra_dc_ext_event *event = &event_elem.event;
+	ssize_t retval = 0, to_copy, event_size, pending;
+	loff_t previously_copied = 0;
+	char *to_copy_ptr;
+
+	if (size == 0)
+		return 0;
+
+	if (user->partial_copy) {
+		/*
+		 * We didn't transfer the entire event last time, need to
+		 * finish it up
+		 */
+		event_elem = user->event_to_copy;
+		previously_copied = user->partial_copy;
+	} else {
+		/* Get the next event, if any */
+		pending = get_next_event(user, &event_elem,
+			!(filp->f_flags & O_NONBLOCK));
+		if (pending <= 0)
+			return pending;
+	}
+
+	/* Write the event to the user */
+	event_size = sizeof(*event) + event->data_size;
+	BUG_ON(event_size <= previously_copied);
+	event_size -= previously_copied;
+
+	to_copy_ptr = (char *)event + previously_copied;
+	to_copy = min_t(ssize_t, size, event_size);
+	if (copy_to_user(buf, to_copy_ptr, to_copy)) {
+		retval = -EFAULT;
+		to_copy = 0;
+	}
+
+	/* Note that we currently only deliver one event at a time */
+
+	if (event_size > to_copy) {
+		/*
+		 * We were only able to copy part of this event.  Stash it for
+		 * next time.
+		 */
+		user->event_to_copy = event_elem;
+		user->partial_copy = previously_copied + to_copy;
+	} else {
+		user->partial_copy = 0;
+	}
+
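+	/* Report the bytes copied, or the -EFAULT from a failed copy. */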
+	return to_copy ? to_copy : retval;
+}
+
+static int tegra_dc_ext_queue_event(struct tegra_dc_ext_control *control,
+				    struct tegra_dc_ext_event *event)
+{
+	struct list_head *cur;
+	int retval = 0;
+
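+	/* Fan the event out to every interested user; an allocation failure
+	 * skips that user but delivery to the others continues. */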
+	mutex_lock(&control->lock);
+	list_for_each(cur, &control->users) {
+		struct tegra_dc_ext_control_user *user;
+		struct tegra_dc_ext_event_list *ev_list;
+
+		user = container_of(cur, struct tegra_dc_ext_control_user,
+			list);
+		mutex_lock(&user->lock);
+
+		if (!(user->event_mask & event->type)) {
+			mutex_unlock(&user->lock);
+			continue;
+		}
+
+		ev_list = kmalloc(sizeof(*ev_list), GFP_KERNEL);
+		if (!ev_list) {
+			retval = -ENOMEM;
+			mutex_unlock(&user->lock);
+			continue;
+		}
+
+		memcpy(&ev_list->event, event,
+			sizeof(*event) + event->data_size);
+
+		list_add_tail(&ev_list->list, &user->event_list);
+
+		atomic_inc(&user->num_events);
+
+		mutex_unlock(&user->lock);
+	}
+	mutex_unlock(&control->lock);
+
+	/* Is it worth it to track waiters with more granularity? */
+	wake_up(&event_wait);
+
+	return retval;
+}
+
+int tegra_dc_ext_queue_hotplug(struct tegra_dc_ext_control *control, int output)
+{
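+	/* Packed so the hotplug payload immediately follows the event
+	 * header, matching the layout copied out by the read() path. */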
+	struct {
+		struct tegra_dc_ext_event event;
+		struct tegra_dc_ext_control_event_hotplug hotplug;
+	} __packed pack;
+
+	pack.event.type = TEGRA_DC_EXT_EVENT_HOTPLUG;
+	pack.event.data_size = sizeof(pack.hotplug);
+
+	pack.hotplug.handle = output;
+
+	tegra_dc_ext_queue_event(control, &pack.event);
+
+	return 0;
+}
diff --git a/drivers/staging/tegra/video/dc/ext/tegra_dc_ext_priv.h b/drivers/staging/tegra/video/dc/ext/tegra_dc_ext_priv.h
new file mode 100644
index 000000000000..b6ba170b4983
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/ext/tegra_dc_ext_priv.h
@@ -0,0 +1,152 @@
+/*
+ * drivers/video/tegra/dc/ext/tegra_dc_ext_priv.h
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_DC_EXT_PRIV_H
+#define __TEGRA_DC_EXT_PRIV_H
+
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#include <linux/nvmap.h>
+
+#include <video/tegra_dc_ext.h>
+
+#include "../dc.h"
+
+struct tegra_dc_ext;
+
+struct tegra_dc_ext_user {
+	struct tegra_dc_ext	*ext;
+	struct nvmap_client	*nvmap;
+};
+
+enum {
+	TEGRA_DC_Y,
+	TEGRA_DC_U,
+	TEGRA_DC_V,
+	TEGRA_DC_NUM_PLANES,
+};
+
+struct tegra_dc_ext_win {
+	struct tegra_dc_ext	*ext;
+
+	int			idx;
+
+	struct tegra_dc_ext_user *user;
+
+	struct mutex		lock;
+
+	/* Current nvmap handle (if any) for Y, U, V planes */
+	struct nvmap_handle_ref	*cur_handle[TEGRA_DC_NUM_PLANES];
+
+	struct workqueue_struct	*flip_wq;
+
+	atomic_t		nr_pending_flips;
+
+	struct mutex		queue_lock;
+
+	struct list_head	timestamp_queue;
+};
+
+struct tegra_dc_ext {
+	struct tegra_dc			*dc;
+
+	struct cdev			cdev;
+	struct device			*dev;
+
+	struct nvmap_client		*nvmap;
+
+	struct tegra_dc_ext_win		win[DC_N_WINDOWS];
+
+	struct {
+		struct tegra_dc_ext_user	*user;
+		struct nvmap_handle_ref		*cur_handle;
+		struct mutex			lock;
+	} cursor;
+
+	bool				enabled;
+};
+
+#define TEGRA_DC_EXT_EVENT_MASK_ALL \
+	TEGRA_DC_EXT_EVENT_HOTPLUG
+
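+/* Must cover the largest event payload (currently the hotplug data). */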
+#define TEGRA_DC_EXT_EVENT_MAX_SZ	8
+
+struct tegra_dc_ext_event_list {
+	struct tegra_dc_ext_event	event;
+	/* The data field _must_ follow the event field. */
+	char				data[TEGRA_DC_EXT_EVENT_MAX_SZ];
+
+	struct list_head		list;
+};
+
+#define TEGRA_DC_EXT_CAPABILITIES \
+	TEGRA_DC_EXT_CAPABILITIES_CURSOR_MODE
+
+struct tegra_dc_ext_control_user {
+	struct tegra_dc_ext_control	*control;
+
+	struct list_head		event_list;
+	atomic_t			num_events;
+
+	u32				event_mask;
+
+	struct tegra_dc_ext_event_list	event_to_copy;
+	loff_t				partial_copy;
+
+	struct mutex			lock;
+
+	struct list_head		list;
+};
+
+struct tegra_dc_ext_control {
+	struct cdev			cdev;
+	struct device			*dev;
+
+	struct list_head		users;
+
+	struct mutex			lock;
+};
+
+extern int tegra_dc_ext_devno;
+extern struct class *tegra_dc_ext_class;
+
+extern int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 id,
+				   struct nvmap_handle_ref **handle,
+				   dma_addr_t *phys_addr);
+
+extern int tegra_dc_ext_get_cursor(struct tegra_dc_ext_user *user);
+extern int tegra_dc_ext_put_cursor(struct tegra_dc_ext_user *user);
+extern int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
+					 struct tegra_dc_ext_cursor_image *);
+extern int tegra_dc_ext_set_cursor(struct tegra_dc_ext_user *user,
+				   struct tegra_dc_ext_cursor *);
+
+extern int tegra_dc_ext_control_init(void);
+
+extern int tegra_dc_ext_queue_hotplug(struct tegra_dc_ext_control *,
+				      int output);
+extern ssize_t tegra_dc_ext_event_read(struct file *filp, char __user *buf,
+				       size_t size, loff_t *ppos);
+extern unsigned int tegra_dc_ext_event_poll(struct file *, poll_table *);
+
+extern int tegra_dc_ext_get_num_outputs(void);
+
+#endif /* __TEGRA_DC_EXT_PRIV_H */
diff --git a/drivers/staging/tegra/video/dc/ext/util.c b/drivers/staging/tegra/video/dc/ext/util.c
new file mode 100644
index 000000000000..162e2731abeb
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/ext/util.c
@@ -0,0 +1,78 @@
+/*
+ * drivers/video/tegra/dc/ext/util.c
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+
+#include <linux/nvmap.h>
+
+/* ugh */
+#include "../dc.h"
+#include "../../nvmap/nvmap.h"
+
+#include "tegra_dc_ext_priv.h"
+
+int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 id,
+			    struct nvmap_handle_ref **handle,
+			    dma_addr_t *phys_addr)
+{
+	struct tegra_dc_ext *ext = user->ext;
+	struct nvmap_handle_ref *win_dup;
+	struct nvmap_handle *win_handle;
+	dma_addr_t phys;
+
+	if (!id) {
+		*handle = NULL;
+		*phys_addr = -1;
+
+		return 0;
+	}
+
+	/*
+	 * Take a reference to the buffer using the user's nvmap context, to
+	 * make sure they have permissions to access it.
+	 */
+	win_handle = nvmap_get_handle_id(user->nvmap, id);
+	if (!win_handle)
+		return -EACCES;
+
+	/*
+	 * Duplicate the buffer's handle into the dc_ext driver's nvmap
+	 * context, to ensure that the handle won't be freed as long as it is
+	 * in use by display.
+	 */
+	win_dup = nvmap_duplicate_handle_id(ext->nvmap, id);
+
+	/* Release the reference we took in the user's context above */
+	nvmap_handle_put(win_handle);
+
+	if (IS_ERR(win_dup))
+		return PTR_ERR(win_dup);
+
+	phys = nvmap_pin(ext->nvmap, win_dup);
+	/* XXX this isn't correct for non-pointers... */
+	if (IS_ERR((void *)phys)) {
+		nvmap_free(ext->nvmap, win_dup);
+		return PTR_ERR((void *)phys);
+	}
+
+	*phys_addr = phys;
+	*handle = win_dup;
+
+	return 0;
+}
diff --git a/drivers/staging/tegra/video/dc/fb.h b/drivers/staging/tegra/video/dc/fb.h
new file mode 100644
index 000000000000..5f4a57e0a27f
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/fb.h
@@ -0,0 +1,64 @@
+/*
+ * arch/arm/mach-tegra/include/mach/fb.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ *	Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_FB_H
+#define __MACH_TEGRA_FB_H
+
+#include <linux/fb.h>
+
+struct nvhost_device;
+struct tegra_dc;
+struct tegra_fb_data;
+struct tegra_fb_info;
+struct resource;
+
+int tegra_fb_get_mode(struct tegra_dc *dc);
+int tegra_fb_set_mode(struct tegra_dc *dc, int fps);
+
+#ifdef CONFIG_FB_TEGRA
+struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
+					struct tegra_dc *dc,
+					struct tegra_fb_data *fb_data,
+					struct resource *fb_mem);
+void tegra_fb_unregister(struct tegra_fb_info *fb_info);
+void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+			      struct fb_monspecs *specs,
+			      bool (*mode_filter)(const struct tegra_dc *dc,
+						  struct fb_videomode *mode));
+#else
+static inline struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
+						      struct tegra_dc *dc,
+						      struct tegra_fb_data *fb_data,
+						      struct resource *fb_mem)
+{
+	return NULL;
+}
+
+static inline void tegra_fb_unregister(struct tegra_fb_info *fb_info)
+{
+}
+
+static inline void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+					    struct fb_monspecs *specs,
+					    bool (*mode_filter)(const struct tegra_dc *dc,
+								struct fb_videomode *mode))
+{
+}
+#endif
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/hdmi.c b/drivers/staging/tegra/video/dc/hdmi.c
new file mode 100644
index 000000000000..fa20de04d513
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/hdmi.c
@@ -0,0 +1,2519 @@
+/*
+ * drivers/video/tegra/dc/hdmi.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#ifdef CONFIG_SWITCH
+#include <linux/switch.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/reset.h>
+
+#include <mach/clk.h>
+#include <linux/nvhost.h>
+#include <mach/hdmi-audio.h>
+
+#include <video/tegrafb.h>
+
+#include "dc.h"
+#include "fb.h"
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "hdmi_reg.h"
+#include "hdmi.h"
+#include "edid.h"
+#include "nvhdcp.h"
+
+/* datasheet claims this will always be 216MHz */
+#define HDMI_AUDIOCLK_FREQ		216000000
+
+#define HDMI_REKEY_DEFAULT		56
+
+#define HDMI_ELD_RESERVED1_INDEX		1
+#define HDMI_ELD_RESERVED2_INDEX		3
+#define HDMI_ELD_VER_INDEX			0
+#define HDMI_ELD_BASELINE_LEN_INDEX		2
+#define HDMI_ELD_CEA_VER_MNL_INDEX		4
+#define HDMI_ELD_SAD_CNT_CON_TYP_SAI_HDCP_INDEX		5
+#define HDMI_ELD_AUD_SYNC_DELAY_INDEX	6
+#define HDMI_ELD_SPK_ALLOC_INDEX		7
+#define HDMI_ELD_PORT_ID_INDEX		8
+#define HDMI_ELD_MANF_NAME_INDEX		16
+#define HDMI_ELD_PRODUCT_CODE_INDEX		18
+#define HDMI_ELD_MONITOR_NAME_INDEX		20
+
+/* These two values must be cross-checked whenever entries are added to
+   or removed from tegra_dc_hdmi_aspect_ratios[] */
+#define TEGRA_DC_HDMI_MIN_ASPECT_RATIO_PERCENT	80
+#define TEGRA_DC_HDMI_MAX_ASPECT_RATIO_PERCENT	320
+
+/* Percentage equivalents of the standard aspect ratios,
+   accurate up to two decimal digits */
+static int tegra_dc_hdmi_aspect_ratios[] = {
+	/*   3:2	*/	150,
+	/*   4:3	*/	133,
+	/*   4:5	*/	 80,
+	/*   5:4	*/	125,
+	/*   9:5	*/	180,
+	/*  16:5	*/	320,
+	/*  16:9	*/	178,
+	/* 16:10	*/	160,
+	/* 19:10	*/	190,
+	/* 25:16	*/	156,
+	/* 64:35	*/	183,
+	/* 72:35	*/	206
+};
+
+struct tegra_dc_hdmi_data {
+	struct tegra_dc			*dc;
+	struct tegra_edid		*edid;
+	struct tegra_edid_hdmi_eld		eld;
+	struct tegra_nvhdcp		*nvhdcp;
+	struct delayed_work		work;
+
+	struct resource			*base_res;
+	void __iomem			*base;
+	struct clk			*clk;
+
+	struct clk			*disp1_clk;
+	struct clk			*disp2_clk;
+	struct clk			*hda_clk;
+	struct clk			*hda2codec_clk;
+	struct clk			*hda2hdmi_clk;
+	struct reset_control		*rst;
+
+#ifdef CONFIG_SWITCH
+	struct switch_dev		hpd_switch;
+#endif
+
+	spinlock_t			suspend_lock;
+	bool				suspended;
+	bool				eld_retrieved;
+	bool				clk_enabled;
+	unsigned			audio_freq;
+	unsigned			audio_source;
+	bool				audio_inject_null;
+
+	bool				dvi;
+};
+
+struct tegra_dc_hdmi_data *dc_hdmi;
+
+const struct fb_videomode tegra_dc_hdmi_supported_modes[] = {
+	/* 1280x720p 60hz: EIA/CEA-861-B Format 4 */
+	{
+		.xres =		1280,
+		.yres =		720,
+		.pixclock =	KHZ2PICOS(74250),
+		.hsync_len =	40,	/* h_sync_width */
+		.vsync_len =	5,	/* v_sync_width */
+		.left_margin =	220,	/* h_back_porch */
+		.upper_margin =	20,	/* v_back_porch */
+		.right_margin =	110,	/* h_front_porch */
+		.lower_margin =	5,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+
+	/* 1280x720p 60hz: EIA/CEA-861-B Format 4 (Stereo)*/
+	{
+		.xres =		1280,
+		.yres =		720,
+		.pixclock =	KHZ2PICOS(74250),
+		.hsync_len =	40,	/* h_sync_width */
+		.vsync_len =	5,	/* v_sync_width */
+		.left_margin =	220,	/* h_back_porch */
+		.upper_margin =	20,	/* v_back_porch */
+		.right_margin =	110,	/* h_front_porch */
+		.lower_margin =	5,	/* v_front_porch */
+		.vmode = FB_VMODE_NONINTERLACED |
+#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
+				 FB_VMODE_STEREO_FRAME_PACK,
+#else
+				 FB_VMODE_STEREO_LEFT_RIGHT,
+#endif
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+
+	/* 720x480p 59.94hz: EIA/CEA-861-B Formats 2 & 3 */
+	{
+		.xres =		720,
+		.yres =		480,
+		.pixclock =	KHZ2PICOS(27000),
+		.hsync_len =	62,	/* h_sync_width */
+		.vsync_len =	6,	/* v_sync_width */
+		.left_margin =	60,	/* h_back_porch */
+		.upper_margin =	30,	/* v_back_porch */
+		.right_margin =	16,	/* h_front_porch */
+		.lower_margin =	9,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = 0,
+	},
+
+	/* 640x480p 60hz: EIA/CEA-861-B Format 1 */
+	{
+		.xres =		640,
+		.yres =		480,
+		.pixclock =	KHZ2PICOS(25200),
+		.hsync_len =	96,	/* h_sync_width */
+		.vsync_len =	2,	/* v_sync_width */
+		.left_margin =	48,	/* h_back_porch */
+		.upper_margin =	33,	/* v_back_porch */
+		.right_margin =	16,	/* h_front_porch */
+		.lower_margin =	10,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = 0,
+	},
+
+	/* 720x576p 50hz EIA/CEA-861-B Formats 17 & 18 */
+	{
+		.xres =		720,
+		.yres =		576,
+		.pixclock =	KHZ2PICOS(27000),
+		.hsync_len =	64,	/* h_sync_width */
+		.vsync_len =	5,	/* v_sync_width */
+		.left_margin =	68,	/* h_back_porch */
+		.upper_margin =	39,	/* v_back_porch */
+		.right_margin =	12,	/* h_front_porch */
+		.lower_margin =	5,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = 0,
+	},
+
+	/* 1920x1080p 23.98/24hz: EIA/CEA-861-B Format 32 (Stereo)*/
+	{
+		.xres =		1920,
+		.yres =		1080,
+		.pixclock =	KHZ2PICOS(74250),
+		.hsync_len =	44,	/* h_sync_width */
+		.vsync_len =	5,	/* v_sync_width */
+		.left_margin =	148,	/* h_back_porch */
+		.upper_margin =	36,	/* v_back_porch */
+		.right_margin =	638,	/* h_front_porch */
+		.lower_margin =	4,	/* v_front_porch */
+		.vmode = FB_VMODE_NONINTERLACED |
+#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
+				 FB_VMODE_STEREO_FRAME_PACK,
+#else
+				 FB_VMODE_STEREO_LEFT_RIGHT,
+#endif
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+
+	/* 1920x1080p 30Hz EIA/CEA-861-B Format 34 */
+	{
+		.xres =		1920,
+		.yres =		1080,
+		.pixclock =	KHZ2PICOS(74250),
+		.hsync_len =	44,	/* h_sync_width */
+		.vsync_len =	5,	/* v_sync_width */
+		.left_margin =	148,	/* h_back_porch */
+		.upper_margin =	36,	/* v_back_porch */
+		.right_margin =	88,	/* h_front_porch */
+		.lower_margin =	4,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+
+	/* 1920x1080p 59.94/60hz CVT */
+	{
+		.xres =		1920,
+		.yres =		1080,
+		.pixclock =	KHZ2PICOS(138500),
+		.hsync_len =	32,	/* h_sync_width */
+		.vsync_len =	5,	/* v_sync_width */
+		.left_margin =	80,	/* h_back_porch */
+		.upper_margin =	23,	/* v_back_porch */
+		.right_margin =	48,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode = FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_VERT_HIGH_ACT,
+	},
+
+	/* 1920x1080p 59.94/60hz EIA/CEA-861-B Format 16 */
+	{
+		.xres =		1920,
+		.yres =		1080,
+		.pixclock =	KHZ2PICOS(148500),
+		.hsync_len =	44,	/* h_sync_width */
+		.vsync_len =	5,	/* v_sync_width */
+		.left_margin =	148,	/* h_back_porch */
+		.upper_margin =	36,	/* v_back_porch */
+		.right_margin =	88,	/* h_front_porch */
+		.lower_margin =	4,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+
+	/*
+	 * A few VGA/SVGA modes to support monitors with lower
+	 * resolutions or HDMI<->DVI connections
+	 */
+
+	/* 640x480p 75hz */
+	{
+		.xres =		640,
+		.yres =		480,
+		.pixclock =	KHZ2PICOS(31500),
+		.hsync_len =	96,	/* h_sync_width */
+		.vsync_len =	2,	/* v_sync_width */
+		.left_margin =	48,	/* h_back_porch */
+		.upper_margin =	32,	/* v_back_porch */
+		.right_margin =	16,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = 0,
+	},
+	/* 720x400p 59hz */
+	{
+		.xres =		720,
+		.yres =		400,
+		.pixclock =	KHZ2PICOS(35500),
+		.hsync_len =	72,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	108,	/* h_back_porch */
+		.upper_margin =	42,	/* v_back_porch */
+		.right_margin =	36,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync  = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 800x600p 60hz */
+	{
+		.xres =		800,
+		.yres =		600,
+		.pixclock =	KHZ2PICOS(40000),
+		.hsync_len =	128,	/* h_sync_width */
+		.vsync_len =	4,	/* v_sync_width */
+		.left_margin =	88,	/* h_back_porch */
+		.upper_margin =	23,	/* v_back_porch */
+		.right_margin =	40,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 800x600p 75hz */
+	{
+		.xres =		800,
+		.yres =		600,
+		.pixclock =	KHZ2PICOS(49500),
+		.hsync_len =	80,	/* h_sync_width */
+		.vsync_len =	2,	/* v_sync_width */
+		.left_margin =	160,	/* h_back_porch */
+		.upper_margin =	21,	/* v_back_porch */
+		.right_margin =	16,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1024x768p 60hz */
+	{
+		.xres =		1024,
+		.yres =		768,
+		.pixclock =	KHZ2PICOS(65000),
+		.hsync_len =	136,	/* h_sync_width */
+		.vsync_len =	6,	/* v_sync_width */
+		.left_margin =	160,	/* h_back_porch */
+		.upper_margin =	29,	/* v_back_porch */
+		.right_margin =	24,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =	0,
+	},
+	/* 1024x768p 75hz */
+	{
+		.xres =		1024,
+		.yres =		768,
+		.pixclock =	KHZ2PICOS(78800),
+		.hsync_len =	96,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	176,	/* h_back_porch */
+		.upper_margin =	28,	/* v_back_porch */
+		.right_margin =	16,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = 0,
+	},
+	/* 1152x864p 75hz */
+	{
+		.xres =		1152,
+		.yres =		864,
+		.pixclock =	KHZ2PICOS(108000),
+		.hsync_len =	128,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	256,	/* h_back_porch */
+		.upper_margin =	32,	/* v_back_porch */
+		.right_margin =	64,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1280x800p 60hz */
+	{
+		.xres =		1280,
+		.yres =		800,
+		.pixclock =	KHZ2PICOS(83460),
+		.hsync_len =	136,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	200,	/* h_back_porch */
+		.upper_margin =	24,	/* v_back_porch */
+		.right_margin =	64,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =		FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1280x960p 60hz */
+	{
+		.xres =		1280,
+		.yres =		960,
+		.pixclock =	KHZ2PICOS(108000),
+		.hsync_len =	136,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	216,	/* h_back_porch */
+		.upper_margin =	30,	/* v_back_porch */
+		.right_margin =	80,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =		FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1280x1024p 60hz */
+	{
+		.xres =		1280,
+		.yres =		1024,
+		.pixclock =	KHZ2PICOS(108000),
+		.hsync_len =	112,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	248,	/* h_back_porch */
+		.upper_margin =	38,	/* v_back_porch */
+		.right_margin =	48,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1280x1024p 75hz */
+	{
+		.xres =		1280,
+		.yres =		1024,
+		.pixclock =	KHZ2PICOS(135000),
+		.hsync_len =	144,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	248,	/* h_back_porch */
+		.upper_margin =	38,	/* v_back_porch */
+		.right_margin =	16,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1368x768p 60hz */
+	{
+		.xres =		1368,
+		.yres =		768,
+		.pixclock =	KHZ2PICOS(85860),
+		.hsync_len =	144,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	216,	/* h_back_porch */
+		.upper_margin =	23,	/* v_back_porch */
+		.right_margin =	72,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =		FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1440x900p 60hz */
+	{
+		.xres =		1440,
+		.yres =		900,
+		.pixclock =	KHZ2PICOS(106470),
+		.hsync_len =	152,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	232,	/* h_back_porch */
+		.upper_margin =	28,	/* v_back_porch */
+		.right_margin =	80,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =		FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1600x1200p 60hz */
+	{
+		.xres =		1600,
+		.yres =		1200,
+		.pixclock =	KHZ2PICOS(162000),
+		.hsync_len =	192,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	304,	/* h_back_porch */
+		.upper_margin =	46,	/* v_back_porch */
+		.right_margin =	64,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1600x1200p 75hz */
+	{
+		.xres =		1600,
+		.yres =		1200,
+		.pixclock =	KHZ2PICOS(202500),
+		.hsync_len =	192,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	304,	/* h_back_porch */
+		.upper_margin =	46,	/* v_back_porch */
+		.right_margin =	64,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1680x1050p 59.94/60hz */
+	{
+		.xres =		1680,
+		.yres =		1050,
+		.pixclock =	KHZ2PICOS(147140),
+		.hsync_len =	184,	/* h_sync_width */
+		.vsync_len =	3,	/* v_sync_width */
+		.left_margin =	288,	/* h_back_porch */
+		.upper_margin =	33,	/* v_back_porch */
+		.right_margin =	104,	/* h_front_porch */
+		.lower_margin =	1,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =	FB_SYNC_VERT_HIGH_ACT,
+	},
+};
+
+/* CVT timing representations of VESA modes */
+const struct fb_videomode tegra_dc_hdmi_supported_cvt_modes[] = {
+
+	/* 640x480p 60hz */
+	{
+		.refresh =	60,
+		.xres =		640,
+		.yres =		480,
+		.pixclock =	KHZ2PICOS(23750),
+		.hsync_len =	64,	/* h_sync_width */
+		.vsync_len =	4,	/* v_sync_width */
+		.left_margin =	80,	/* h_back_porch */
+		.upper_margin =	17,	/* v_back_porch */
+		.right_margin =	16,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 640x480p 75hz */
+	{
+		.refresh =	75,
+		.xres =		640,
+		.yres =		480,
+		.pixclock =	KHZ2PICOS(30750),
+		.hsync_len =	64,	/* h_sync_width */
+		.vsync_len =	4,	/* v_sync_width */
+		.left_margin =	88,	/* h_back_porch */
+		.upper_margin =	21,	/* v_back_porch */
+		.right_margin =	24,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 720x400p 59hz */
+	{
+		.refresh =	59,
+		.xres =		720,
+		.yres =		400,
+		.pixclock =	KHZ2PICOS(22000),
+		.hsync_len =	64,	/* h_sync_width */
+		.vsync_len =	10,	/* v_sync_width */
+		.left_margin =	88,	/* h_back_porch */
+		.upper_margin =	14,	/* v_back_porch */
+		.right_margin =	24,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync  = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 800x600p 60hz */
+	{
+		.refresh =	60,
+		.xres =		800,
+		.yres =		600,
+		.pixclock =	KHZ2PICOS(38250),
+		.hsync_len =	80,	/* h_sync_width */
+		.vsync_len =	4,	/* v_sync_width */
+		.left_margin =	112,	/* h_back_porch */
+		.upper_margin =	21,	/* v_back_porch */
+		.right_margin =	32,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = 	FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 800x600p 75hz */
+	{
+		.refresh =	75,
+		.xres =		800,
+		.yres =		600,
+		.pixclock =	KHZ2PICOS(49000),
+		.hsync_len =	80,	/* h_sync_width */
+		.vsync_len =	4,	/* v_sync_width */
+		.left_margin =	120,	/* h_back_porch */
+		.upper_margin =	26,	/* v_back_porch */
+		.right_margin =	40,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1024x768p 60hz */
+	{
+		.refresh =	60,
+		.xres =		1024,
+		.yres =		768,
+		.pixclock =	KHZ2PICOS(63500),
+		.hsync_len =	104,	/* h_sync_width */
+		.vsync_len =	4,	/* v_sync_width */
+		.left_margin =	152,	/* h_back_porch */
+		.upper_margin =	27,	/* v_back_porch */
+		.right_margin =	48,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =	FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1024x768p 75hz */
+	{
+		.refresh =	75,
+		.xres =		1024,
+		.yres =		768,
+		.pixclock =	KHZ2PICOS(82000),
+		.hsync_len =	104,	/* h_sync_width */
+		.vsync_len =	4,	/* v_sync_width */
+		.left_margin =	168,	/* h_back_porch */
+		.upper_margin =	34,	/* v_back_porch */
+		.right_margin =	64,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1152x864p 75hz */
+	{
+		.refresh =	75,
+		.xres =		1152,
+		.yres =		864,
+		.pixclock =	KHZ2PICOS(104500),
+		.hsync_len =	120,	/* h_sync_width */
+		.vsync_len =	10,	/* v_sync_width */
+		.left_margin =	192,	/* h_back_porch */
+		.upper_margin =	38,	/* v_back_porch */
+		.right_margin =	72,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1280x800p 60hz */
+	{
+		.refresh =	60,
+		.xres =		1280,
+		.yres =		800,
+		.pixclock =	KHZ2PICOS(83500),
+		.hsync_len =	128,	/* h_sync_width */
+		.vsync_len =	6,	/* v_sync_width */
+		.left_margin =	200,	/* h_back_porch */
+		.upper_margin =	28,	/* v_back_porch */
+		.right_margin =	72,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =		FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1280x960p 60hz */
+	{
+		.refresh =	60,
+		.xres =		1280,
+		.yres =		960,
+		.pixclock =	KHZ2PICOS(101250),
+		.hsync_len =	128,	/* h_sync_width */
+		.vsync_len =	4,	/* v_sync_width */
+		.left_margin =	208,	/* h_back_porch */
+		.upper_margin =	33,	/* v_back_porch */
+		.right_margin =	80,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1280x1024p 60hz */
+	{
+		.refresh =	60,
+		.xres =		1280,
+		.yres =		1024,
+		.pixclock =	KHZ2PICOS(109000),
+		.hsync_len =	136,	/* h_sync_width */
+		.vsync_len =	7,	/* v_sync_width */
+		.left_margin =	216,	/* h_back_porch */
+		.upper_margin =	36,	/* v_back_porch */
+		.right_margin =	80,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =	FB_SYNC_VERT_HIGH_ACT,
+	},
+
+	/* 1280x1024p 75hz */
+	{
+		.refresh =	75,
+		.xres =		1280,
+		.yres =		1024,
+		.pixclock =	KHZ2PICOS(138750),
+		.hsync_len =	136,	/* h_sync_width */
+		.vsync_len =	7,	/* v_sync_width */
+		.left_margin =	224,	/* h_back_porch */
+		.upper_margin =	45,	/* v_back_porch */
+		.right_margin =	88,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1368x768p 60hz */
+	{
+		.refresh =	60,
+		.xres =		1368,
+		.yres =		768,
+		.pixclock =	KHZ2PICOS(85250),
+		.hsync_len =	136,	/* h_sync_width */
+		.vsync_len =	10,	/* v_sync_width */
+		.left_margin =	208,	/* h_back_porch */
+		.upper_margin =	27,	/* v_back_porch */
+		.right_margin =	72,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =		FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1440x900p 60hz */
+	{
+		.refresh =	60,
+		.xres =		1440,
+		.yres =		900,
+		.pixclock =	KHZ2PICOS(106500),
+		.hsync_len =	152,	/* h_sync_width */
+		.vsync_len =	6,	/* v_sync_width */
+		.left_margin =	232,	/* h_back_porch */
+		.upper_margin =	31,	/* v_back_porch */
+		.right_margin =	80,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =		FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1600x1200p 60hz */
+	{
+		.refresh =	60,
+		.xres =		1600,
+		.yres =		1200,
+		.pixclock =	KHZ2PICOS(161000),
+		.hsync_len =	168,	/* h_sync_width */
+		.vsync_len =	4,	/* v_sync_width */
+		.left_margin =	280,	/* h_back_porch */
+		.upper_margin =	42,	/* v_back_porch */
+		.right_margin =	112,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1600x1200p 75hz */
+	{
+		.refresh =	75,
+		.xres =		1600,
+		.yres =		1200,
+		.pixclock =	KHZ2PICOS(204750),
+		.hsync_len =	168,	/* h_sync_width */
+		.vsync_len =	4,	/* v_sync_width */
+		.left_margin =	288,	/* h_back_porch */
+		.upper_margin =	52,	/* v_back_porch */
+		.right_margin =	120,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync = FB_SYNC_VERT_HIGH_ACT,
+	},
+	/* 1680x1050p 59.94/60hz */
+	{
+		.refresh =	60,
+		.xres =		1680,
+		.yres =		1050,
+		.pixclock =	KHZ2PICOS(140000),
+		.hsync_len =	168,	/* h_sync_width */
+		.vsync_len =	10,	/* v_sync_width */
+		.left_margin =	272,	/* h_back_porch */
+		.upper_margin =	36,	/* v_back_porch */
+		.right_margin =	104,	/* h_front_porch */
+		.lower_margin =	3,	/* v_front_porch */
+		.vmode =	FB_VMODE_NONINTERLACED,
+		.sync =	FB_SYNC_VERT_HIGH_ACT,
+	},
+};
+
+/* Table of electrical settings; entries must be in ascending pclk order. */
+struct tdms_config {
+	int pclk;
+	u32 pll0;
+	u32 pll1;
+	u32 pe_current; /* pre-emphasis */
+	u32 drive_current;
+};
+
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+const struct tdms_config tdms_config[] = {
+	{ /* 480p modes */
+	.pclk = 27000000,
+	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+		SOR_PLL_VCOCAP(0) | SOR_PLL_TX_REG_LOAD(0),
+	.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+	.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+		PE_CURRENT1(PE_CURRENT_0_0_mA) |
+		PE_CURRENT2(PE_CURRENT_0_0_mA) |
+		PE_CURRENT3(PE_CURRENT_0_0_mA),
+	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+	},
+	{ /* 720p modes */
+	.pclk = 74250000,
+	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+		SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(0),
+	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+	.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+		PE_CURRENT1(PE_CURRENT_5_0_mA) |
+		PE_CURRENT2(PE_CURRENT_5_0_mA) |
+		PE_CURRENT3(PE_CURRENT_5_0_mA),
+	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+	},
+	{ /* 1080p modes */
+	.pclk = INT_MAX,
+	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+		SOR_PLL_VCOCAP(3) | SOR_PLL_TX_REG_LOAD(0),
+	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+	.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+		PE_CURRENT1(PE_CURRENT_5_0_mA) |
+		PE_CURRENT2(PE_CURRENT_5_0_mA) |
+		PE_CURRENT3(PE_CURRENT_5_0_mA),
+	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+	},
+};
+#else /*  CONFIG_ARCH_TEGRA_2x_SOC */
+const struct tdms_config tdms_config[] = {
+	{ /* 480p modes */
+	.pclk = 27000000,
+	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+		SOR_PLL_VCOCAP(0) | SOR_PLL_TX_REG_LOAD(3),
+	.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+	.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+		PE_CURRENT1(PE_CURRENT_0_0_mA) |
+		PE_CURRENT2(PE_CURRENT_0_0_mA) |
+		PE_CURRENT3(PE_CURRENT_0_0_mA),
+	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+	},
+	{ /* 720p modes */
+	.pclk = 74250000,
+	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+		SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(3),
+	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+	.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+		PE_CURRENT1(PE_CURRENT_6_0_mA) |
+		PE_CURRENT2(PE_CURRENT_6_0_mA) |
+		PE_CURRENT3(PE_CURRENT_6_0_mA),
+	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+	},
+	{ /* 1080p modes */
+	.pclk = INT_MAX,
+	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
+		SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(3),
+	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+	.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+		PE_CURRENT1(PE_CURRENT_6_0_mA) |
+		PE_CURRENT2(PE_CURRENT_6_0_mA) |
+		PE_CURRENT3(PE_CURRENT_6_0_mA),
+	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+	},
+};
+#endif
+
+struct tegra_hdmi_audio_config {
+	unsigned pix_clock;
+	unsigned n;
+	unsigned cts;
+	unsigned aval;
+};
+
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
+	{25200000,	4096,	25200,	24000},
+	{27000000,	4096,	27000,	24000},
+	{74250000,	4096,	74250,	24000},
+	{148500000,	4096,	148500,	24000},
+	{0,		0,	0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
+	{25200000,	5880,	26250,	25000},
+	{27000000,	5880,	28125,	25000},
+	{74250000,	4704,	61875,	20000},
+	{148500000,	4704,	123750,	20000},
+	{0,		0,	0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
+	{25200000,	6144,	25200,	24000},
+	{27000000,	6144,	27000,	24000},
+	{74250000,	6144,	74250,	24000},
+	{148500000,	6144,	148500,	24000},
+	{0,		0,	0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
+	{25200000,	11760,	26250,	25000},
+	{27000000,	11760,	28125,	25000},
+	{74250000,	9408,	61875,	20000},
+	{148500000,	9408,	123750, 20000},
+	{0,		0,	0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
+	{25200000,	12288,	25200,	24000},
+	{27000000,	12288,	27000,	24000},
+	{74250000,	12288,	74250,	24000},
+	{148500000,	12288,	148500,	24000},
+	{0,		0,	0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
+	{25200000,	23520,	26250,	25000},
+	{27000000,	23520,	28125,	25000},
+	{74250000,	18816,	61875,	20000},
+	{148500000,	18816,	123750,	20000},
+	{0,		0,	0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
+	{25200000,	24576,	25200,	24000},
+	{27000000,	24576,	27000,	24000},
+	{74250000,	24576,	74250,	24000},
+	{148500000,	24576,	148500,	24000},
+	{0,		0,	0},
+};
+
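+/*
+ * Each table row satisfies the HDMI audio clock regeneration relation
+ * 128 * audio_freq = pix_clock * N / CTS; e.g. for 48 kHz at 25.2 MHz,
+ * 25200000 * 6144 / 25200 = 6144000 = 128 * 48000.
+ */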
+static const struct tegra_hdmi_audio_config
+*tegra_hdmi_get_audio_config(unsigned audio_freq, unsigned pix_clock)
+{
+	const struct tegra_hdmi_audio_config *table;
+
+	switch (audio_freq) {
+	case AUDIO_FREQ_32K:
+		table = tegra_hdmi_audio_32k;
+		break;
+	case AUDIO_FREQ_44_1K:
+		table = tegra_hdmi_audio_44_1k;
+		break;
+	case AUDIO_FREQ_48K:
+		table = tegra_hdmi_audio_48k;
+		break;
+	case AUDIO_FREQ_88_2K:
+		table = tegra_hdmi_audio_88_2k;
+		break;
+	case AUDIO_FREQ_96K:
+		table = tegra_hdmi_audio_96k;
+		break;
+	case AUDIO_FREQ_176_4K:
+		table = tegra_hdmi_audio_176_4k;
+		break;
+	case AUDIO_FREQ_192K:
+		table = tegra_hdmi_audio_192k;
+		break;
+	default:
+		return NULL;
+	}
+
+	while (table->pix_clock) {
+		if (table->pix_clock == pix_clock)
+			return table;
+		table++;
+	}
+
+	return NULL;
+}
+
+
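+/* Register offsets are in 32-bit words; scale by 4 for the byte offset. */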
+unsigned long tegra_hdmi_readl(struct tegra_dc_hdmi_data *hdmi,
+					     unsigned long reg)
+{
+	unsigned long ret;
+	ret = readl(hdmi->base + reg * 4);
+	trace_printk("readl %p=%#08lx\n", hdmi->base + reg * 4, ret);
+	return ret;
+}
+
+void tegra_hdmi_writel(struct tegra_dc_hdmi_data *hdmi,
+				     unsigned long val, unsigned long reg)
+{
+	trace_printk("writel %p=%#08lx\n", hdmi->base + reg * 4, val);
+	writel(val, hdmi->base + reg * 4);
+}
+
+static inline void tegra_hdmi_clrsetbits(struct tegra_dc_hdmi_data *hdmi,
+					 unsigned long reg, unsigned long clr,
+					 unsigned long set)
+{
+	unsigned long val = tegra_hdmi_readl(hdmi, reg);
+	val &= ~clr;
+	val |= set;
+	tegra_hdmi_writel(hdmi, val, reg);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dbg_hdmi_show(struct seq_file *s, void *unused)
+{
+	struct tegra_dc_hdmi_data *hdmi = s->private;
+
+#define DUMP_REG(a) do {						\
+		seq_printf(s, "%-32s\t%03x\t%08lx\n",			\
+		       #a, a, tegra_hdmi_readl(hdmi, a));		\
+	} while (0)
+
+	tegra_dc_io_start(hdmi->dc);
+	clk_prepare_enable(hdmi->clk);
+
+	DUMP_REG(HDMI_CTXSW);
+	DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
+	DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
+	DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
+	DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
+	DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST2);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST3);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST4);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST5);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST6);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST7);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST8);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST9);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTA);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTB);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTC);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTD);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTE);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTF);
+	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
+	DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
+	DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
+	DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
+	DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
+	DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
+	DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
+	DUMP_REG(HDMI_NV_PDISP_SCRATCH);
+	DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
+	DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
+	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
+	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+	DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
+#undef DUMP_REG
+
+	clk_disable_unprepare(hdmi->clk);
+	tegra_dc_io_end(hdmi->dc);
+
+	return 0;
+}
+
+static int dbg_hdmi_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dbg_hdmi_show, inode->i_private);
+}
+
+static const struct file_operations dbg_fops = {
+	.open		= dbg_hdmi_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static struct dentry *hdmidir;
+
+static void tegra_dc_hdmi_debug_create(struct tegra_dc_hdmi_data *hdmi)
+{
+	struct dentry *retval;
+
+	hdmidir = debugfs_create_dir("tegra_hdmi", NULL);
+	if (!hdmidir)
+		return;
+	retval = debugfs_create_file("regs", S_IRUGO, hdmidir, hdmi,
+		&dbg_fops);
+	if (!retval)
+		goto free_out;
+	return;
+free_out:
+	debugfs_remove_recursive(hdmidir);
+	hdmidir = NULL;
+	return;
+}
+#else
+static inline void tegra_dc_hdmi_debug_create(struct tegra_dc_hdmi_data *hdmi)
+{ }
+#endif
+
+#define PIXCLOCK_TOLERANCE	200
+
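+/* Total pixel clocks per frame: horizontal total times vertical total. */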
+static int tegra_dc_calc_clock_per_frame(const struct fb_videomode *mode)
+{
+	return (mode->left_margin + mode->xres +
+		mode->right_margin + mode->hsync_len) *
+	       (mode->upper_margin + mode->yres +
+		mode->lower_margin + mode->vsync_len);
+}
+
+static bool tegra_dc_hdmi_valid_pixclock(const struct tegra_dc *dc,
+					const struct fb_videomode *mode)
+{
+	unsigned max_pixclock = tegra_dc_get_out_max_pixclock(dc);
+	if (max_pixclock) {
+		/* This might look counter-intuitive, but pixclock's
+		 * unit is picoseconds (not kHz), so a larger value
+		 * means a slower pixel clock.
+		 */
+		return mode->pixclock >= max_pixclock;
+	} else {
+		return true;
+	}
+}
+
+static bool tegra_dc_cvt_mode_equal(const struct fb_videomode *mode1,
+				const struct fb_videomode *mode2)
+{
+	return (mode1->xres == mode2->xres &&
+		mode1->yres == mode2->yres &&
+		mode1->refresh == mode2->refresh &&
+		mode1->vmode == mode2->vmode);
+}
+
+static bool tegra_dc_reload_mode(struct fb_videomode *mode)
+{
+	int i = 0;
+	for (i = 0; i < ARRAY_SIZE(tegra_dc_hdmi_supported_cvt_modes); i++) {
+		const struct fb_videomode *cvt_mode
+				= &tegra_dc_hdmi_supported_cvt_modes[i];
+		if (tegra_dc_cvt_mode_equal(cvt_mode, mode)) {
+			memcpy(mode, cvt_mode, sizeof(*mode));
+			return true;
+		}
+	}
+	return false;
+}
+
+static bool tegra_dc_hdmi_valid_asp_ratio(const struct tegra_dc *dc,
+					struct fb_videomode *mode)
+{
+	int count = 0;
+	int m_aspratio = 0;
+	int s_aspratio = 0;
+
+	/* To check the aspect ratio up to two decimal digits, calculate in % */
+	m_aspratio = (mode->xres*100 / mode->yres);
+
+	if ((m_aspratio < TEGRA_DC_HDMI_MIN_ASPECT_RATIO_PERCENT) ||
+			(m_aspratio > TEGRA_DC_HDMI_MAX_ASPECT_RATIO_PERCENT))
+				return false;
+
+	/* Check against the table of supported aspect ratios, allowing a
+	   1% difference to absorb second-decimal-digit rounding */
+	for (count = 0; count < ARRAY_SIZE(tegra_dc_hdmi_aspect_ratios);
+		 count++) {
+			s_aspratio =  tegra_dc_hdmi_aspect_ratios[count];
+			if ((m_aspratio == s_aspratio) ||
+				(abs(m_aspratio - s_aspratio) == 1))
+				return true;
+	}
+
+	return false;
+}
+
+static bool tegra_dc_hdmi_mode_filter(const struct tegra_dc *dc,
+					struct fb_videomode *mode)
+{
+	if (mode->vmode & FB_VMODE_INTERLACED)
+		return false;
+
+	/* Ignore modes with a 0 pixel clock */
+	if (!mode->pixclock)
+		return false;
+
+#ifdef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
+	if (PICOS2KHZ(mode->pixclock) > 74250)
+		return false;
+#endif
+
+	/* Check if the mode's pixel clock exceeds the max rate */
+	if (!tegra_dc_hdmi_valid_pixclock(dc, mode))
+		return false;
+
+	/* Check if the mode's aspect ratio is supported */
+	if (!tegra_dc_hdmi_valid_asp_ratio(dc, mode))
+		return false;
+
+	/* Check some of DC's constraints */
+	if (mode->hsync_len > 1 && mode->vsync_len > 1 &&
+		mode->lower_margin + mode->vsync_len + mode->upper_margin > 1 &&
+		mode->xres >= 16 && mode->yres >= 16) {
+
+		if (mode->lower_margin == 1) {
+			/* This can happen for HDMI<->DVI, where the
+			 * standard VESA representation does not pass
+			 * the constraint V_FRONT_PORCH >=
+			 * V_REF_TO_SYNC + 1, so reload the mode with
+			 * CVT timings.
+			 */
+			if (!tegra_dc_reload_mode(mode))
+				return false;
+		}
+		mode->flag = FB_MODE_IS_DETAILED;
+		mode->refresh = (PICOS2KHZ(mode->pixclock) * 1000) /
+				tegra_dc_calc_clock_per_frame(mode);
+		return true;
+	}
+
+	return false;
+}
+
+static bool tegra_dc_hdmi_hpd(struct tegra_dc *dc)
+{
+	return tegra_dc_hpd(dc);
+}
+
+
+void tegra_dc_hdmi_detect_config(struct tegra_dc *dc,
+						struct fb_monspecs *specs)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+	/* monitors like to lie about these but they are still useful for
+	 * detecting aspect ratios
+	 */
+	dc->out->h_size = specs->max_x * 1000;
+	dc->out->v_size = specs->max_y * 1000;
+
+	hdmi->dvi = !(specs->misc & FB_MISC_HDMI);
+
+	tegra_fb_update_monspecs(dc->fb, specs, tegra_dc_hdmi_mode_filter);
+#ifdef CONFIG_SWITCH
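+	/* Force a 0->1 transition so switch_set_state() notifies even if
+	 * the switch was already set. */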
+	hdmi->hpd_switch.state = 0;
+	switch_set_state(&hdmi->hpd_switch, 1);
+#endif
+	dev_info(&dc->ndev->dev, "display detected\n");
+
+	dc->connected = true;
+	tegra_dc_ext_process_hotplug(dc->ndev->id);
+}
+
+/* This function enables DC1 and HDMI for testing purposes. */
+bool tegra_dc_hdmi_detect_test(struct tegra_dc *dc, unsigned char *edid_ptr)
+{
+	int err;
+	struct fb_monspecs specs;
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+	if (!hdmi || !edid_ptr) {
+		dev_err(&dc->ndev->dev, "HDMI test failed to get arguments.\n");
+		return false;
+	}
+
+	err = tegra_edid_get_monspecs_test(hdmi->edid, &specs, edid_ptr);
+	if (err < 0) {
+		/* Check if there's a hard-wired mode, if so, enable it */
+		if (dc->out->n_modes)
+			tegra_dc_enable(dc);
+		else {
+			dev_err(&dc->ndev->dev, "error reading edid\n");
+			goto fail;
+		}
+#ifdef CONFIG_SWITCH
+		hdmi->hpd_switch.state = 0;
+		switch_set_state(&hdmi->hpd_switch, 1);
+#endif
+		dev_info(&dc->ndev->dev, "display detected\n");
+
+		dc->connected = true;
+		tegra_dc_ext_process_hotplug(dc->ndev->id);
+	} else {
+		err = tegra_edid_get_eld(hdmi->edid, &hdmi->eld);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev, "error populating eld\n");
+			goto fail;
+		}
+		hdmi->eld_retrieved = true;
+
+		tegra_dc_hdmi_detect_config(dc, &specs);
+	}
+
+	return true;
+
+fail:
+	hdmi->eld_retrieved = false;
+#ifdef CONFIG_SWITCH
+	switch_set_state(&hdmi->hpd_switch, 0);
+#endif
+	tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
+	return false;
+}
+EXPORT_SYMBOL(tegra_dc_hdmi_detect_test);
+
+static bool tegra_dc_hdmi_detect(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	struct fb_monspecs specs;
+	int i, err;
+
+	if (!tegra_dc_hdmi_hpd(dc))
+		goto fail;
+
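+	/* EDID reads may fail right after hot-plug; retry a few times. */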
+	for (i = 0; i < 7; i++) {
+		err = tegra_edid_get_monspecs(hdmi->edid, &specs);
+		if (err >= 0)
+			break;
+
+		dev_err(&dc->ndev->dev, "error reading edid, try again...\n");
+		mdelay(50);
+	}
+
+	if (err < 0) {
+		if (dc->out->n_modes)
+			tegra_dc_enable(dc);
+		else {
+			dev_err(&dc->ndev->dev, "error reading edid\n");
+			goto fail;
+		}
+#ifdef CONFIG_SWITCH
+		hdmi->hpd_switch.state = 0;
+		switch_set_state(&hdmi->hpd_switch, 1);
+#endif
+		dev_info(&dc->ndev->dev, "display detected\n");
+
+		dc->connected = true;
+		tegra_dc_ext_process_hotplug(dc->ndev->id);
+	} else {
+		err = tegra_edid_get_eld(hdmi->edid, &hdmi->eld);
+		if (err < 0) {
+			dev_err(&dc->ndev->dev, "error populating eld\n");
+			goto fail;
+		}
+		hdmi->eld_retrieved = true;
+
+		tegra_dc_hdmi_detect_config(dc, &specs);
+	}
+
+	return true;
+
+fail:
+	hdmi->eld_retrieved = false;
+#ifdef CONFIG_SWITCH
+	switch_set_state(&hdmi->hpd_switch, 0);
+#endif
+	tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
+	return false;
+}
+
+static void tegra_dc_hdmi_detect_worker(struct work_struct *work)
+{
+	struct tegra_dc_hdmi_data *hdmi =
+		container_of(to_delayed_work(work), struct tegra_dc_hdmi_data, work);
+	struct tegra_dc *dc = hdmi->dc;
+
+	tegra_dc_enable(dc);
+	msleep(5);
+	if (!tegra_dc_hdmi_detect(dc)) {
+		tegra_dc_disable(dc);
+		tegra_fb_update_monspecs(dc->fb, NULL, NULL);
+
+		dc->connected = false;
+		tegra_dc_ext_process_hotplug(dc->ndev->id);
+	}
+}
+
+static irqreturn_t tegra_dc_hdmi_irq(int irq, void *ptr)
+{
+	struct tegra_dc *dc = ptr;
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	unsigned long flags;
+
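+	/* Debounce the hot-plug GPIO: the detect worker is (re)scheduled
+	 * with a longer delay on plug than on unplug, presumably to give
+	 * the sink time to settle before the EDID is read.
+	 */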
+	spin_lock_irqsave(&hdmi->suspend_lock, flags);
+	if (!hdmi->suspended) {
+		cancel_delayed_work(&hdmi->work);
+		if (tegra_dc_hdmi_hpd(dc))
+			queue_delayed_work(system_wq, &hdmi->work,
+					   msecs_to_jiffies(100));
+		else
+			queue_delayed_work(system_wq, &hdmi->work,
+					   msecs_to_jiffies(30));
+	}
+	spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static void tegra_dc_hdmi_suspend(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	unsigned long flags;
+
+	tegra_nvhdcp_suspend(hdmi->nvhdcp);
+	spin_lock_irqsave(&hdmi->suspend_lock, flags);
+	hdmi->suspended = true;
+	spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+}
+
+static void tegra_dc_hdmi_resume(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->suspend_lock, flags);
+	hdmi->suspended = false;
+
+	if (tegra_dc_hdmi_hpd(dc))
+		queue_delayed_work(system_wq, &hdmi->work,
+				   msecs_to_jiffies(100));
+	else
+		queue_delayed_work(system_wq, &hdmi->work,
+				   msecs_to_jiffies(30));
+
+	spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+	tegra_nvhdcp_resume(hdmi->nvhdcp);
+}
+
+#ifdef CONFIG_SWITCH
+static ssize_t underscan_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct tegra_dc_hdmi_data *hdmi =
+			container_of(dev_get_drvdata(dev), struct tegra_dc_hdmi_data, hpd_switch);
+
+	if (hdmi->edid)
+		return sprintf(buf, "%d\n", tegra_edid_underscan_supported(hdmi->edid));
+	else
+		return 0;
+}
+
+static DEVICE_ATTR(underscan, S_IRUGO, underscan_show, NULL);
+#endif
+
+static int tegra_dc_hdmi_init(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi;
+	struct resource *res;
+	struct resource *base_res;
+#ifdef CONFIG_SWITCH
+	int ret;
+#endif
+	void __iomem *base;
+	struct clk *clk = NULL;
+// 	struct clk *disp1_clk = NULL;
+// 	struct clk *disp2_clk = NULL;
+	int err;
+
+	hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+	if (!hdmi)
+		return -ENOMEM;
+
+	res = nvhost_get_resource_byname(dc->ndev, IORESOURCE_MEM, "hdmi_regs");
+	if (!res) {
+		dev_err(&dc->ndev->dev, "hdmi: no mem resource\n");
+		err = -ENOENT;
+		goto err_free_hdmi;
+	}
+
+	base_res = request_mem_region(res->start, resource_size(res), dc->ndev->name);
+	if (!base_res) {
+		dev_err(&dc->ndev->dev, "hdmi: request_mem_region failed\n");
+		err = -EBUSY;
+		goto err_free_hdmi;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		dev_err(&dc->ndev->dev, "hdmi: registers can't be mapped\n");
+		err = -EBUSY;
+		goto err_release_resource_reg;
+	}
+
+	clk = clk_get(&dc->ndev->dev, "hdmi");
+	if (IS_ERR(clk)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't get clock\n");
+		err = -ENOENT;
+		goto err_iounmap_reg;
+	}
+
+	hdmi->rst = devm_reset_control_get(&dc->ndev->dev, "hdmi");
+	if (IS_ERR(hdmi->rst)) {
+		dev_err(&dc->ndev->dev, "failed to get hdmi reset\n");
+		err = PTR_ERR(hdmi->rst);
+		goto err_iounmap_reg;
+	}
+
+// 	disp1_clk = clk_get(&dc->ndev->dev, "dc1");
+// 	if (IS_ERR(disp1_clk)) {
+// 		dev_err(&dc->ndev->dev, "hdmi: can't disp1 clock\n");
+// 		err = -ENOENT;
+// 		goto err_put_clock;
+// 	}
+// 
+// 	disp2_clk = clk_get(&dc->ndev->dev, NULL);
+// 	if (IS_ERR(disp2_clk)) {
+// 		dev_err(&dc->ndev->dev, "hdmi: can't disp2 clock\n");
+// 		err = -ENOENT;
+// 		goto err_put_clock;
+// 	}
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	hdmi->hda_clk = clk_get_sys("tegra30-hda", "hda");
+	if (IS_ERR(hdmi->hda_clk)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't get hda clock\n");
+		err = -ENOENT;
+		goto err_put_clock;
+	}
+
+	hdmi->hda2codec_clk = clk_get_sys("tegra30-hda", "hda2codec");
+	if (IS_ERR(hdmi->hda2codec_clk)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't get hda2codec clock\n");
+		err = -ENOENT;
+		goto err_put_clock;
+	}
+
+	hdmi->hda2hdmi_clk = clk_get_sys("tegra30-hda", "hda2hdmi");
+	if (IS_ERR(hdmi->hda2hdmi_clk)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't get hda2hdmi clock\n");
+		err = -ENOENT;
+		goto err_put_clock;
+	}
+#endif
+
+	/* TODO: support non-hotplug */
+	if (request_irq(gpio_to_irq(dc->out->hotplug_gpio), tegra_dc_hdmi_irq,
+			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+			dev_name(&dc->ndev->dev), dc)) {
+		dev_err(&dc->ndev->dev, "hdmi: request_irq %d failed\n",
+			gpio_to_irq(dc->out->hotplug_gpio));
+		err = -EBUSY;
+		goto err_put_clock;
+	}
+
+	hdmi->edid = tegra_edid_create(dc->out->dcc_bus);
+	if (IS_ERR(hdmi->edid)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't create edid\n");
+		err = PTR_ERR(hdmi->edid);
+		goto err_free_irq;
+	}
+
+#ifdef CONFIG_TEGRA_NVHDCP
+	hdmi->nvhdcp = tegra_nvhdcp_create(hdmi, dc->ndev->id,
+			dc->out->dcc_bus);
+	if (IS_ERR(hdmi->nvhdcp)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't create nvhdcp\n");
+		err = PTR_ERR(hdmi->nvhdcp);
+		goto err_edid_destroy;
+	}
+#else
+	hdmi->nvhdcp = NULL;
+#endif
+
+	INIT_DELAYED_WORK(&hdmi->work, tegra_dc_hdmi_detect_worker);
+
+	hdmi->dc = dc;
+	hdmi->base = base;
+	hdmi->base_res = base_res;
+	hdmi->clk = clk;
+// 	hdmi->disp1_clk = disp1_clk;
+// 	hdmi->disp2_clk = disp2_clk;
+	hdmi->suspended = false;
+	hdmi->eld_retrieved = false;
+	hdmi->clk_enabled = false;
+	hdmi->audio_freq = 44100;
+	hdmi->audio_source = AUTO;
+	spin_lock_init(&hdmi->suspend_lock);
+
+#ifdef CONFIG_SWITCH
+	hdmi->hpd_switch.name = "hdmi";
+	ret = switch_dev_register(&hdmi->hpd_switch);
+
+	if (!ret)
+		ret = device_create_file(hdmi->hpd_switch.dev,
+			&dev_attr_underscan);
+	BUG_ON(ret != 0);
+#endif
+
+	dc->out->depth = 24;
+
+	tegra_dc_set_outdata(dc, hdmi);
+
+	dc_hdmi = hdmi;
+	/* boards can select default content protection policy */
+	if (dc->out->flags & TEGRA_DC_OUT_NVHDCP_POLICY_ON_DEMAND)
+		tegra_nvhdcp_set_policy(hdmi->nvhdcp,
+			TEGRA_NVHDCP_POLICY_ON_DEMAND);
+	else
+		tegra_nvhdcp_set_policy(hdmi->nvhdcp,
+			TEGRA_NVHDCP_POLICY_ALWAYS_ON);
+
+	tegra_dc_hdmi_debug_create(hdmi);
+
+	return 0;
+
+#ifdef CONFIG_TEGRA_NVHDCP
+err_edid_destroy:
+	tegra_edid_destroy(hdmi->edid);
+#endif
+err_free_irq:
+	free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
+err_put_clock:
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	if (!IS_ERR(hdmi->hda2hdmi_clk))
+		clk_put(hdmi->hda2hdmi_clk);
+	if (!IS_ERR(hdmi->hda2codec_clk))
+		clk_put(hdmi->hda2codec_clk);
+	if (!IS_ERR(hdmi->hda_clk))
+		clk_put(hdmi->hda_clk);
+#endif
+// 	if (!IS_ERR(disp2_clk))
+// 		clk_put(disp2_clk);
+// 	if (!IS_ERR(disp1_clk))
+// 		clk_put(disp1_clk);
+	if (!IS_ERR(clk))
+		clk_put(clk);
+err_iounmap_reg:
+	iounmap(base);
+err_release_resource_reg:
+	release_resource(base_res);
+err_free_hdmi:
+	kfree(hdmi);
+	return err;
+}
+
+static void tegra_dc_hdmi_destroy(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+	free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
+	cancel_delayed_work_sync(&hdmi->work);
+#ifdef CONFIG_SWITCH
+	switch_dev_unregister(&hdmi->hpd_switch);
+#endif
+	iounmap(hdmi->base);
+	release_resource(hdmi->base_res);
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	clk_put(hdmi->hda2hdmi_clk);
+	clk_put(hdmi->hda2codec_clk);
+	clk_put(hdmi->hda_clk);
+#endif
+	clk_put(hdmi->clk);
+// 	clk_put(hdmi->disp1_clk);
+// 	clk_put(hdmi->disp2_clk);
+	tegra_edid_destroy(hdmi->edid);
+	tegra_nvhdcp_destroy(hdmi->nvhdcp);
+
+	kfree(hdmi);
+}
+
+static void tegra_dc_hdmi_setup_audio_fs_tables(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	int i;
+	unsigned freqs[] = {
+		32000,
+		44100,
+		48000,
+		88200,
+		96000,
+		176400,
+		192000,
+	};
+
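+	/* For each supported sample rate, program a tolerance window
+	 * around the expected ratio of the audio master clock to the
+	 * 128 * fs audio bit clock; the hardware presumably uses these
+	 * windows to match the incoming stream to one of the known rates.
+	 */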
+	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+		unsigned f = freqs[i];
+		unsigned eight_half;
+		unsigned delta;
+
+		if (f > 96000)
+			delta = 2;
+		else if (f > 48000)
+			delta = 6;
+		else
+			delta = 9;
+
+		eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+		tegra_hdmi_writel(hdmi, AUDIO_FS_LOW(eight_half - delta) |
+				  AUDIO_FS_HIGH(eight_half + delta),
+				  HDMI_NV_PDISP_AUDIO_FS(i));
+	}
+}
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+static void tegra_dc_hdmi_setup_eld_buff(struct tegra_dc *dc)
+{
+	int i;
+	int j;
+	u8 tmp;
+
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+	/* program ELD stuff */
+	for (i = 0; i < HDMI_ELD_MONITOR_NAME_INDEX; i++) {
+		switch (i) {
+		case HDMI_ELD_VER_INDEX:
+			tmp = (hdmi->eld.eld_ver << 3);
+			tegra_hdmi_writel(hdmi, (i << 8) | tmp,
+				  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+			break;
+		case HDMI_ELD_BASELINE_LEN_INDEX:
+			break;
+		case HDMI_ELD_CEA_VER_MNL_INDEX:
+			tmp = (hdmi->eld.cea_edid_ver << 5);
+			tmp |= (hdmi->eld.mnl & 0x1f);
+			tegra_hdmi_writel(hdmi, (i << 8) | tmp,
+					  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+			break;
+		case HDMI_ELD_SAD_CNT_CON_TYP_SAI_HDCP_INDEX:
+			tmp = (hdmi->eld.sad_count << 4);
+			tmp |= (hdmi->eld.conn_type & 0xC);
+			tmp |= (hdmi->eld.support_ai & 0x2);
+			tmp |= (hdmi->eld.support_hdcp & 0x1);
+			tegra_hdmi_writel(hdmi, (i << 8) | tmp,
+					  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+			break;
+		case HDMI_ELD_AUD_SYNC_DELAY_INDEX:
+			tegra_hdmi_writel(hdmi, (i << 8) | (hdmi->eld.aud_synch_delay),
+					  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+			break;
+		case HDMI_ELD_SPK_ALLOC_INDEX:
+			tegra_hdmi_writel(hdmi, (i << 8) | (hdmi->eld.spk_alloc),
+					  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+			break;
+		case HDMI_ELD_PORT_ID_INDEX:
+			for (j = 0; j < 8; j++) {
+				tegra_hdmi_writel(hdmi, ((i + j) << 8) | (hdmi->eld.port_id[j]),
+					  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+			}
+			break;
+		case HDMI_ELD_MANF_NAME_INDEX:
+			for (j = 0; j < 2; j++) {
+				tegra_hdmi_writel(hdmi, ((i + j) << 8) | (hdmi->eld.manufacture_id[j]),
+					  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+			}
+			break;
+		case HDMI_ELD_PRODUCT_CODE_INDEX:
+			for (j = 0; j < 2; j++) {
+				tegra_hdmi_writel(hdmi, ((i + j) << 8) | (hdmi->eld.product_id[j]),
+					  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+			}
+			break;
+		}
+	}
+	for (j = 0; j < hdmi->eld.mnl; j++) {
+		tegra_hdmi_writel(hdmi, ((j + HDMI_ELD_MONITOR_NAME_INDEX) << 8) |
+				  (hdmi->eld.monitor_name[j]),
+				  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+	}
+	for (j = 0; j < hdmi->eld.sad_count; j++) {
+		tegra_hdmi_writel(hdmi, ((j + HDMI_ELD_MONITOR_NAME_INDEX + hdmi->eld.mnl) << 8) |
+				  (hdmi->eld.sad[j]),
+				  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
+	}
+	/* set the presence and valid bits */
+	tegra_hdmi_writel(hdmi, 3, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0);
+}
+#endif
+
+static int tegra_dc_hdmi_setup_audio(struct tegra_dc *dc, unsigned audio_freq,
+					unsigned audio_source)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	const struct tegra_hdmi_audio_config *config;
+	unsigned long audio_n;
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	unsigned long reg_addr = 0;
+#endif
+	unsigned a_source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+
+	if (HDA == audio_source)
+		a_source = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+	else if (SPDIF == audio_source)
+		a_source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	if (hdmi->audio_inject_null)
+		a_source |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+
+	tegra_hdmi_writel(hdmi, a_source,
+			  HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0);
+	tegra_hdmi_writel(hdmi,
+			  AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+			  AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0),
+			  HDMI_NV_PDISP_AUDIO_CNTRL0);
+#else
+	tegra_hdmi_writel(hdmi,
+			  AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+			  AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
+			  a_source,
+			  HDMI_NV_PDISP_AUDIO_CNTRL0);
+#endif
+	config = tegra_hdmi_get_audio_config(audio_freq, dc->mode.pclk);
+	if (!config) {
+		dev_err(&dc->ndev->dev,
+			"hdmi: can't set audio to %d at %d pix_clock",
+			audio_freq, dc->mode.pclk);
+		return -EINVAL;
+	}
+
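+	/* HDMI audio clock regeneration (ACR): the sink recovers the audio
+	 * clock as 128 * fs = f_TMDS * N / CTS, so the N and CTS values
+	 * from the config table are chosen for the requested sample rate
+	 * at the current pixel clock.
+	 */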
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+	audio_n = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNALTE |
+		AUDIO_N_VALUE(config->n - 1);
+	tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N);
+
+	tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+			  HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+	tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
+			  HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+	tegra_hdmi_writel(hdmi, SPARE_HW_CTS | SPARE_FORCE_SW_CTS |
+			  SPARE_CTS_RESET_VAL(1),
+			  HDMI_NV_PDISP_HDMI_SPARE);
+
+	audio_n &= ~AUDIO_N_RESETF;
+	tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N);
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	switch (audio_freq) {
+	case AUDIO_FREQ_32K:
+		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320_0;
+		break;
+	case AUDIO_FREQ_44_1K:
+		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441_0;
+		break;
+	case AUDIO_FREQ_48K:
+		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480_0;
+		break;
+	case AUDIO_FREQ_88_2K:
+		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882_0;
+		break;
+	case AUDIO_FREQ_96K:
+		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960_0;
+		break;
+	case AUDIO_FREQ_176_4K:
+		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764_0;
+		break;
+	case AUDIO_FREQ_192K:
+		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920_0;
+		break;
+	}
+
+	tegra_hdmi_writel(hdmi, config->aval, reg_addr);
+#endif
+	tegra_dc_hdmi_setup_audio_fs_tables(dc);
+
+	return 0;
+}
+
+int tegra_hdmi_setup_audio_freq_source(unsigned audio_freq, unsigned audio_source)
+{
+	struct tegra_dc_hdmi_data *hdmi = dc_hdmi;
+
+	if (!hdmi)
+		return -EAGAIN;
+
+	/* check for a known frequency */
+	if (AUDIO_FREQ_32K == audio_freq ||
+		AUDIO_FREQ_44_1K == audio_freq ||
+		AUDIO_FREQ_48K == audio_freq ||
+		AUDIO_FREQ_88_2K == audio_freq ||
+		AUDIO_FREQ_96K == audio_freq ||
+		AUDIO_FREQ_176_4K == audio_freq ||
+		AUDIO_FREQ_192K == audio_freq) {
+		/* If we can program HDMI, then proceed */
+		if (hdmi->clk_enabled)
+			tegra_dc_hdmi_setup_audio(hdmi->dc, audio_freq,
+						  audio_source);
+
+		/* Store the values for use when the output is enabled */
+		hdmi->audio_freq = audio_freq;
+		hdmi->audio_source = audio_source;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_hdmi_setup_audio_freq_source);
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+int tegra_hdmi_audio_null_sample_inject(bool on)
+{
+	struct tegra_dc_hdmi_data *hdmi = dc_hdmi;
+	unsigned int val = 0;
+
+	if (!hdmi)
+		return -EAGAIN;
+
+	if (hdmi->audio_inject_null != on) {
+		hdmi->audio_inject_null = on;
+		if (hdmi->clk_enabled) {
+			val = tegra_hdmi_readl(hdmi,
+				HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0);
+			val &= ~AUDIO_CNTRL0_INJECT_NULLSMPL;
+			if (on)
+				val |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+			tegra_hdmi_writel(hdmi, val,
+				HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_hdmi_audio_null_sample_inject);
+
+int tegra_hdmi_setup_hda_presence(void)
+{
+	struct tegra_dc_hdmi_data *hdmi = dc_hdmi;
+
+	if (!hdmi)
+		return -EAGAIN;
+
+	if (hdmi->clk_enabled && hdmi->eld_retrieved) {
+		/* If HDA_PRESENCE is already set, reset it */
+		if (tegra_hdmi_readl(hdmi,
+				     HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0))
+			tegra_hdmi_writel(hdmi, 0,
+				     HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0);
+
+		tegra_dc_hdmi_setup_eld_buff(hdmi->dc);
+	} else {
+		return -ENODEV;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_hdmi_setup_hda_presence);
+#endif
+
+static void tegra_dc_hdmi_write_infopack(struct tegra_dc *dc, int header_reg,
+					 u8 type, u8 version, void *data, int len)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	u32 subpack[2];  /* extra byte for zero padding of subpack */
+	int i;
+	u8 csum;
+
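+	/* Per the CEA-861 infoframe definition, the checksum byte is chosen
+	 * so that the header (type, version, length) plus all payload bytes
+	 * sum to zero modulo 256.
+	 */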
+	/* first byte of data is the checksum */
+	csum = type + version + len - 1;
+	for (i = 1; i < len; i++)
+		csum += ((u8 *)data)[i];
+	((u8 *)data)[0] = 0x100 - csum;
+
+	tegra_hdmi_writel(hdmi, INFOFRAME_HEADER_TYPE(type) |
+			  INFOFRAME_HEADER_VERSION(version) |
+			  INFOFRAME_HEADER_LEN(len - 1),
+			  header_reg);
+
+	/* The audio infoframe only has one set of subpack registers.  The
+	 * HDMI block pads the rest of the data as per the spec, so we have
+	 * to fix up the length before filling in the subpacks.
+	 */
+	if (header_reg == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
+		len = 6;
+
+	/* each subpack is 7 bytes, divided into:
+	 *   subpack_low - bytes 0 - 3
+	 *   subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
+	 */
+	for (i = 0; i < len; i++) {
+		int subpack_idx = i % 7;
+
+		if (subpack_idx == 0)
+			memset(subpack, 0x0, sizeof(subpack));
+
+		((u8 *)subpack)[subpack_idx] = ((u8 *)data)[i];
+
+		if (subpack_idx == 6 || (i + 1 == len)) {
+			int reg = header_reg + 1 + (i / 7) * 2;
+
+			tegra_hdmi_writel(hdmi, subpack[0], reg);
+			tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
+		}
+	}
+}
+
+static void tegra_dc_hdmi_setup_avi_infoframe(struct tegra_dc *dc, bool dvi)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	struct hdmi_avi_infoframe avi;
+
+	if (dvi) {
+		tegra_hdmi_writel(hdmi, 0x0,
+				  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+		return;
+	}
+
+	memset(&avi, 0x0, sizeof(avi));
+
+	avi.r = HDMI_AVI_R_SAME;
+
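+	/* 0x101010 corresponds to limited-range black (16, 16, 16), which
+	 * appears to be the intended border color for the SD (720x480/576)
+	 * modes; all other modes use a zero border.
+	 */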
+	if ((dc->mode.h_active == 720) && ((dc->mode.v_active == 480) || (dc->mode.v_active == 576)))
+		tegra_dc_writel(dc, 0x00101010, DC_DISP_BORDER_COLOR);
+	else
+		tegra_dc_writel(dc, 0x00000000, DC_DISP_BORDER_COLOR);
+
+	if (dc->mode.v_active == 480) {
+		if (dc->mode.h_active == 640) {
+			avi.m = HDMI_AVI_M_4_3;
+			avi.vic = 1;
+		} else {
+			avi.m = HDMI_AVI_M_16_9;
+			avi.vic = 3;
+		}
+	} else if (dc->mode.v_active == 576) {
+		/* CEA modes 17 and 18 differ only by the physical size of
+		 * the screen, so we have to calculate the physical aspect
+		 * ratio: (4 * 10) / 3 is 13 for 4:3 and (16 * 10) / 9 is
+		 * 17 for 16:9, hence the comparison against 14.
+		 */
+		if ((dc->out->h_size * 10) / dc->out->v_size > 14) {
+			avi.m = HDMI_AVI_M_16_9;
+			avi.vic = 18;
+		} else {
+			avi.m = HDMI_AVI_M_4_3;
+			avi.vic = 17;
+		}
+	} else if (dc->mode.v_active == 720 ||
+		(dc->mode.v_active == 1470 && dc->mode.stereo_mode)) {
+		/* VIC for both 720p and 720p 3D mode */
+		avi.m = HDMI_AVI_M_16_9;
+		if (dc->mode.h_front_porch == 110)
+			avi.vic = 4; /* 60 Hz */
+		else
+			avi.vic = 19; /* 50 Hz */
+	} else if (dc->mode.v_active == 1080 ||
+		(dc->mode.v_active == 2205 && dc->mode.stereo_mode)) {
+		/* VIC for both 1080p and 1080p 3D mode */
+		avi.m = HDMI_AVI_M_16_9;
+		if (dc->mode.h_front_porch == 88) {
+			if (dc->mode.pclk > 74250000)
+				avi.vic = 16; /* 60 Hz */
+			else
+				avi.vic = 34; /* 30 Hz */
+		} else if (dc->mode.h_front_porch == 528)
+			avi.vic = 31; /* 50 Hz */
+		else
+			avi.vic = 32; /* 24 Hz */
+	} else {
+		avi.m = HDMI_AVI_M_16_9;
+		avi.vic = 0;
+	}
+
+	if (hdmi->eld.vsdb) {
+		avi.s = HDMI_AVI_S_UNDERSCAN;
+	}
+
+	tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
+				     HDMI_INFOFRAME_TYPE_AVI,
+				     HDMI_AVI_VERSION,
+				     &avi, sizeof(avi));
+
+	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+			  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_dc_hdmi_setup_stereo_infoframe(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	struct hdmi_stereo_infoframe stereo;
+	u32 val;
+
+	if (!dc->mode.stereo_mode) {
+		val  = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+		val &= ~GENERIC_CTRL_ENABLE;
+		tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+		return;
+	}
+
+	memset(&stereo, 0x0, sizeof(stereo));
+
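+	/* regid0..regid2 carry the HDMI Licensing, LLC IEEE OUI (0x000C03),
+	 * which identifies the HDMI vendor-specific infoframe.
+	 */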
+	stereo.regid0 = 0x03;
+	stereo.regid1 = 0x0c;
+	stereo.regid2 = 0x00;
+	stereo.hdmi_video_format = 2; /* 3D_Structure present */
+#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
+	stereo._3d_structure = 0; /* frame packing */
+#else
+	stereo._3d_structure = 8; /* side-by-side (half) */
+	stereo._3d_ext_data = 0; /* a value that fits the required 00XX bit pattern */
+#endif
+
+	tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
+					HDMI_INFOFRAME_TYPE_VENDOR,
+					HDMI_VENDOR_VERSION,
+					&stereo, 6);
+
+	val  = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+	val |= GENERIC_CTRL_ENABLE;
+
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_dc_hdmi_setup_audio_infoframe(struct tegra_dc *dc, bool dvi)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	struct hdmi_audio_infoframe audio;
+
+	if (dvi) {
+		tegra_hdmi_writel(hdmi, 0x0,
+				  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+		return;
+	}
+
+	memset(&audio, 0x0, sizeof(audio));
+
+	audio.cc = HDMI_AUDIO_CC_2;
+	tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
+				     HDMI_INFOFRAME_TYPE_AUDIO,
+				     HDMI_AUDIO_VERSION,
+				     &audio, sizeof(audio));
+
+	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+			  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_dc_hdmi_setup_tdms(struct tegra_dc_hdmi_data *hdmi,
+		const struct tdms_config *tc)
+{
+	tegra_hdmi_writel(hdmi, tc->pll0, HDMI_NV_PDISP_SOR_PLL0);
+	tegra_hdmi_writel(hdmi, tc->pll1, HDMI_NV_PDISP_SOR_PLL1);
+
+	tegra_hdmi_writel(hdmi, tc->pe_current, HDMI_NV_PDISP_PE_CURRENT);
+
+	tegra_hdmi_writel(hdmi,
+			  tc->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE,
+			  HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+}
+
+static void tegra_dc_hdmi_enable(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	int pulse_start;
+	int dispclk_div_8_2;
+	int retries;
+	int rekey;
+	int err;
+	unsigned long val;
+	unsigned i;
+	unsigned long oldrate;
+
+	/* enable power, clocks, resets, etc. */
+
+	/* The upstream DC needs to be clocked for accesses to HDMI to not
+	 * hard lock the system.  Because we don't know if HDMI is connected
+	 * to disp1 or disp2 we need to enable both until we set the DC mux.
+	 */
+// 	clk_prepare_enable(hdmi->disp1_clk);
+// 	clk_prepare_enable(hdmi->disp2_clk);
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	/* Enabling HDA clocks before asserting HDA PD and ELDV bits */
+	clk_prepare_enable(hdmi->hda_clk);
+	clk_prepare_enable(hdmi->hda2codec_clk);
+	clk_prepare_enable(hdmi->hda2hdmi_clk);
+#endif
+
+	/* back off multiplier before attaching to parent at new rate. */
+	oldrate = clk_get_rate(hdmi->clk);
+	clk_set_rate(hdmi->clk, oldrate / 2);
+
+	tegra_dc_setup_clk(dc, hdmi->clk);
+	clk_set_rate(hdmi->clk, dc->mode.pclk);
+
+	clk_prepare_enable(hdmi->clk);
+	reset_control_assert(hdmi->rst);
+	usleep_range(1000, 2000);
+	reset_control_deassert(hdmi->rst);
+
+	/* TODO: copy HDCP keys from KFUSE to HDMI */
+
+	/* Program display timing registers: handled by dc */
+
+	/* program HDMI registers and SOR sequencer */
+
+	tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS);
+	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+			DC_DISP_DISP_COLOR_CONTROL);
+
+	/* video_preamble uses h_pulse2 */
+	pulse_start = dc->mode.h_ref_to_sync + dc->mode.h_sync_width +
+		dc->mode.h_back_porch - 10;
+	tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+	tegra_dc_writel(dc,
+			PULSE_MODE_NORMAL |
+			PULSE_POLARITY_HIGH |
+			PULSE_QUAL_VACTIVE |
+			PULSE_LAST_END_A,
+			DC_DISP_H_PULSE2_CONTROL);
+	tegra_dc_writel(dc, PULSE_START(pulse_start) | PULSE_END(pulse_start + 8),
+		  DC_DISP_H_PULSE2_POSITION_A);
+
+	tegra_hdmi_writel(hdmi,
+			  VSYNC_WINDOW_END(0x210) |
+			  VSYNC_WINDOW_START(0x200) |
+			  VSYNC_WINDOW_ENABLE,
+			  HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+	if ((dc->mode.h_active == 720) && ((dc->mode.v_active == 480) || (dc->mode.v_active == 576)))
+		tegra_hdmi_writel(hdmi,
+				  (dc->ndev->id ? HDMI_SRC_DISPLAYB : HDMI_SRC_DISPLAYA) |
+				  ARM_VIDEO_RANGE_FULL,
+				  HDMI_NV_PDISP_INPUT_CONTROL);
+	else
+		tegra_hdmi_writel(hdmi,
+				  (dc->ndev->id ? HDMI_SRC_DISPLAYB : HDMI_SRC_DISPLAYA) |
+				  ARM_VIDEO_RANGE_LIMITED,
+				  HDMI_NV_PDISP_INPUT_CONTROL);
+
+// 	clk_disable_unprepare(hdmi->disp1_clk);
+// 	clk_disable_unprepare(hdmi->disp2_clk);
+
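+	/* The SOR reference clock divider is programmed as an 8.2
+	 * fixed-point value (rate in MHz times 4): the integer part goes
+	 * into DIV_INT and the two fractional bits into DIV_FRAC.
+	 */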
+	dispclk_div_8_2 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+	tegra_hdmi_writel(hdmi,
+			  SOR_REFCLK_DIV_INT(dispclk_div_8_2 >> 2) |
+			  SOR_REFCLK_DIV_FRAC(dispclk_div_8_2),
+			  HDMI_NV_PDISP_SOR_REFCLK);
+
+	hdmi->clk_enabled = true;
+
+	if (!hdmi->dvi) {
+		err = tegra_dc_hdmi_setup_audio(dc, hdmi->audio_freq,
+			hdmi->audio_source);
+
+		if (err < 0)
+			hdmi->dvi = true;
+	}
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	if (hdmi->eld_retrieved)
+		tegra_dc_hdmi_setup_eld_buff(dc);
+#endif
+
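+	/* MAX_AC_PACKET below counts the 32-pixel audio/control packets
+	 * that fit into the horizontal blanking interval after the rekey
+	 * window and an 18-pixel overhead are subtracted; this reading is
+	 * inferred from the formula, not from documentation.
+	 */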
+	rekey = HDMI_REKEY_DEFAULT;
+	val = HDMI_CTRL_REKEY(rekey);
+	val |= HDMI_CTRL_MAX_AC_PACKET((dc->mode.h_sync_width +
+					dc->mode.h_back_porch +
+					dc->mode.h_front_porch -
+					rekey - 18) / 32);
+	if (!hdmi->dvi)
+		val |= HDMI_CTRL_ENABLE;
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_CTRL);
+
+	if (hdmi->dvi)
+		tegra_hdmi_writel(hdmi, 0x0,
+				  HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+	else
+		tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
+				  HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+
+	tegra_dc_hdmi_setup_avi_infoframe(dc, hdmi->dvi);
+	tegra_dc_hdmi_setup_audio_infoframe(dc, hdmi->dvi);
+	tegra_dc_hdmi_setup_stereo_infoframe(dc);
+
+	/* TMDS CONFIG */
+	for (i = 0; i < ARRAY_SIZE(tdms_config); i++) {
+		if (dc->mode.pclk <= tdms_config[i].pclk) {
+			tegra_dc_hdmi_setup_tdms(hdmi, &tdms_config[i]);
+			break;
+		}
+	}
+
+	tegra_hdmi_writel(hdmi,
+			  SOR_SEQ_CTL_PU_PC(0) |
+			  SOR_SEQ_PU_PC_ALT(0) |
+			  SOR_SEQ_PD_PC(8) |
+			  SOR_SEQ_PD_PC_ALT(8),
+			  HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+	val = SOR_SEQ_INST_WAIT_TIME(1) |
+		SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+		SOR_SEQ_INST_HALT |
+		SOR_SEQ_INST_PIN_A_LOW |
+		SOR_SEQ_INST_PIN_B_LOW |
+		SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST0);
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST8);
+
+	val = 0x1c800;
+	val &= ~SOR_CSTM_ROTCLK(~0);
+	val |= SOR_CSTM_ROTCLK(2);
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_CSTM);
+
+	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+
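+	/* Bring up the SOR with a two-step handshake: request the new
+	 * power state with SETTING_NEW_TRIGGER, commit it with
+	 * SETTING_NEW_DONE, then poll until the PENDING bit clears.
+	 */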
+	/* start SOR */
+	tegra_hdmi_writel(hdmi,
+			  SOR_PWR_NORMAL_STATE_PU |
+			  SOR_PWR_NORMAL_START_NORMAL |
+			  SOR_PWR_SAFE_STATE_PD |
+			  SOR_PWR_SETTING_NEW_TRIGGER,
+			  HDMI_NV_PDISP_SOR_PWR);
+	tegra_hdmi_writel(hdmi,
+			  SOR_PWR_NORMAL_STATE_PU |
+			  SOR_PWR_NORMAL_START_NORMAL |
+			  SOR_PWR_SAFE_STATE_PD |
+			  SOR_PWR_SETTING_NEW_DONE,
+			  HDMI_NV_PDISP_SOR_PWR);
+
+	retries = 1000;
+	do {
+		BUG_ON(--retries < 0);
+		val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+	} while (val & SOR_PWR_SETTING_NEW_PENDING);
+
+	val = SOR_STATE_ASY_CRCMODE_COMPLETE |
+		SOR_STATE_ASY_OWNER_HEAD0 |
+		SOR_STATE_ASY_SUBOWNER_BOTH |
+		SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+		SOR_STATE_ASY_DEPOL_POS;
+
+	if (dc->mode.flags & TEGRA_DC_MODE_FLAG_NEG_H_SYNC)
+		val |= SOR_STATE_ASY_HSYNCPOL_NEG;
+	else
+		val |= SOR_STATE_ASY_HSYNCPOL_POS;
+
+	if (dc->mode.flags & TEGRA_DC_MODE_FLAG_NEG_V_SYNC)
+		val |= SOR_STATE_ASY_VSYNCPOL_NEG;
+	else
+		val |= SOR_STATE_ASY_VSYNCPOL_POS;
+
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE2);
+
+	val = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE1);
+
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+	tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+	tegra_hdmi_writel(hdmi, val | SOR_STATE_ATTACHED,
+			  HDMI_NV_PDISP_SOR_STATE1);
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+	tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+	tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+			PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
+			DC_CMD_DISPLAY_POWER_CONTROL);
+
+	tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	tegra_nvhdcp_set_plug(hdmi->nvhdcp, 1);
+}
+
+static void tegra_dc_hdmi_disable(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+	tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
+
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0);
+	/* sleep 1ms before disabling clocks to ensure HDA gets the interrupt */
+	msleep(1);
+	clk_disable_unprepare(hdmi->hda2hdmi_clk);
+	clk_disable_unprepare(hdmi->hda2codec_clk);
+	clk_disable_unprepare(hdmi->hda_clk);
+#endif
+	reset_control_assert(hdmi->rst);
+	hdmi->clk_enabled = false;
+	clk_disable_unprepare(hdmi->clk);
+}
+
+struct tegra_dc_out_ops tegra_dc_hdmi_ops = {
+	.init = tegra_dc_hdmi_init,
+	.destroy = tegra_dc_hdmi_destroy,
+	.enable = tegra_dc_hdmi_enable,
+	.disable = tegra_dc_hdmi_disable,
+	.detect = tegra_dc_hdmi_detect,
+	.suspend = tegra_dc_hdmi_suspend,
+	.resume = tegra_dc_hdmi_resume,
+	.mode_filter = tegra_dc_hdmi_mode_filter,
+};
+
+struct tegra_dc_edid *tegra_dc_get_edid(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi;
+
+	/* TODO: Support EDID on non-HDMI devices */
+	if (dc->out->type != TEGRA_DC_OUT_HDMI)
+		return ERR_PTR(-ENODEV);
+
+	hdmi = tegra_dc_get_outdata(dc);
+
+	return tegra_edid_get_data(hdmi->edid);
+}
+EXPORT_SYMBOL(tegra_dc_get_edid);
+
+void tegra_dc_put_edid(struct tegra_dc_edid *edid)
+{
+	tegra_edid_put_data(edid);
+}
+EXPORT_SYMBOL(tegra_dc_put_edid);
diff --git a/drivers/staging/tegra/video/dc/hdmi.h b/drivers/staging/tegra/video/dc/hdmi.h
new file mode 100644
index 000000000000..5b4c42a31ffa
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/hdmi.h
@@ -0,0 +1,222 @@
+/*
+ * drivers/video/tegra/dc/hdmi.h
+ *
+ * non-Tegra-specific HDMI declarations
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_HDMI_H
+#define __DRIVERS_VIDEO_TEGRA_DC_HDMI_H
+
+#define HDMI_INFOFRAME_TYPE_VENDOR	0x81
+#define HDMI_INFOFRAME_TYPE_AVI		0x82
+#define HDMI_INFOFRAME_TYPE_SPD		0x83
+#define HDMI_INFOFRAME_TYPE_AUDIO	0x84
+#define HDMI_INFOFRAME_TYPE_MPEG_SRC	0x85
+#define HDMI_INFOFRAME_TYPE_NTSC_VBI	0x86
+
+/* all fields little endian */
+struct hdmi_avi_infoframe {
+	/* PB0 */
+	u8		csum;
+
+	/* PB1 */
+	unsigned	s:2;	/* scan information */
+	unsigned	b:2;	/* bar info data valid */
+	unsigned	a:1;	/* active info present */
+	unsigned	y:2;	/* RGB or YCbCr */
+	unsigned	res1:1;
+
+	/* PB2 */
+	unsigned	r:4;	/* active format aspect ratio */
+	unsigned	m:2;	/* picture aspect ratio */
+	unsigned	c:2;	/* colorimetry */
+
+	/* PB3 */
+	unsigned	sc:2;	/* scan information */
+	unsigned	q:2;	/* quantization range */
+	unsigned	ec:3;	/* extended colorimetry */
+	unsigned	itc:1;	/* it content */
+
+	/* PB4 */
+	unsigned	vic:7;	/* video format id code */
+	unsigned	res4:1;
+
+	/* PB5 */
+	unsigned	pr:4;	/* pixel repetition factor */
+	unsigned	cn:2;		/* it content type */
+	unsigned	yq:2;	/* ycc quantization range */
+
+	/* PB6-7 */
+	u16		top_bar_end_line;
+
+	/* PB8-9 */
+	u16		bot_bar_start_line;
+
+	/* PB10-11 */
+	u16		left_bar_end_pixel;
+
+	/* PB12-13 */
+	u16		right_bar_start_pixel;
+} __attribute__((packed));
+
+#define HDMI_AVI_VERSION		0x02
+
+#define HDMI_AVI_Y_RGB			0x0
+#define HDMI_AVI_Y_YCBCR_422		0x1
+#define HDMI_AVI_Y_YCBCR_444		0x2
+
+#define HDMI_AVI_B_VERT			0x1
+#define HDMI_AVI_B_HORIZ		0x2
+
+#define HDMI_AVI_S_NONE			0x0
+#define HDMI_AVI_S_OVERSCAN		0x1
+#define HDMI_AVI_S_UNDERSCAN		0x2
+
+#define HDMI_AVI_C_NONE			0x0
+#define HDMI_AVI_C_SMPTE		0x1
+#define HDMI_AVI_C_ITU_R		0x2
+#define HDMI_AVI_C_EXTENDED		0x4
+
+#define HDMI_AVI_M_4_3			0x1
+#define HDMI_AVI_M_16_9			0x2
+
+#define HDMI_AVI_R_SAME			0x8
+#define HDMI_AVI_R_4_3_CENTER		0x9
+#define HDMI_AVI_R_16_9_CENTER		0xa
+#define HDMI_AVI_R_14_9_CENTER		0xb
+
+/* all fields little endian */
+struct hdmi_audio_infoframe {
+	/* PB0 */
+	u8		csum;
+
+	/* PB1 */
+	unsigned	cc:3;		/* channel count */
+	unsigned	res1:1;
+	unsigned	ct:4;		/* coding type */
+
+	/* PB2 */
+	unsigned	ss:2;		/* sample size */
+	unsigned	sf:3;		/* sample frequency */
+	unsigned	res2:3;
+
+	/* PB3 */
+	unsigned	cxt:5;		/* coding extension type */
+	unsigned	res3:3;
+
+	/* PB4 */
+	u8		ca;		/* channel/speaker allocation */
+
+	/* PB5 */
+	unsigned	res5:3;
+	unsigned	lsv:4;		/* level shift value */
+	unsigned	dm_inh:1;	/* downmix inhibit */
+
+	/* PB6-10 reserved */
+	u8		res6;
+	u8		res7;
+	u8		res8;
+	u8		res9;
+	u8		res10;
+} __attribute__((packed));
+
+#define HDMI_AUDIO_VERSION		0x01
+
+#define HDMI_AUDIO_CC_STREAM		0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CC_2			0x1
+#define HDMI_AUDIO_CC_3			0x2
+#define HDMI_AUDIO_CC_4			0x3
+#define HDMI_AUDIO_CC_5			0x4
+#define HDMI_AUDIO_CC_6			0x5
+#define HDMI_AUDIO_CC_7			0x6
+#define HDMI_AUDIO_CC_8			0x7
+
+#define HDMI_AUDIO_CT_STREAM		0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CT_PCM		0x1
+#define HDMI_AUDIO_CT_AC3		0x2
+#define HDMI_AUDIO_CT_MPEG1		0x3
+#define HDMI_AUDIO_CT_MP3		0x4
+#define HDMI_AUDIO_CT_MPEG2		0x5
+#define HDMI_AUDIO_CT_AAC_LC		0x6
+#define HDMI_AUDIO_CT_DTS		0x7
+#define HDMI_AUDIO_CT_ATRAC		0x8
+#define HDMI_AUDIO_CT_DSD		0x9
+#define HDMI_AUDIO_CT_E_AC3		0xa
+#define HDMI_AUDIO_CT_DTS_HD		0xb
+#define HDMI_AUDIO_CT_MLP		0xc
+#define HDMI_AUDIO_CT_DST		0xd
+#define HDMI_AUDIO_CT_WMA_PRO		0xe
+#define HDMI_AUDIO_CT_CXT		0xf
+
+#define HDMI_AUDIO_SF_STREAM		0x0 /* specified by audio stream */
+#define HDMI_AUIDO_SF_32K		0x1
+#define HDMI_AUDIO_SF_44_1K		0x2
+#define HDMI_AUDIO_SF_48K		0x3
+#define HDMI_AUDIO_SF_88_2K		0x4
+#define HDMI_AUDIO_SF_96K		0x5
+#define HDMI_AUDIO_SF_176_4K		0x6
+#define HDMI_AUDIO_SF_192K		0x7
+
+#define HDMI_AUDIO_SS_STREAM		0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SS_16BIT		0x1
+#define HDMI_AUDIO_SS_20BIT		0x2
+#define HDMI_AUDIO_SS_24BIT		0x3
+
+#define HDMI_AUDIO_CXT_CT		0x0 /* refer to coding in CT */
+#define HDMI_AUDIO_CXT_HE_AAC		0x1
+#define HDMI_AUDIO_CXT_HE_AAC_V2	0x2
+#define HDMI_AUDIO_CXT_MPEG_SURROUND	0x3
+
+/* all fields little endian */
+struct hdmi_stereo_infoframe {
+	/* PB0 */
+	u8		csum;
+
+	/* PB1 */
+	u8		regid0;
+
+	/* PB2 */
+	u8		regid1;
+
+	/* PB3 */
+	u8		regid2;
+
+	/* PB4 */
+	unsigned	res1:5;
+	unsigned	hdmi_video_format:3;
+
+	/* PB5 */
+	unsigned	res2:4;
+	unsigned	_3d_structure:4;
+
+	/* PB6 */
+	unsigned	res3:4;
+	unsigned	_3d_ext_data:4;
+} __attribute__((packed));
+
+#define HDMI_VENDOR_VERSION 0x01
+
+struct tegra_dc_hdmi_data;
+
+unsigned long tegra_hdmi_readl(struct tegra_dc_hdmi_data *hdmi,
+				unsigned long reg);
+void tegra_hdmi_writel(struct tegra_dc_hdmi_data *hdmi,
+				unsigned long val, unsigned long reg);
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/hdmi_reg.h b/drivers/staging/tegra/video/dc/hdmi_reg.h
new file mode 100644
index 000000000000..4a5fdcb2aaa6
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/hdmi_reg.h
@@ -0,0 +1,480 @@
+/*
+ * drivers/video/tegra/dc/hdmi_reg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_HDMI_REG_H
+#define __DRIVERS_VIDEO_TEGRA_DC_HDMI_REG_H
+
+#define HDMI_CTXSW						0x00
+#define HDMI_NV_PDISP_SOR_STATE0				0x01
+#define  SOR_STATE_UPDATE			(1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1				0x02
+#define  SOR_STATE_ASY_HEAD_OPMODE_SLEEP	(0 << 0)
+#define  SOR_STATE_ASY_HEAD_OPMODE_SNOOSE	(1 << 0)
+#define  SOR_STATE_ASY_HEAD_OPMODE_AWAKE	(2 << 0)
+#define  SOR_STATE_ASY_ORMODE_SAFE		(0 << 2)
+#define  SOR_STATE_ASY_ORMODE_NORMAL		(1 << 2)
+#define  SOR_STATE_ATTACHED			(1 << 3)
+#define  SOR_STATE_ARM_SHOW_VGA			(1 << 4)
+
+#define HDMI_NV_PDISP_SOR_STATE2				0x03
+#define  SOR_STATE_ASY_OWNER_NONE		(0 << 0)
+#define  SOR_STATE_ASY_OWNER_HEAD0		(1 << 0)
+#define  SOR_STATE_ASY_SUBOWNER_NONE		(0 << 4)
+#define  SOR_STATE_ASY_SUBOWNER_SUBHEAD0	(1 << 4)
+#define  SOR_STATE_ASY_SUBOWNER_SUBHEAD1	(2 << 4)
+#define  SOR_STATE_ASY_SUBOWNER_BOTH		(3 << 4)
+#define  SOR_STATE_ASY_CRCMODE_ACTIVE		(0 << 6)
+#define  SOR_STATE_ASY_CRCMODE_COMPLETE		(1 << 6)
+#define  SOR_STATE_ASY_CRCMODE_NON_ACTIVE	(2 << 6)
+#define  SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A	(1 << 8)
+#define  SOR_STATE_ASY_PROTOCOL_CUSTOM		(15 << 8)
+#define  SOR_STATE_ASY_HSYNCPOL_POS		(0 << 12)
+#define  SOR_STATE_ASY_HSYNCPOL_NEG		(1 << 12)
+#define  SOR_STATE_ASY_VSYNCPOL_POS		(0 << 13)
+#define  SOR_STATE_ASY_VSYNCPOL_NEG		(1 << 13)
+#define  SOR_STATE_ASY_DEPOL_POS		(0 << 14)
+#define  SOR_STATE_ASY_DEPOL_NEG		(1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB				0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB				0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB				0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB				0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB				0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB				0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB				0x0a
+#define  REPEATER				(1 << 31)
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB				0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB				0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB				0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB				0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB				0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL				0x10
+#define  HDCP_RUN_YES				(1 << 0)
+#define  CRYPT_ENABLED				(1 << 1)
+#define  ONEONE_ENABLED				(1 << 3)
+#define  AN_VALID				(1 << 8)
+#define  R0_VALID				(1 << 9)
+#define  SPRIME_VALID				(1 << 10)
+#define  MPRIME_VALID				(1 << 11)
+#define  SROM_ERR				(1 << 13)
+#define HDMI_NV_PDISP_RG_HDCP_CMODE				0x11
+#define  TMDS0_LINK0				(1 << 4)
+#define  READ_S					(1 << 0)
+#define  READ_M					(2 << 0)
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB			0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB			0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB			0x14
+#define  STATUS_CS				(1 << 6)
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2			0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1			0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI				0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB				0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB				0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0				0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0			0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1				0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2				0x1d
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL			0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS		0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER		0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW		0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH	0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL			0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS			0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER			0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW		0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH		0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW		0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH		0x29
+#define  INFOFRAME_CTRL_ENABLE			(1 << 0)
+#define  INFOFRAME_CTRL_OTHER			(1 << 4)
+#define  INFOFRAME_CTRL_SINGLE			(1 << 8)
+
+#define INFOFRAME_HEADER_TYPE(x)		((x) & 0xff)
+#define INFOFRAME_HEADER_VERSION(x)		(((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_LEN(x)			(((x) & 0xf) << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL				0x2a
+#define  GENERIC_CTRL_ENABLE			(1 << 0)
+#define  GENERIC_CTRL_OTHER			(1 << 4)
+#define  GENERIC_CTRL_SINGLE			(1 << 8)
+#define  GENERIC_CTRL_HBLANK			(1 << 12)
+#define  GENERIC_CTRL_AUDIO			(1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS			0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER			0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW			0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH		0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW			0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH		0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW			0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH		0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW			0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH		0x34
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL				0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW			0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH		0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW			0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH		0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW			0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH		0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW			0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH		0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW			0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH		0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW			0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH		0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW			0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH		0x43
+#define  ACR_SB3(x)				(((x) & 0xff) << 8)
+#define  ACR_SB2(x)				(((x) & 0xff) << 16)
+#define  ACR_SB1(x)				(((x) & 0xff) << 24)
+#define  ACR_SUBPACK_CTS(x)			(((x) & 0xffffff) << 8)
+
+#define  ACR_SB6(x)				(((x) & 0xff) << 0)
+#define  ACR_SB5(x)				(((x) & 0xff) << 8)
+#define  ACR_SB4(x)				(((x) & 0xff) << 16)
+#define  ACR_ENABLE				(1 << 31)
+#define  ACR_SUBPACK_N(x)			((x) & 0xffffff)
+
+#define HDMI_NV_PDISP_HDMI_CTRL					0x44
+#define  HDMI_CTRL_REKEY(x)			(((x) & 0x7f) << 0)
+#define  HDMI_CTRL_AUDIO_LAYOUT			(1 << 8)
+#define  HDMI_CTRL_SAMPLE_FLAT			(1 << 12)
+#define  HDMI_CTRL_MAX_AC_PACKET(x)		(((x) & 0x1f) << 16)
+#define  HDMI_CTRL_ENABLE			(1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT			0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW				0x46
+#define  VSYNC_WINDOW_END(x)			(((x) & 0x3ff) << 0)
+#define  VSYNC_WINDOW_START(x)			(((x) & 0x3ff) << 16)
+#define  VSYNC_WINDOW_ENABLE			(1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL				0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS				0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK				0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1			0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2			0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0					0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1					0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA				0x4e
+#define HDMI_NV_PDISP_HDMI_SPARE				0x4f
+#define  SPARE_HW_CTS				(1 << 0)
+#define  SPARE_FORCE_SW_CTS			(1 << 1)
+#define  SPARE_CTS_RESET_VAL(x)			(((x) & 0x7) << 16)
+#define  SPARE_ACR_PRIORITY_HIGH		(0 << 31)
+#define  SPARE_ACR_PRIORITY_LOW			(1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1			0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2			0x51
+#define HDMI_NV_PDISP_HDCPRIF_ROM_CTRL				0x53
+#define HDMI_NV_PDISP_SOR_CAP					0x54
+#define HDMI_NV_PDISP_SOR_PWR					0x55
+#define  SOR_PWR_NORMAL_STATE_PD		(0 << 0)
+#define  SOR_PWR_NORMAL_STATE_PU		(1 << 0)
+#define  SOR_PWR_NORMAL_START_NORMAL		(0 << 1)
+#define  SOR_PWR_NORMAL_START_ALT		(1 << 1)
+#define  SOR_PWR_SAFE_STATE_PD			(0 << 16)
+#define  SOR_PWR_SAFE_STATE_PU			(1 << 16)
+#define  SOR_PWR_SAFE_START_NORMAL		(0 << 17)
+#define  SOR_PWR_SAFE_START_ALT			(1 << 17)
+#define  SOR_PWR_HALT_DELAY			(1 << 24)
+#define  SOR_PWR_MODE				(1 << 28)
+#define  SOR_PWR_SETTING_NEW_DONE		(0 << 31)
+#define  SOR_PWR_SETTING_NEW_PENDING		(1 << 31)
+#define  SOR_PWR_SETTING_NEW_TRIGGER		(1 << 31)
+
+#define HDMI_NV_PDISP_SOR_TEST					0x56
+#define HDMI_NV_PDISP_SOR_PLL0					0x57
+#define  SOR_PLL_PWR				(1 << 0)
+#define  SOR_PLL_PDBG				(1 << 1)
+#define  SOR_PLL_VCOPD				(1 << 2)
+#define  SOR_PLL_PDPORT				(1 << 3)
+#define  SOR_PLL_RESISTORSEL			(1 << 4)
+#define  SOR_PLL_PULLDOWN			(1 << 5)
+#define  SOR_PLL_VCOCAP(x)			(((x) & 0xf) << 8)
+#define  SOR_PLL_BG_V17_S(x)			(((x) & 0xf) << 12)
+#define  SOR_PLL_FILTER(x)			(((x) & 0xf) << 16)
+#define  SOR_PLL_ICHPMP(x)			(((x) & 0xf) << 24)
+#define  SOR_PLL_TX_REG_LOAD(x)			(((x) & 0x3) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1					0x58
+#define  SOR_PLL_TMDS_TERM_ENABLE		(1 << 8)
+#define  SOR_PLL_TMDS_TERMADJ(x)		(((x) & 0xf) << 9)
+#define  SOR_PLL_LOADADJ(x)			(((x) & 0xf) << 20)
+#define  SOR_PLL_PE_EN				(1 << 28)
+#define  SOR_PLL_HALF_FULL_PE			(1 << 29)
+#define  SOR_PLL_S_D_PIN_PE			(1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2					0x59
+#define HDMI_NV_PDISP_SOR_CSTM					0x5a
+#define  SOR_CSTM_PD_TXDA_0			(1 << 0)
+#define  SOR_CSTM_PD_TXDA_1			(1 << 1)
+#define  SOR_CSTM_PD_TXDA_2			(1 << 2)
+#define  SOR_CSTM_PD_TXDA_3			(1 << 3)
+#define  SOR_CSTM_PD_TXDB_0			(1 << 4)
+#define  SOR_CSTM_PD_TXDB_1			(1 << 5)
+#define  SOR_CSTM_PD_TXDB_2			(1 << 6)
+#define  SOR_CSTM_PD_TXDB_3			(1 << 7)
+#define  SOR_CSTM_PD_TXCA			(1 << 8)
+#define  SOR_CSTM_PD_TXCB			(1 << 9)
+#define  SOR_CSTM_UPPER				(1 << 11)
+#define  SOR_CSTM_MODE(x)			(((x) & 0x3) << 12)
+#define  SOR_CSTM_LINKACTA			(1 << 14)
+#define  SOR_CSTM_LINKACTB			(1 << 15)
+#define  SOR_CSTM_LVDS_EN			(1 << 16)
+#define  SOR_CSTM_DUP_SYNC			(1 << 17)
+#define  SOR_CSTM_NEW_MODE			(1 << 18)
+#define  SOR_CSTM_BALANCED			(1 << 19)
+#define  SOR_CSTM_PLLDIV			(1 << 21)
+#define  SOR_CSTM_ROTCLK(x)			(((x) & 0xf) << 24)
+#define  SOR_CSTM_ROTDAT(x)			(((x) & 0x7) << 28)
+
+#define HDMI_NV_PDISP_SOR_LVDS					0x5b
+#define HDMI_NV_PDISP_SOR_CRCA					0x5c
+#define HDMI_NV_PDISP_SOR_CRCB					0x5d
+#define HDMI_NV_PDISP_SOR_BLANK					0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL				0x5f
+#define  SOR_SEQ_CTL_PU_PC(x)			(((x) & 0xf) << 0)
+#define  SOR_SEQ_PU_PC_ALT(x)			(((x) & 0xf) << 4)
+#define  SOR_SEQ_PD_PC(x)			(((x) & 0xf) << 8)
+#define  SOR_SEQ_PD_PC_ALT(x)			(((x) & 0xf) << 12)
+#define  SOR_SEQ_PC(x)				(((x) & 0xf) << 16)
+#define  SOR_SEQ_STATUS				(1 << 28)
+#define  SOR_SEQ_SWITCH				(1 << 30)
+
+#define HDMI_NV_PDISP_SOR_SEQ_INST0				0x60
+#define HDMI_NV_PDISP_SOR_SEQ_INST1				0x61
+#define HDMI_NV_PDISP_SOR_SEQ_INST2				0x62
+#define HDMI_NV_PDISP_SOR_SEQ_INST3				0x63
+#define HDMI_NV_PDISP_SOR_SEQ_INST4				0x64
+#define HDMI_NV_PDISP_SOR_SEQ_INST5				0x65
+#define HDMI_NV_PDISP_SOR_SEQ_INST6				0x66
+#define HDMI_NV_PDISP_SOR_SEQ_INST7				0x67
+#define HDMI_NV_PDISP_SOR_SEQ_INST8				0x68
+#define HDMI_NV_PDISP_SOR_SEQ_INST9				0x69
+#define HDMI_NV_PDISP_SOR_SEQ_INSTA				0x6a
+#define HDMI_NV_PDISP_SOR_SEQ_INSTB				0x6b
+#define HDMI_NV_PDISP_SOR_SEQ_INSTC				0x6c
+#define HDMI_NV_PDISP_SOR_SEQ_INSTD				0x6d
+#define HDMI_NV_PDISP_SOR_SEQ_INSTE				0x6e
+#define HDMI_NV_PDISP_SOR_SEQ_INSTF				0x6f
+#define  SOR_SEQ_INST_WAIT_TIME(x)		(((x) & 0x3ff) << 0)
+#define  SOR_SEQ_INST_WAIT_UNITS_US		(0 << 12)
+#define  SOR_SEQ_INST_WAIT_UNITS_MS		(1 << 12)
+#define  SOR_SEQ_INST_WAIT_UNITS_VSYNC		(2 << 12)
+#define  SOR_SEQ_INST_HALT			(1 << 15)
+#define  SOR_SEQ_INST_PIN_A_LOW			(0 << 21)
+#define  SOR_SEQ_INST_PIN_A_HIGH		(1 << 21)
+#define  SOR_SEQ_INST_PIN_B_LOW			(0 << 22)
+#define  SOR_SEQ_INST_PIN_B_HIGH		(1 << 22)
+#define  SOR_SEQ_INST_DRIVE_PWM_OUT_LO		(1 << 23)
+#define  SOR_SEQ_INST_TRISTATE_IOS		(1 << 24)
+#define  SOR_SEQ_INST_SOR_SEQ_INST_BLACK_DATA	(1 << 25)
+#define  SOR_SEQ_INST_BLANK_DE			(1 << 26)
+#define  SOR_SEQ_INST_BLANK_H			(1 << 27)
+#define  SOR_SEQ_INST_BLANK_V			(1 << 28)
+#define  SOR_SEQ_INST_ASSERT_PLL_RESETV		(1 << 29)
+#define  SOR_SEQ_INST_POWERDOWN_MACRO		(1 << 30)
+#define  SOR_SEQ_INST_PLL_PULLDOWN		(1 << 31)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0				0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1				0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0				0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1				0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0				0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1				0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0				0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1				0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0				0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1				0x7b
+#define HDMI_NV_PDISP_SOR_TRIG					0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK				0x7d
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT			0x7e
+#define  DRIVE_CURRENT_LANE0(x)			(((x) & 0x3f) << 0)
+#define  DRIVE_CURRENT_LANE1(x)			(((x) & 0x3f) << 8)
+#define  DRIVE_CURRENT_LANE2(x)			(((x) & 0x3f) << 16)
+#define  DRIVE_CURRENT_LANE3(x)			(((x) & 0x3f) << 24)
+#define  DRIVE_CURRENT_FUSE_OVERRIDE		(1 << 31)
+#define  DRIVE_CURRENT_1_500_mA			0x00
+#define  DRIVE_CURRENT_1_875_mA			0x01
+#define  DRIVE_CURRENT_2_250_mA			0x02
+#define  DRIVE_CURRENT_2_625_mA			0x03
+#define  DRIVE_CURRENT_3_000_mA			0x04
+#define  DRIVE_CURRENT_3_375_mA			0x05
+#define  DRIVE_CURRENT_3_750_mA			0x06
+#define  DRIVE_CURRENT_4_125_mA			0x07
+#define  DRIVE_CURRENT_4_500_mA			0x08
+#define  DRIVE_CURRENT_4_875_mA			0x09
+#define  DRIVE_CURRENT_5_250_mA			0x0a
+#define  DRIVE_CURRENT_5_625_mA			0x0b
+#define  DRIVE_CURRENT_6_000_mA			0x0c
+#define  DRIVE_CURRENT_6_375_mA			0x0d
+#define  DRIVE_CURRENT_6_750_mA			0x0e
+#define  DRIVE_CURRENT_7_125_mA			0x0f
+#define  DRIVE_CURRENT_7_500_mA			0x10
+#define  DRIVE_CURRENT_7_875_mA			0x11
+#define  DRIVE_CURRENT_8_250_mA			0x12
+#define  DRIVE_CURRENT_8_625_mA			0x13
+#define  DRIVE_CURRENT_9_000_mA			0x14
+#define  DRIVE_CURRENT_9_375_mA			0x15
+#define  DRIVE_CURRENT_9_750_mA			0x16
+#define  DRIVE_CURRENT_10_125_mA		0x17
+#define  DRIVE_CURRENT_10_500_mA		0x18
+#define  DRIVE_CURRENT_10_875_mA		0x19
+#define  DRIVE_CURRENT_11_250_mA		0x1a
+#define  DRIVE_CURRENT_11_625_mA		0x1b
+#define  DRIVE_CURRENT_12_000_mA		0x1c
+#define  DRIVE_CURRENT_12_375_mA		0x1d
+#define  DRIVE_CURRENT_12_750_mA		0x1e
+#define  DRIVE_CURRENT_13_125_mA		0x1f
+#define  DRIVE_CURRENT_13_500_mA		0x20
+#define  DRIVE_CURRENT_13_875_mA		0x21
+#define  DRIVE_CURRENT_14_250_mA		0x22
+#define  DRIVE_CURRENT_14_625_mA		0x23
+#define  DRIVE_CURRENT_15_000_mA		0x24
+#define  DRIVE_CURRENT_15_375_mA		0x25
+#define  DRIVE_CURRENT_15_750_mA		0x26
+#define  DRIVE_CURRENT_16_125_mA		0x27
+#define  DRIVE_CURRENT_16_500_mA		0x28
+#define  DRIVE_CURRENT_16_875_mA		0x29
+#define  DRIVE_CURRENT_17_250_mA		0x2a
+#define  DRIVE_CURRENT_17_625_mA		0x2b
+#define  DRIVE_CURRENT_18_000_mA		0x2c
+#define  DRIVE_CURRENT_18_375_mA		0x2d
+#define  DRIVE_CURRENT_18_750_mA		0x2e
+#define  DRIVE_CURRENT_19_125_mA		0x2f
+#define  DRIVE_CURRENT_19_500_mA		0x30
+#define  DRIVE_CURRENT_19_875_mA		0x31
+#define  DRIVE_CURRENT_20_250_mA		0x32
+#define  DRIVE_CURRENT_20_625_mA		0x33
+#define  DRIVE_CURRENT_21_000_mA		0x34
+#define  DRIVE_CURRENT_21_375_mA		0x35
+#define  DRIVE_CURRENT_21_750_mA		0x36
+#define  DRIVE_CURRENT_22_125_mA		0x37
+#define  DRIVE_CURRENT_22_500_mA		0x38
+#define  DRIVE_CURRENT_22_875_mA		0x39
+#define  DRIVE_CURRENT_23_250_mA		0x3a
+#define  DRIVE_CURRENT_23_625_mA		0x3b
+#define  DRIVE_CURRENT_24_000_mA		0x3c
+#define  DRIVE_CURRENT_24_375_mA		0x3d
+#define  DRIVE_CURRENT_24_750_mA		0x3e
+
+#define HDMI_NV_PDISP_AUDIO_DEBUG0				0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1				0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2				0x81
+/* note: datasheet defines FS1..FS7.  we have FS(0)..FS(6) */
+#define HDMI_NV_PDISP_AUDIO_FS(x)				(0x82 + (x))
+#define  AUDIO_FS_LOW(x)			(((x) & 0xfff) << 0)
+#define  AUDIO_FS_HIGH(x)			(((x) & 0xfff) << 16)
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH				0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD				0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0				0x8b
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0		0xac
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0		0xbc
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0		0xbd
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320_0			0xbf
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441_0			0xc0
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882_0			0xc1
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764_0			0xc2
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480_0			0xc3
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960_0			0xc4
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920_0			0xc5
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT_0			0xc6
+#endif
+#define  AUDIO_CNTRL0_ERROR_TOLERANCE(x)	(((x) & 0xff) << 0)
+#define  AUDIO_CNTRL0_SOFT_RESET		(1 << 8)
+#define  AUDIO_CNTRL0_SOFT_RESET_ALL		(1 << 12)
+#define  AUDIO_CNTRL0_SAMPLING_FREQ_UNKNOWN	(1 << 16)
+#define  AUDIO_CNTRL0_SAMPLING_FREQ_32K		(2 << 16)
+#define  AUDIO_CNTRL0_SAMPLING_FREQ_44_1K	(0 << 16)
+#define  AUDIO_CNTRL0_SAMPLING_FREQ_48K		(2 << 16)
+#define  AUDIO_CNTRL0_SAMPLING_FREQ_88_2K	(8 << 16)
+#define  AUDIO_CNTRL0_SAMPLING_FREQ_96K		(10 << 16)
+#define  AUDIO_CNTRL0_SAMPLING_FREQ_176_4K	(12 << 16)
+#define  AUDIO_CNTRL0_SAMPLING_FREQ_192K	(14 << 16)
+#define  AUDIO_CNTRL0_SOURCE_SELECT_AUTO	(0 << 20)
+#define  AUDIO_CNTRL0_SOURCE_SELECT_SPDIF	(1 << 20)
+#define  AUDIO_CNTRL0_SOURCE_SELECT_HDAL	(2 << 20)
+#define  AUDIO_CNTRL0_INJECT_NULLSMPL		(1 << 29)
+#define  AUDIO_CNTRL0_FRAMES_PER_BLOCK(x)	(((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N					0x8c
+#define  AUDIO_N_VALUE(x)			(((x) & 0xfffff) << 0)
+#define  AUDIO_N_RESETF				(1 << 20)
+#define  AUDIO_N_GENERATE_NORMAL		(0 << 24)
+#define  AUDIO_N_GENERATE_ALTERNALTE		(1 << 24)
+#define  AUDIO_N_LOOKUP_ENABLE			(1 << 28)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING			0x94
+#define HDMI_NV_PDISP_SOR_REFCLK				0x95
+#define  SOR_REFCLK_DIV_INT(x)			(((x) & 0xff) << 8)
+#define  SOR_REFCLK_DIV_FRAC(x)			(((x) & 0x3) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL				0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL				0x97
+#define  HDMI_SRC_DISPLAYA			(0 << 0)
+#define  HDMI_SRC_DISPLAYB			(1 << 0)
+#define  ARM_VIDEO_RANGE_FULL			(0 << 1)
+#define  ARM_VIDEO_RANGE_LIMITED		(1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH					0x98
+#define HDMI_NV_PDISP_PE_CURRENT				0x99
+#define  PE_CURRENT0(x)				(((x) & 0xf) << 0)
+#define  PE_CURRENT1(x)				(((x) & 0xf) << 8)
+#define  PE_CURRENT2(x)				(((x) & 0xf) << 16)
+#define  PE_CURRENT3(x)				(((x) & 0xf) << 24)
+#define  PE_CURRENT_0_0_mA			0x0
+#define  PE_CURRENT_0_5_mA			0x1
+#define  PE_CURRENT_1_0_mA			0x2
+#define  PE_CURRENT_1_5_mA			0x3
+#define  PE_CURRENT_2_0_mA			0x4
+#define  PE_CURRENT_2_5_mA			0x5
+#define  PE_CURRENT_3_0_mA			0x6
+#define  PE_CURRENT_3_5_mA			0x7
+#define  PE_CURRENT_4_0_mA			0x8
+#define  PE_CURRENT_4_5_mA			0x9
+#define  PE_CURRENT_5_0_mA			0xa
+#define  PE_CURRENT_5_5_mA			0xb
+#define  PE_CURRENT_6_0_mA			0xc
+#define  PE_CURRENT_6_5_mA			0xd
+#define  PE_CURRENT_7_0_mA			0xe
+#define  PE_CURRENT_7_5_mA			0xf
+
+#define HDMI_NV_PDISP_KEY_CTRL					0x9a
+#define  LOCAL_KEYS				(1 << 0)
+#define  AUTOINC				(1 << 1)
+#define  WRITE16				(1 << 4)
+#define  PKEY_REQUEST_RELOAD_TRIGGER		(1 << 5)
+#define  PKEY_LOADED				(1 << 6)
+#define HDMI_NV_PDISP_KEY_DEBUG0				0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1				0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2				0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0				0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1				0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2				0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3				0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG				0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX				0xa3
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/lut.c b/drivers/staging/tegra/video/dc/lut.c
new file mode 100644
index 000000000000..864e22629965
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/lut.c
@@ -0,0 +1,130 @@
+/*
+ * drivers/video/tegra/dc/lut.c
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+
+#include "dc.h"
+#include "dc_reg.h"
+#include "dc_priv.h"
+
+void tegra_dc_init_lut_defaults(struct tegra_dc_lut *lut)
+{
+	int i;
+	for (i = 0; i < 256; i++)
+		lut->r[i] = lut->g[i] = lut->b[i] = (u8)i;
+}
+
+static int tegra_dc_loop_lut(struct tegra_dc *dc,
+			     struct tegra_dc_win *win,
+			     int (*lambda)(struct tegra_dc *dc, int i, u32 rgb))
+{
+	struct tegra_dc_lut *lut = &win->lut;
+	struct tegra_dc_lut *global_lut = &dc->fb_lut;
+	int i;
+	for (i = 0; i < 256; i++) {
+		u32 r = (u32)lut->r[i];
+		u32 g = (u32)lut->g[i];
+		u32 b = (u32)lut->b[i];
+
+		if (!(win->ppflags & TEGRA_WIN_PPFLAG_CP_FBOVERRIDE)) {
+			r = (u32)global_lut->r[r];
+			g = (u32)global_lut->g[g];
+			b = (u32)global_lut->b[b];
+		}
+
+		if (!lambda(dc, i, r | (g<<8) | (b<<16)))
+			return 0;
+	}
+	return 1;
+}
+
+static int tegra_dc_lut_isdefaults_lambda(struct tegra_dc *dc, int i, u32 rgb)
+{
+	if (rgb != (i | (i<<8) | (i<<16)))
+		return 0;
+	return 1;
+}
+
+static int tegra_dc_set_lut_setreg_lambda(struct tegra_dc *dc, int i, u32 rgb)
+{
+	tegra_dc_writel(dc, rgb, DC_WIN_COLOR_PALETTE(i));
+	return 1;
+}
+
+void tegra_dc_set_lut(struct tegra_dc *dc, struct tegra_dc_win *win)
+{
+	unsigned long val = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+
+	tegra_dc_loop_lut(dc, win, tegra_dc_set_lut_setreg_lambda);
+
+	if (win->ppflags & TEGRA_WIN_PPFLAG_CP_ENABLE)
+		val |= CP_ENABLE;
+	else
+		val &= ~CP_ENABLE;
+
+	tegra_dc_writel(dc, val, DC_WIN_WIN_OPTIONS);
+}
+
+static int tegra_dc_update_winlut(struct tegra_dc *dc, int win_idx, int fbovr)
+{
+	struct tegra_dc_win *win = &dc->windows[win_idx];
+
+	mutex_lock(&dc->lock);
+
+	if (!dc->enabled) {
+		mutex_unlock(&dc->lock);
+		return -EFAULT;
+	}
+
+	if (fbovr > 0)
+		win->ppflags |= TEGRA_WIN_PPFLAG_CP_FBOVERRIDE;
+	else if (fbovr == 0)
+		win->ppflags &= ~TEGRA_WIN_PPFLAG_CP_FBOVERRIDE;
+
+	if (!tegra_dc_loop_lut(dc, win, tegra_dc_lut_isdefaults_lambda))
+		win->ppflags |= TEGRA_WIN_PPFLAG_CP_ENABLE;
+	else
+		win->ppflags &= ~TEGRA_WIN_PPFLAG_CP_ENABLE;
+
+	tegra_dc_writel(dc, WINDOW_A_SELECT << win_idx,
+			DC_CMD_DISPLAY_WINDOW_HEADER);
+
+	tegra_dc_set_lut(dc, win);
+
+	mutex_unlock(&dc->lock);
+
+	tegra_dc_update_windows(&win, 1);
+
+	return 0;
+}
+
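+/*
+ * Update the palette of one window (win_idx >= 0) or of all windows
+ * (win_idx < 0).  fboveride > 0 sets TEGRA_WIN_PPFLAG_CP_FBOVERRIDE,
+ * 0 clears it, and a negative value leaves it unchanged; the palette
+ * is only enabled when it differs from the identity mapping.
+ */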
+int tegra_dc_update_lut(struct tegra_dc *dc, int win_idx, int fboveride)
+{
+	if (win_idx > -1)
+		return tegra_dc_update_winlut(dc, win_idx, fboveride);
+
+	for (win_idx = 0; win_idx < DC_N_WINDOWS; win_idx++) {
+		int err = tegra_dc_update_winlut(dc, win_idx, fboveride);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_dc_update_lut);
+
diff --git a/drivers/staging/tegra/video/dc/mode.c b/drivers/staging/tegra/video/dc/mode.c
new file mode 100644
index 000000000000..de73058fb453
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/mode.c
@@ -0,0 +1,343 @@
+/*
+ * drivers/video/tegra/dc/mode.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+
+#include <mach/clk.h>
+
+#include "dc.h"
+#include "dc_reg.h"
+#include "dc_priv.h"
+
+/* return non-zero if constraint is violated */
+static int calc_h_ref_to_sync(const struct tegra_dc_mode *mode, int *href)
+{
+	long a, b;
+
+	/* Constraint 5: H_REF_TO_SYNC >= 0 */
+	a = 0;
+
+	/* Constraint 6: H_FRONT_PORCH >= (H_REF_TO_SYNC + 1) */
+	b = mode->h_front_porch - 1;
+
+	/* Constraint 1: H_REF_TO_SYNC + H_SYNC_WIDTH + H_BACK_PORCH > 11 */
+	if (a + mode->h_sync_width + mode->h_back_porch <= 11)
+		a = 1 + 11 - mode->h_sync_width - mode->h_back_porch;
+	/* check Constraint 1 and 6 */
+	if (a > b)
+		return 1;
+
+	/* Constraint 4: H_SYNC_WIDTH >= 1 */
+	if (mode->h_sync_width < 1)
+		return 4;
+
+	/* Constraint 7: H_DISP_ACTIVE >= 16 */
+	if (mode->h_active < 16)
+		return 7;
+
+	if (href) {
+		if (b > a && a % 2)
+			*href = a + 1; /* use smallest even value */
+		else
+			*href = a; /* even or only possible value */
+	}
+
+	return 0;
+}
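+/*
+ * Example: with CEA 1080p timings (h_sync_width = 44, h_back_porch = 148,
+ * h_front_porch = 88), constraint 1 is already met with a = 0 and b = 87,
+ * so the computed H_REF_TO_SYNC is 0.
+ */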
+
+static int calc_v_ref_to_sync(const struct tegra_dc_mode *mode, int *vref)
+{
+	long a;
+	a = 1; /* Constraint 5: V_REF_TO_SYNC >= 1 */
+
+	/* Constraint 2: V_REF_TO_SYNC + V_SYNC_WIDTH + V_BACK_PORCH > 1 */
+	if (a + mode->v_sync_width + mode->v_back_porch <= 1)
+		a = 1 + 1 - mode->v_sync_width - mode->v_back_porch;
+
+	/* Constraint 6 */
+	if (mode->v_front_porch < a + 1)
+		a = mode->v_front_porch - 1;
+
+	/* Constraint 4: V_SYNC_WIDTH >= 1 */
+	if (mode->v_sync_width < 1)
+		return 4;
+
+	/* Constraint 7: V_DISP_ACTIVE >= 16 */
+	if (mode->v_active < 16)
+		return 7;
+
+	if (vref)
+		*vref = a;
+	return 0;
+}
+
+static int calc_ref_to_sync(struct tegra_dc_mode *mode)
+{
+	int ret;
+	ret = calc_h_ref_to_sync(mode, &mode->h_ref_to_sync);
+	if (ret)
+		return ret;
+	ret = calc_v_ref_to_sync(mode, &mode->v_ref_to_sync);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static bool check_ref_to_sync(struct tegra_dc_mode *mode)
+{
+	/* Constraint 1: H_REF_TO_SYNC + H_SYNC_WIDTH + H_BACK_PORCH > 11. */
+	if (mode->h_ref_to_sync + mode->h_sync_width + mode->h_back_porch <= 11)
+		return false;
+
+	/* Constraint 2: V_REF_TO_SYNC + V_SYNC_WIDTH + V_BACK_PORCH > 1. */
+	if (mode->v_ref_to_sync + mode->v_sync_width + mode->v_back_porch <= 1)
+		return false;
+
+	/* Constraint 3: V_FRONT_PORCH + V_SYNC_WIDTH + V_BACK_PORCH > 1
+	 * (vertical blank). */
+	if (mode->v_front_porch + mode->v_sync_width + mode->v_back_porch <= 1)
+		return false;
+
+	/* Constraint 4: V_SYNC_WIDTH >= 1; H_SYNC_WIDTH >= 1. */
+	if (mode->v_sync_width < 1 || mode->h_sync_width < 1)
+		return false;
+
+	/* Constraint 5: V_REF_TO_SYNC >= 1; H_REF_TO_SYNC >= 0. */
+	if (mode->v_ref_to_sync < 1 || mode->h_ref_to_sync < 0)
+		return false;
+
+	/* Constraint 6: V_FRONT_PORCH >= (V_REF_TO_SYNC + 1);
+	 * H_FRONT_PORCH >= (H_REF_TO_SYNC + 1). */
+	if (mode->v_front_porch < mode->v_ref_to_sync + 1 ||
+		mode->h_front_porch < mode->h_ref_to_sync + 1)
+		return false;
+
+	/* Constraint 7: H_DISP_ACTIVE >= 16; V_DISP_ACTIVE >= 16. */
+	if (mode->h_active < 16 || mode->v_active < 16)
+		return false;
+
+	return true;
+}
+
+static s64 calc_frametime_ns(const struct tegra_dc_mode *m)
+{
+	long h_total, v_total;
+	h_total = m->h_active + m->h_front_porch + m->h_back_porch +
+		m->h_sync_width;
+	v_total = m->v_active + m->v_front_porch + m->v_back_porch +
+		m->v_sync_width;
+	return (!m->pclk) ? 0 : (s64)(div_s64(((s64)h_total * v_total *
+					1000000000ULL), m->pclk));
+}
+
+/* return in 1000ths of a Hertz */
+int tegra_dc_calc_refresh(const struct tegra_dc_mode *m)
+{
+	long h_total, v_total, refresh;
+	h_total = m->h_active + m->h_front_porch + m->h_back_porch +
+		m->h_sync_width;
+	v_total = m->v_active + m->v_front_porch + m->v_back_porch +
+		m->v_sync_width;
+	refresh = m->pclk / h_total;
+	refresh *= 1000;
+	refresh /= v_total;
+	return refresh;
+}
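+/*
+ * Example: CEA 1080p60 (pclk = 148500000, h_total = 2200, v_total = 1125)
+ * gives 148500000 / 2200 * 1000 / 1125 = 60000, i.e. 60.000 Hz; the
+ * matching calc_frametime_ns() result is 16666666 ns.
+ */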
+
+#ifdef DEBUG
+static void print_mode(struct tegra_dc *dc,
+			const struct tegra_dc_mode *mode, const char *note)
+{
+	if (mode) {
+		int refresh = tegra_dc_calc_refresh(mode);
+		dev_info(&dc->ndev->dev, "%s():MODE:%dx%d@%d.%03uHz pclk=%d\n",
+			note ? note : "",
+			mode->h_active, mode->v_active,
+			refresh / 1000, refresh % 1000,
+			mode->pclk);
+	}
+}
+#else /* !DEBUG */
+static inline void print_mode(struct tegra_dc *dc,
+			const struct tegra_dc_mode *mode, const char *note) { }
+#endif /* DEBUG */
+
+int tegra_dc_program_mode(struct tegra_dc *dc, struct tegra_dc_mode *mode)
+{
+	unsigned long val;
+	unsigned long rate;
+	unsigned long div;
+	unsigned long pclk;
+
+	print_mode(dc, mode, __func__);
+
+	/* use default EMC rate when switching modes */
+	dc->new_emc_clk_rate = tegra_dc_get_default_emc_clk_rate(dc);
+	tegra_dc_program_bandwidth(dc, true);
+
+	tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+	tegra_dc_writel(dc, mode->h_ref_to_sync | (mode->v_ref_to_sync << 16),
+			DC_DISP_REF_TO_SYNC);
+	tegra_dc_writel(dc, mode->h_sync_width | (mode->v_sync_width << 16),
+			DC_DISP_SYNC_WIDTH);
+	tegra_dc_writel(dc, mode->h_back_porch | (mode->v_back_porch << 16),
+			DC_DISP_BACK_PORCH);
+	tegra_dc_writel(dc, mode->h_active | (mode->v_active << 16),
+			DC_DISP_DISP_ACTIVE);
+	tegra_dc_writel(dc, mode->h_front_porch | (mode->v_front_porch << 16),
+			DC_DISP_FRONT_PORCH);
+
+	tegra_dc_writel(dc, DE_SELECT_ACTIVE | DE_CONTROL_NORMAL,
+			DC_DISP_DATA_ENABLE_OPTIONS);
+
+	/* TODO: MIPI/CRT/HDMI clock calculations */
+
+	val = DISP_DATA_FORMAT_DF1P1C;
+
+	if (dc->out->align == TEGRA_DC_ALIGN_MSB)
+		val |= DISP_DATA_ALIGNMENT_MSB;
+	else
+		val |= DISP_DATA_ALIGNMENT_LSB;
+
+	if (dc->out->order == TEGRA_DC_ORDER_RED_BLUE)
+		val |= DISP_DATA_ORDER_RED_BLUE;
+	else
+		val |= DISP_DATA_ORDER_BLUE_RED;
+
+	tegra_dc_writel(dc, val, DC_DISP_DISP_INTERFACE_CONTROL);
+
+	rate = tegra_dc_clk_get_rate(dc);
+
+	pclk = tegra_dc_pclk_round_rate(dc, mode->pclk);
+	trace_printk("%s:pclk=%ld\n", dc->ndev->name, pclk);
+	if (pclk < (mode->pclk / 100 * 99) ||
+	    pclk > (mode->pclk / 100 * 109)) {
+		dev_err(&dc->ndev->dev,
+			"can't divide %ld clock to %d -1/+9%% %ld %d %d\n",
+			rate, mode->pclk,
+			pclk, (mode->pclk / 100 * 99),
+			(mode->pclk / 100 * 109));
+		return -EINVAL;
+	}
+
+	div = (rate * 2 / pclk) - 2;
+	trace_printk("%s:div=%ld\n", dc->ndev->name, div);
+
+	tegra_dc_writel(dc, 0x00010001,
+			DC_DISP_SHIFT_CLOCK_OPTIONS);
+	tegra_dc_writel(dc, PIXEL_CLK_DIVIDER_PCD1 | SHIFT_CLK_DIVIDER(div),
+			DC_DISP_DISP_CLOCK_CONTROL);
+
+#ifdef CONFIG_SWITCH
+	switch_set_state(&dc->modeset_switch,
+			 (mode->h_active << 16) | mode->v_active);
+#endif
+
+	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	print_mode_info(dc, dc->mode);
+	return 0;
+}
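+/*
+ * The shift clock divider programmed above encodes
+ * pclk = 2 * rate / (div + 2).  E.g. a (hypothetical) parent rate of
+ * 445500000 Hz and a target pclk of 148500000 Hz give
+ * div = (891000000 / 148500000) - 2 = 4.
+ */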
+
+static int panel_sync_rate;
+
+int tegra_dc_get_panel_sync_rate(void)
+{
+	return panel_sync_rate;
+}
+EXPORT_SYMBOL(tegra_dc_get_panel_sync_rate);
+
+int tegra_dc_set_mode(struct tegra_dc *dc, const struct tegra_dc_mode *mode)
+{
+	memcpy(&dc->mode, mode, sizeof(dc->mode));
+
+	if (dc->out->type == TEGRA_DC_OUT_RGB)
+		panel_sync_rate = tegra_dc_calc_refresh(mode);
+	else if (dc->out->type == TEGRA_DC_OUT_DSI)
+		panel_sync_rate = dc->out->dsi->rated_refresh_rate * 1000;
+
+	print_mode(dc, mode, __func__);
+	dc->frametime_ns = calc_frametime_ns(mode);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_dc_set_mode);
+
+int tegra_dc_set_fb_mode(struct tegra_dc *dc,
+		const struct fb_videomode *fbmode, bool stereo_mode)
+{
+	struct tegra_dc_mode mode;
+
+	if (!fbmode->pixclock)
+		return -EINVAL;
+
+	mode.pclk = PICOS2KHZ(fbmode->pixclock) * 1000;
+	mode.h_sync_width = fbmode->hsync_len;
+	mode.v_sync_width = fbmode->vsync_len;
+	mode.h_back_porch = fbmode->left_margin;
+	mode.v_back_porch = fbmode->upper_margin;
+	mode.h_active = fbmode->xres;
+	mode.v_active = fbmode->yres;
+	mode.h_front_porch = fbmode->right_margin;
+	mode.v_front_porch = fbmode->lower_margin;
+	mode.stereo_mode = stereo_mode;
+	if (dc->out->type == TEGRA_DC_OUT_HDMI) {
+		/* HDMI controller requires h_ref=1, v_ref=1 */
+		mode.h_ref_to_sync = 1;
+		mode.v_ref_to_sync = 1;
+	} else {
+		calc_ref_to_sync(&mode);
+	}
+	if (!check_ref_to_sync(&mode)) {
+		dev_err(&dc->ndev->dev,
+				"Display timing doesn't meet restrictions.\n");
+		return -EINVAL;
+	}
+	dev_info(&dc->ndev->dev, "Using mode %dx%d pclk=%d href=%d vref=%d\n",
+		mode.h_active, mode.v_active, mode.pclk,
+		mode.h_ref_to_sync, mode.v_ref_to_sync
+	);
+
+#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
+	/* Double the pixel clock and update v_active only for
+	 * frame packed mode */
+	if (mode.stereo_mode) {
+		mode.pclk *= 2;
+		/* total v_active = yres*2 + activespace */
+		mode.v_active = fbmode->yres * 2 +
+				fbmode->vsync_len +
+				fbmode->upper_margin +
+				fbmode->lower_margin;
+	}
+#endif
+
+	mode.flags = 0;
+
+	if (!(fbmode->sync & FB_SYNC_HOR_HIGH_ACT))
+		mode.flags |= TEGRA_DC_MODE_FLAG_NEG_H_SYNC;
+
+	if (!(fbmode->sync & FB_SYNC_VERT_HIGH_ACT))
+		mode.flags |= TEGRA_DC_MODE_FLAG_NEG_V_SYNC;
+
+	return tegra_dc_set_mode(dc, &mode);
+}
+EXPORT_SYMBOL(tegra_dc_set_fb_mode);
diff --git a/drivers/staging/tegra/video/dc/nvhdcp.c b/drivers/staging/tegra/video/dc/nvhdcp.c
new file mode 100644
index 000000000000..59fb55dd605a
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/nvhdcp.c
@@ -0,0 +1,1259 @@
+/*
+ * drivers/video/tegra/dc/nvhdcp.c
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+
+#include <mach/kfuse.h>
+
+#include <video/nvhdcp.h>
+
+#include "dc.h"
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "hdmi_reg.h"
+#include "hdmi.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(wq_worker);
+
+/* for 0x40 Bcaps */
+#define BCAPS_REPEATER (1 << 6)
+#define BCAPS_READY (1 << 5)
+#define BCAPS_11 (1 << 1) /* used for both Bcaps and Ainfo */
+
+/* for 0x41 Bstatus */
+#define BSTATUS_MAX_DEVS_EXCEEDED	(1 << 7)
+#define BSTATUS_MAX_CASCADE_EXCEEDED	(1 << 11)
+
+#ifdef VERBOSE_DEBUG
+#define nvhdcp_vdbg(...)	\
+		printk("nvhdcp: " __VA_ARGS__)
+#else
+#define nvhdcp_vdbg(...)		\
+({						\
+	if (0)					\
+		printk("nvhdcp: " __VA_ARGS__); \
+	0;					\
+})
+#endif
+#define nvhdcp_debug(...)	\
+		pr_debug("nvhdcp: " __VA_ARGS__)
+#define nvhdcp_err(...)	\
+		pr_err("nvhdcp: Error: " __VA_ARGS__)
+#define nvhdcp_info(...)	\
+		pr_info("nvhdcp: " __VA_ARGS__)
+
+
+/* for nvhdcp.state */
+enum tegra_nvhdcp_state {
+	STATE_OFF,
+	STATE_UNAUTHENTICATED,
+	STATE_LINK_VERIFY,
+	STATE_RENEGOTIATE,
+};
+
+struct tegra_nvhdcp {
+	struct delayed_work		work;
+	struct tegra_dc_hdmi_data	*hdmi;
+	struct workqueue_struct		*downstream_wq;
+	struct mutex			lock;
+	struct miscdevice		miscdev;
+	char				name[12];
+	unsigned			id;
+	bool				plugged; /* true if hotplug detected */
+	atomic_t			policy; /* set policy */
+	enum tegra_nvhdcp_state		state; /* STATE_xxx */
+	struct i2c_client		*client;
+	struct i2c_board_info		info;
+	int				bus;
+	u32				b_status;
+	u64				a_n;
+	u64				c_n;
+	u64				a_ksv;
+	u64				b_ksv;
+	u64				c_ksv;
+	u64				d_ksv;
+	u8				v_prime[20];
+	u64				m_prime;
+	u32				num_bksv_list;
+	u64				bksv_list[TEGRA_NVHDCP_MAX_DEVS];
+	int				fail_count;
+};
+
+static inline bool nvhdcp_is_plugged(struct tegra_nvhdcp *nvhdcp)
+{
+	rmb();
+	return nvhdcp->plugged;
+}
+
+static inline bool nvhdcp_set_plugged(struct tegra_nvhdcp *nvhdcp, bool plugged)
+{
+	nvhdcp->plugged = plugged;
+	wmb();
+	return plugged;
+}
+
+static int nvhdcp_i2c_read(struct tegra_nvhdcp *nvhdcp, u8 reg,
+					size_t len, void *data)
+{
+	int status;
+	int retries = 15;
+	struct i2c_msg msg[] = {
+		{
+			.addr = 0x74 >> 1, /* primary link */
+			.flags = 0,
+			.len = 1,
+			.buf = &reg,
+		},
+		{
+			.addr = 0x74 >> 1, /* primary link */
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = data,
+		},
+	};
+
+	do {
+		if (!nvhdcp_is_plugged(nvhdcp)) {
+			nvhdcp_err("disconnect during i2c xfer\n");
+			return -EIO;
+		}
+		status = i2c_transfer(nvhdcp->client->adapter,
+			msg, ARRAY_SIZE(msg));
+		if ((status < 0) && (retries > 1))
+			msleep(250);
+	} while ((status < 0) && retries--);
+
+	if (status < 0) {
+		nvhdcp_err("i2c xfer error %d\n", status);
+		return status;
+	}
+
+	return 0;
+}
+
+static int nvhdcp_i2c_write(struct tegra_nvhdcp *nvhdcp, u8 reg,
+					size_t len, const void *data)
+{
+	int status;
+	u8 buf[len + 1];
+	struct i2c_msg msg[] = {
+		{
+			.addr = 0x74 >> 1, /* primary link */
+			.flags = 0,
+			.len = len + 1,
+			.buf = buf,
+		},
+	};
+	int retries = 15;
+
+	buf[0] = reg;
+	memcpy(buf + 1, data, len);
+
+	do {
+		if (!nvhdcp_is_plugged(nvhdcp)) {
+			nvhdcp_err("disconnect during i2c xfer\n");
+			return -EIO;
+		}
+		status = i2c_transfer(nvhdcp->client->adapter,
+			msg, ARRAY_SIZE(msg));
+		if ((status < 0) && (retries > 1))
+			msleep(250);
+	} while ((status < 0) && retries--);
+
+	if (status < 0) {
+		nvhdcp_err("i2c xfer error %d\n", status);
+		return status;
+	}
+
+	return 0;
+}
+
+static inline int nvhdcp_i2c_read8(struct tegra_nvhdcp *nvhdcp, u8 reg, u8 *val)
+{
+	return nvhdcp_i2c_read(nvhdcp, reg, 1, val);
+}
+
+static inline int nvhdcp_i2c_write8(struct tegra_nvhdcp *nvhdcp, u8 reg, u8 val)
+{
+	return nvhdcp_i2c_write(nvhdcp, reg, 1, &val);
+}
+
+static inline int nvhdcp_i2c_read16(struct tegra_nvhdcp *nvhdcp,
+					u8 reg, u16 *val)
+{
+	u8 buf[2];
+	int e;
+
+	e = nvhdcp_i2c_read(nvhdcp, reg, sizeof buf, buf);
+	if (e)
+		return e;
+
+	if (val)
+		*val = buf[0] | (u16)buf[1] << 8;
+
+	return 0;
+}
+
+static int nvhdcp_i2c_read40(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 *val)
+{
+	u8 buf[5];
+	int e, i;
+	u64 n;
+
+	e = nvhdcp_i2c_read(nvhdcp, reg, sizeof buf, buf);
+	if (e)
+		return e;
+
+	for (i = 0, n = 0; i < 5; i++) {
+		n <<= 8;
+		n |= buf[4 - i];
+	}
+
+	if (val)
+		*val = n;
+
+	return 0;
+}
+
+static int nvhdcp_i2c_write40(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 val)
+{
+	char buf[5];
+	int i;
+	for (i = 0; i < 5; i++) {
+		buf[i] = val;
+		val >>= 8;
+	}
+	return nvhdcp_i2c_write(nvhdcp, reg, sizeof buf, buf);
+}
+
+static int nvhdcp_i2c_write64(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 val)
+{
+	char buf[8];
+	int i;
+	for (i = 0; i < 8; i++) {
+		buf[i] = val;
+		val >>= 8;
+	}
+	return nvhdcp_i2c_write(nvhdcp, reg, sizeof buf, buf);
+}
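+/*
+ * The multi-byte helpers above serialize values LSB first, matching the
+ * little-endian layout of the HDCP DDC registers (Bksv, An, Aksv, ...).
+ */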
+
+
+/* 64-bit link encryption session random number */
+static inline u64 get_an(struct tegra_dc_hdmi_data *hdmi)
+{
+	u64 r;
+	r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AN_MSB) << 32;
+	r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+	return r;
+}
+
+/* 64-bit upstream exchange random number */
+static inline void set_cn(struct tegra_dc_hdmi_data *hdmi, u64 c_n)
+{
+	tegra_hdmi_writel(hdmi, (u32)c_n, HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+	tegra_hdmi_writel(hdmi, c_n >> 32, HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+}
+
+
+/* 40-bit transmitter's key selection vector */
+static inline u64 get_aksv(struct tegra_dc_hdmi_data *hdmi)
+{
+	u64 r;
+	r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AKSV_MSB) << 32;
+	r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+	return r;
+}
+
+/* 40-bit receiver's key selection vector */
+static inline void set_bksv(struct tegra_dc_hdmi_data *hdmi, u64 b_ksv, bool repeater)
+{
+	if (repeater)
+		b_ksv |= (u64)REPEATER << 32;
+	tegra_hdmi_writel(hdmi, (u32)b_ksv, HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+	tegra_hdmi_writel(hdmi, b_ksv >> 32, HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+}
+
+
+/* 40-bit software's key selection vector */
+static inline void set_cksv(struct tegra_dc_hdmi_data *hdmi, u64 c_ksv)
+{
+	tegra_hdmi_writel(hdmi, (u32)c_ksv, HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+	tegra_hdmi_writel(hdmi, c_ksv >> 32, HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+}
+
+/* 40-bit connection state */
+static inline u64 get_cs(struct tegra_dc_hdmi_data *hdmi)
+{
+	u64 r;
+	r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CS_MSB) << 32;
+	r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+	return r;
+}
+
+/* 40-bit upstream key selection vector */
+static inline u64 get_dksv(struct tegra_dc_hdmi_data *hdmi)
+{
+	u64 r;
+	r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_DKSV_MSB) << 32;
+	r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+	return r;
+}
+
+/* 64-bit encrypted M0 value */
+static inline u64 get_mprime(struct tegra_dc_hdmi_data *hdmi)
+{
+	u64 r;
+	r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB) << 32;
+	r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+	return r;
+}
+
+static inline u16 get_transmitter_ri(struct tegra_dc_hdmi_data *hdmi)
+{
+	return tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_RI);
+}
+
+static inline int get_receiver_ri(struct tegra_nvhdcp *nvhdcp, u16 *r)
+{
+	return nvhdcp_i2c_read16(nvhdcp, 0x8, r); /* long read */
+}
+
+static int get_bcaps(struct tegra_nvhdcp *nvhdcp, u8 *b_caps)
+{
+	return nvhdcp_i2c_read8(nvhdcp, 0x40, b_caps);
+}
+
+static int get_ksvfifo(struct tegra_nvhdcp *nvhdcp,
+					unsigned num_bksv_list, u64 *ksv_list)
+{
+	u8 *buf, *p;
+	int e;
+	unsigned i;
+	size_t buf_len = num_bksv_list * 5;
+
+	if (!ksv_list || num_bksv_list > TEGRA_NVHDCP_MAX_DEVS)
+		return -EINVAL;
+
+	if (num_bksv_list == 0)
+		return 0;
+
+	buf = kmalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	e = nvhdcp_i2c_read(nvhdcp, 0x43, buf_len, buf);
+	if (e) {
+		kfree(buf);
+		return e;
+	}
+
+	/* load 40-bit keys from repeater into array of u64 */
+	p = buf;
+	for (i = 0; i < num_bksv_list; i++) {
+		ksv_list[i] = p[0] | ((u64)p[1] << 8) | ((u64)p[2] << 16)
+				| ((u64)p[3] << 24) | ((u64)p[4] << 32);
+		p += 5;
+	}
+
+	kfree(buf);
+	return 0;
+}
+
+/* get V' 160-bit SHA-1 hash from repeater */
+static int get_vprime(struct tegra_nvhdcp *nvhdcp, u8 *v_prime)
+{
+	int e, i;
+
+	for (i = 0; i < 20; i += 4) {
+		e = nvhdcp_i2c_read(nvhdcp, 0x20 + i, 4, v_prime + i);
+		if (e)
+			return e;
+	}
+	return 0;
+}
+
+
+/* set or clear RUN_YES */
+static void hdcp_ctrl_run(struct tegra_dc_hdmi_data *hdmi, bool v)
+{
+	u32 ctrl;
+
+	if (v) {
+		ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL);
+		ctrl |= HDCP_RUN_YES;
+	} else {
+		ctrl = 0;
+	}
+
+	tegra_hdmi_writel(hdmi, ctrl, HDMI_NV_PDISP_RG_HDCP_CTRL);
+}
+
+/* wait for any bits in mask to be set in HDMI_NV_PDISP_RG_HDCP_CTRL;
+ * sleeps up to 120 ms */
+static int wait_hdcp_ctrl(struct tegra_dc_hdmi_data *hdmi, u32 mask, u32 *v)
+{
+	int retries = 13;
+	u32 ctrl;
+
+	do {
+		ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL);
+		if ((ctrl & mask)) {
+			if (v)
+				*v = ctrl;
+			break;
+		}
+		if (retries > 1)
+			msleep(10);
+	} while (--retries);
+	if (!retries) {
+		nvhdcp_err("ctrl read timeout (mask=0x%x)\n", mask);
+		return -EIO;
+	}
+	return 0;
+}
+
+/* wait for bits in mask to be set to value in HDMI_NV_PDISP_KEY_CTRL;
+ * waits up to 100 ms */
+static int wait_key_ctrl(struct tegra_dc_hdmi_data *hdmi, u32 mask, u32 value)
+{
+	int retries = 101;
+	u32 ctrl;
+
+	do {
+		msleep(1);
+		ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_CTRL);
+		if (((ctrl ^ value) & mask) == 0)
+			break;
+	} while (--retries);
+	if (!retries) {
+		nvhdcp_err("key ctrl read timeout (mask=0x%x)\n", mask);
+		return -EIO;
+	}
+	return 0;
+}
+
+/* check that key selection vector is well formed.
+ * NOTE: this function assumes KSV has already been checked against
+ * revocation list.
+ */
+static int verify_ksv(u64 k)
+{
+	unsigned i;
+
+	/* count set bits, must be exactly 20 set to be valid */
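+	/* (k & -k) isolates the lowest set bit; XOR-ing it away clears
+	 * one bit per iteration */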
+	for (i = 0; k; i++)
+		k ^= k & -k;
+
+	return  (i != 20) ? -EINVAL : 0;
+}
+
+/* get Status and Kprime signature - READ_S on TMDS0_LINK0 only */
+static int get_s_prime(struct tegra_nvhdcp *nvhdcp, struct tegra_nvhdcp_packet *pkt)
+{
+	struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+	u32 sp_msb, sp_lsb1, sp_lsb2;
+	int e;
+
+	/* if connection isn't authenticated ... */
+	mutex_lock(&nvhdcp->lock);
+	if (nvhdcp->state != STATE_LINK_VERIFY) {
+		memset(pkt, 0, sizeof *pkt);
+		pkt->packet_results = TEGRA_NVHDCP_RESULT_LINK_FAILED;
+		e = 0;
+		goto err;
+	}
+
+	pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+
+	/* we will be taking c_n, c_ksv as input */
+	if (!(pkt->value_flags & TEGRA_NVHDCP_FLAG_CN)
+			|| !(pkt->value_flags & TEGRA_NVHDCP_FLAG_CKSV)) {
+		nvhdcp_err("missing value_flags (0x%x)\n", pkt->value_flags);
+		e = -EINVAL;
+		goto err;
+	}
+
+	pkt->value_flags = 0;
+
+	pkt->a_ksv = nvhdcp->a_ksv;
+	pkt->a_n = nvhdcp->a_n;
+	pkt->value_flags = TEGRA_NVHDCP_FLAG_AKSV | TEGRA_NVHDCP_FLAG_AN;
+
+	nvhdcp_vdbg("%s():cn %llx cksv %llx\n", __func__, pkt->c_n, pkt->c_ksv);
+
+	set_cn(hdmi, pkt->c_n);
+
+	tegra_hdmi_writel(hdmi, TMDS0_LINK0 | READ_S,
+					HDMI_NV_PDISP_RG_HDCP_CMODE);
+
+	set_cksv(hdmi, pkt->c_ksv);
+
+	e = wait_hdcp_ctrl(hdmi, SPRIME_VALID, NULL);
+	if (e) {
+		nvhdcp_err("Sprime read timeout\n");
+		pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+		e = -EIO;
+		goto err;
+	}
+
+	msleep(50);
+
+	/* read 56-bit Sprime plus 16 status bits */
+	sp_msb = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+	sp_lsb1 = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+	sp_lsb2 = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+
+	/* top 8 bits of LSB2 and bottom 8 bits of MSB hold status bits. */
+	pkt->hdcp_status = (sp_msb << 8) | (sp_lsb2 >> 24);
+	pkt->value_flags |= TEGRA_NVHDCP_FLAG_S;
+
+	/* 56-bit Kprime */
+	pkt->k_prime = ((u64)(sp_lsb2 & 0xffffff) << 32) | sp_lsb1;
+	pkt->value_flags |= TEGRA_NVHDCP_FLAG_KP;
+
+	/* is connection state supported? */
+	if (sp_msb & STATUS_CS) {
+		pkt->cs = get_cs(hdmi);
+		pkt->value_flags |= TEGRA_NVHDCP_FLAG_CS;
+	}
+
+	/* load Dksv */
+	pkt->d_ksv = get_dksv(hdmi);
+	if (verify_ksv(pkt->d_ksv)) {
+		nvhdcp_err("Dksv invalid!\n");
+		pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+		e = -EIO; /* treat bad Dksv as I/O error */
+	}
+	pkt->value_flags |= TEGRA_NVHDCP_FLAG_DKSV;
+
+	/* copy current Bksv */
+	pkt->b_ksv = nvhdcp->b_ksv;
+	pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSV;
+
+	pkt->packet_results = TEGRA_NVHDCP_RESULT_SUCCESS;
+	mutex_unlock(&nvhdcp->lock);
+	return 0;
+
+err:
+	mutex_unlock(&nvhdcp->lock);
+	return e;
+}
+
+/* get M prime - READ_M on TMDS0_LINK0 only */
+static inline int get_m_prime(struct tegra_nvhdcp *nvhdcp, struct tegra_nvhdcp_packet *pkt)
+{
+	struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+	int e;
+
+	pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+
+	/* if connection isn't authenticated ... */
+	mutex_lock(&nvhdcp->lock);
+	if (nvhdcp->state != STATE_LINK_VERIFY) {
+		memset(pkt, 0, sizeof *pkt);
+		pkt->packet_results = TEGRA_NVHDCP_RESULT_LINK_FAILED;
+		e = 0;
+		goto err;
+	}
+
+	pkt->a_ksv = nvhdcp->a_ksv;
+	pkt->a_n = nvhdcp->a_n;
+	pkt->value_flags = TEGRA_NVHDCP_FLAG_AKSV | TEGRA_NVHDCP_FLAG_AN;
+
+	set_cn(hdmi, pkt->c_n);
+
+	tegra_hdmi_writel(hdmi, TMDS0_LINK0 | READ_M,
+					HDMI_NV_PDISP_RG_HDCP_CMODE);
+
+	/* Cksv write triggers Mprime update */
+	set_cksv(hdmi, pkt->c_ksv);
+
+	e = wait_hdcp_ctrl(hdmi, MPRIME_VALID, NULL);
+	if (e) {
+		nvhdcp_err("Mprime read timeout\n");
+		e = -EIO;
+		goto err;
+	}
+	msleep(50);
+
+	/* load Mprime */
+	pkt->m_prime = get_mprime(hdmi);
+	pkt->value_flags |= TEGRA_NVHDCP_FLAG_MP;
+
+	pkt->b_status = nvhdcp->b_status;
+	pkt->value_flags |= TEGRA_NVHDCP_FLAG_BSTATUS;
+
+	/* copy most recent KSVFIFO, if it is non-zero */
+	pkt->num_bksv_list = nvhdcp->num_bksv_list;
+	if (nvhdcp->num_bksv_list) {
+		BUILD_BUG_ON(sizeof(pkt->bksv_list) != sizeof(nvhdcp->bksv_list));
+		memcpy(pkt->bksv_list, nvhdcp->bksv_list,
+			nvhdcp->num_bksv_list * sizeof(*pkt->bksv_list));
+		pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSVLIST;
+	}
+
+	/* copy v_prime */
+	BUILD_BUG_ON(sizeof(pkt->v_prime) != sizeof(nvhdcp->v_prime));
+	memcpy(pkt->v_prime, nvhdcp->v_prime, sizeof(nvhdcp->v_prime));
+	pkt->value_flags |= TEGRA_NVHDCP_FLAG_V;
+
+	/* load Dksv */
+	pkt->d_ksv = get_dksv(hdmi);
+	if (verify_ksv(pkt->d_ksv)) {
+		nvhdcp_err("Dksv invalid!\n");
+		e = -EIO;
+		goto err;
+	}
+	pkt->value_flags |= TEGRA_NVHDCP_FLAG_DKSV;
+
+	/* copy current Bksv */
+	pkt->b_ksv = nvhdcp->b_ksv;
+	pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSV;
+
+	pkt->packet_results = TEGRA_NVHDCP_RESULT_SUCCESS;
+	mutex_unlock(&nvhdcp->lock);
+	return 0;
+
+err:
+	mutex_unlock(&nvhdcp->lock);
+	return e;
+}
+
+static int load_kfuse(struct tegra_dc_hdmi_data *hdmi)
+{
+	unsigned buf[KFUSE_DATA_SZ / 4];
+	int e, i;
+	u32 ctrl;
+	u32 tmp;
+	int retries;
+
+	/* copy load kfuse into buffer - only needed for early Tegra parts */
+	e = tegra_kfuse_read(buf, sizeof buf);
+	if (e) {
+		nvhdcp_err("Kfuse read failure\n");
+		return e;
+	}
+
+	/* write the kfuse to HDMI SRAM */
+
+	tegra_hdmi_writel(hdmi, 1, HDMI_NV_PDISP_KEY_CTRL); /* LOAD_KEYS */
+
+	/* issue a reload */
+	ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_CTRL);
+	tegra_hdmi_writel(hdmi, ctrl | PKEY_REQUEST_RELOAD_TRIGGER
+					| LOCAL_KEYS , HDMI_NV_PDISP_KEY_CTRL);
+
+	e = wait_key_ctrl(hdmi, PKEY_LOADED, PKEY_LOADED);
+	if (e) {
+		nvhdcp_err("key reload timeout\n");
+		return -EIO;
+	}
+
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_KEY_SKEY_INDEX);
+
+	/* wait for SRAM to be cleared */
+	retries = 6;
+	do {
+		tmp = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_DEBUG0);
+		if ((tmp & 1) == 0)
+			break;
+		if (retries > 1)
+			mdelay(1);
+	} while (--retries);
+	if (!retries) {
+		nvhdcp_err("key SRAM clear timeout\n");
+		return -EIO;
+	}
+
+	for (i = 0; i < KFUSE_DATA_SZ / 4; i += 4) {
+		/* load 128 bits */
+		tegra_hdmi_writel(hdmi, buf[i], HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+		tegra_hdmi_writel(hdmi, buf[i+1], HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+		tegra_hdmi_writel(hdmi, buf[i+2], HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+		tegra_hdmi_writel(hdmi, buf[i+3], HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+
+		/* trigger LOAD_HDCP_KEY */
+		tegra_hdmi_writel(hdmi, 0x100, HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+
+		tmp = LOCAL_KEYS | WRITE16;
+		if (i)
+			tmp |= AUTOINC;
+		tegra_hdmi_writel(hdmi, tmp, HDMI_NV_PDISP_KEY_CTRL);
+
+		/* wait for WRITE16 to complete */
+		e = wait_key_ctrl(hdmi, 0x10, 0); /* WRITE16 */
+		if (e) {
+			nvhdcp_err("key write timeout\n");
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static int verify_link(struct tegra_nvhdcp *nvhdcp, bool wait_ri)
+{
+	struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+	int retries = 3;
+	u16 old, rx, tx;
+	int e;
+
+	old = 0;
+	rx = 0;
+	tx = 0;
+	/* retry 3 times to deal with I2C link issues */
+	do {
+		if (wait_ri)
+			old = get_transmitter_ri(hdmi);
+
+		e = get_receiver_ri(nvhdcp, &rx);
+		if (!e) {
+			if (!rx) {
+				nvhdcp_err("Ri is 0!\n");
+				return -EINVAL;
+			}
+
+			tx = get_transmitter_ri(hdmi);
+		} else {
+			rx = ~tx;
+			msleep(50);
+		}
+
+	} while (wait_ri && --retries && old != tx);
+
+	nvhdcp_debug("R0 Ri poll:rx=0x%04x tx=0x%04x\n", rx, tx);
+
+	if (!nvhdcp_is_plugged(nvhdcp)) {
+		nvhdcp_err("aborting verify links - lost hdmi connection\n");
+		return -EIO;
+	}
+
+	if (rx != tx)
+		return -EINVAL;
+
+	return 0;
+}
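+/*
+ * HDCP 1.x updates Ri/Ri' every 128 frames; the downstream worker below
+ * re-runs this check roughly every 1.5 seconds to keep the link verified.
+ */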
+
+static int get_repeater_info(struct tegra_nvhdcp *nvhdcp)
+{
+	int e, retries;
+	u8 b_caps;
+	u16 b_status;
+
+	nvhdcp_vdbg("repeater found:fetching repeater info\n");
+
+	/* wait up to 5 seconds for READY on repeater */
+	retries = 51;
+	do {
+		if (!nvhdcp_is_plugged(nvhdcp)) {
+			nvhdcp_err("disconnect while waiting for repeater\n");
+			return -EIO;
+		}
+
+		e = get_bcaps(nvhdcp, &b_caps);
+		if (!e && (b_caps & BCAPS_READY)) {
+			nvhdcp_debug("Bcaps READY from repeater\n");
+			break;
+		}
+		if (retries > 1)
+			msleep(100);
+	} while (--retries);
+	if (!retries) {
+		nvhdcp_err("repeater Bcaps read timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	memset(nvhdcp->v_prime, 0, sizeof nvhdcp->v_prime);
+	e = get_vprime(nvhdcp, nvhdcp->v_prime);
+	if (e) {
+		nvhdcp_err("repeater Vprime read failure!\n");
+		return e;
+	}
+
+	e = nvhdcp_i2c_read16(nvhdcp, 0x41, &b_status);
+	if (e) {
+		nvhdcp_err("Bstatus read failure!\n");
+		return e;
+	}
+
+	if (b_status & BSTATUS_MAX_DEVS_EXCEEDED) {
+		nvhdcp_err("repeater:max devices (0x%04x)\n", b_status);
+		return -EINVAL;
+	}
+
+	if (b_status & BSTATUS_MAX_CASCADE_EXCEEDED) {
+		nvhdcp_err("repeater:max cascade (0x%04x)\n", b_status);
+		return -EINVAL;
+	}
+
+	nvhdcp->b_status = b_status;
+	nvhdcp->num_bksv_list = b_status & 0x7f;
+	nvhdcp_vdbg("Bstatus 0x%x (devices: %d)\n",
+				b_status, nvhdcp->num_bksv_list);
+
+	memset(nvhdcp->bksv_list, 0, sizeof nvhdcp->bksv_list);
+	e = get_ksvfifo(nvhdcp, nvhdcp->num_bksv_list, nvhdcp->bksv_list);
+	if (e) {
+		nvhdcp_err("repeater:could not read KSVFIFO (err %d)\n", e);
+		return e;
+	}
+
+	return 0;
+}
+
+static void nvhdcp_downstream_worker(struct work_struct *work)
+{
+	struct tegra_nvhdcp *nvhdcp =
+		container_of(to_delayed_work(work), struct tegra_nvhdcp, work);
+	struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+	int e;
+	u8 b_caps;
+	u32 tmp;
+	u32 res;
+
+	nvhdcp_vdbg("%s():started thread %s\n", __func__, nvhdcp->name);
+
+	mutex_lock(&nvhdcp->lock);
+	if (nvhdcp->state == STATE_OFF) {
+		nvhdcp_err("nvhdcp failure - giving up\n");
+		goto err;
+	}
+	nvhdcp->state = STATE_UNAUTHENTICATED;
+
+	/* check plug state to terminate early in case of flush_workqueue() */
+	if (!nvhdcp_is_plugged(nvhdcp)) {
+		nvhdcp_err("worker started while unplugged!\n");
+		goto lost_hdmi;
+	}
+	nvhdcp_vdbg("%s():hpd=%d\n", __func__, nvhdcp->plugged);
+
+	nvhdcp->a_ksv = 0;
+	nvhdcp->b_ksv = 0;
+	nvhdcp->a_n = 0;
+
+	e = get_bcaps(nvhdcp, &b_caps);
+	if (e) {
+		nvhdcp_err("Bcaps read failure\n");
+		goto failure;
+	}
+
+	nvhdcp_vdbg("read Bcaps = 0x%02x\n", b_caps);
+
+	nvhdcp_vdbg("kfuse loading ...\n");
+
+	/* repeater flag in Bskv must be configured before loading fuses */
+	set_bksv(hdmi, 0, (b_caps & BCAPS_REPEATER));
+
+	e = load_kfuse(hdmi);
+	if (e) {
+		nvhdcp_err("kfuse could not be loaded\n");
+		goto failure;
+	}
+
+	hdcp_ctrl_run(hdmi, 1);
+
+	nvhdcp_vdbg("wait AN_VALID ...\n");
+
+	/* wait for hardware to generate HDCP values */
+	e = wait_hdcp_ctrl(hdmi, AN_VALID | SROM_ERR, &res);
+	if (e) {
+		nvhdcp_err("An key generation timeout\n");
+		goto failure;
+	}
+	if (res & SROM_ERR) {
+		nvhdcp_err("SROM error\n");
+		goto failure;
+	}
+
+	msleep(25);
+
+	nvhdcp->a_ksv = get_aksv(hdmi);
+	nvhdcp->a_n = get_an(hdmi);
+	nvhdcp_vdbg("Aksv is 0x%016llx\n", nvhdcp->a_ksv);
+	nvhdcp_vdbg("An is 0x%016llx\n", nvhdcp->a_n);
+	if (verify_ksv(nvhdcp->a_ksv)) {
+		nvhdcp_err("Aksv verify failure! (0x%016llx)\n", nvhdcp->a_ksv);
+		goto disable;
+	}
+
+	/* write Ainfo to receiver - set 1.1 only if b_caps supports it */
+	e = nvhdcp_i2c_write8(nvhdcp, 0x15, b_caps & BCAPS_11);
+	if (e) {
+		nvhdcp_err("Ainfo write failure\n");
+		goto failure;
+	}
+
+	/* write An to receiver */
+	e = nvhdcp_i2c_write64(nvhdcp, 0x18, nvhdcp->a_n);
+	if (e) {
+		nvhdcp_err("An write failure\n");
+		goto failure;
+	}
+
+	nvhdcp_vdbg("wrote An = 0x%016llx\n", nvhdcp->a_n);
+
+	/* write Aksv to receiver - triggers auth sequence */
+	e = nvhdcp_i2c_write40(nvhdcp, 0x10, nvhdcp->a_ksv);
+	if (e) {
+		nvhdcp_err("Aksv write failure\n");
+		goto failure;
+	}
+
+	nvhdcp_vdbg("wrote Aksv = 0x%010llx\n", nvhdcp->a_ksv);
+
+	/* bail out if unplugged in the middle of negotiation */
+	if (!nvhdcp_is_plugged(nvhdcp))
+		goto lost_hdmi;
+
+	/* get Bksv from receiver */
+	e = nvhdcp_i2c_read40(nvhdcp, 0x00, &nvhdcp->b_ksv);
+	if (e) {
+		nvhdcp_err("Bksv read failure\n");
+		goto failure;
+	}
+	nvhdcp_vdbg("Bksv is 0x%016llx\n", nvhdcp->b_ksv);
+	if (verify_ksv(nvhdcp->b_ksv)) {
+		nvhdcp_err("Bksv verify failure!\n");
+		goto failure;
+	}
+
+	nvhdcp_vdbg("read Bksv = 0x%010llx from device\n", nvhdcp->b_ksv);
+
+	set_bksv(hdmi, nvhdcp->b_ksv, (b_caps & BCAPS_REPEATER));
+
+	nvhdcp_vdbg("loaded Bksv into controller\n");
+
+	e = wait_hdcp_ctrl(hdmi, R0_VALID, NULL);
+	if (e) {
+		nvhdcp_err("R0 read failure!\n");
+		goto failure;
+	}
+
+	nvhdcp_vdbg("R0 valid\n");
+
+	msleep(100); /* can't read R0' within 100ms of writing Aksv */
+
+	nvhdcp_vdbg("verifying links ...\n");
+
+	e = verify_link(nvhdcp, false);
+	if (e) {
+		nvhdcp_err("link verification failed err %d\n", e);
+		goto failure;
+	}
+
+	/* if repeater then get repeater info */
+	if (b_caps & BCAPS_REPEATER) {
+		e = get_repeater_info(nvhdcp);
+		if (e) {
+			nvhdcp_err("get repeater info failed\n");
+			goto failure;
+		}
+	}
+
+	tmp = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL);
+	tmp |= CRYPT_ENABLED;
+	if (b_caps & BCAPS_11) /* HDCP 1.1 ? */
+		tmp |= ONEONE_ENABLED;
+	tegra_hdmi_writel(hdmi, tmp, HDMI_NV_PDISP_RG_HDCP_CTRL);
+
+	nvhdcp_vdbg("CRYPT enabled\n");
+
+	nvhdcp->state = STATE_LINK_VERIFY;
+	nvhdcp_info("link verified!\n");
+
+	while (1) {
+		if (!nvhdcp_is_plugged(nvhdcp))
+			goto lost_hdmi;
+
+		if (nvhdcp->state != STATE_LINK_VERIFY)
+			goto failure;
+
+		e = verify_link(nvhdcp, true);
+		if (e) {
+			nvhdcp_err("link verification failed err %d\n", e);
+			goto failure;
+		}
+		mutex_unlock(&nvhdcp->lock);
+		wait_event_interruptible_timeout(wq_worker,
+			!nvhdcp_is_plugged(nvhdcp), msecs_to_jiffies(1500));
+		mutex_lock(&nvhdcp->lock);
+
+	}
+
+failure:
+	nvhdcp->fail_count++;
+	if (nvhdcp->fail_count > 5) {
+		nvhdcp_err("nvhdcp failure - too many failures, giving up!\n");
+	} else {
+		nvhdcp_err("nvhdcp failure - renegotiating in 1 second\n");
+		if (!nvhdcp_is_plugged(nvhdcp))
+			goto lost_hdmi;
+		queue_delayed_work(nvhdcp->downstream_wq, &nvhdcp->work,
+						msecs_to_jiffies(1000));
+	}
+
+lost_hdmi:
+	nvhdcp->state = STATE_UNAUTHENTICATED;
+	hdcp_ctrl_run(hdmi, 0);
+
+err:
+	mutex_unlock(&nvhdcp->lock);
+	return;
+disable:
+	nvhdcp->state = STATE_OFF;
+	nvhdcp_set_plugged(nvhdcp, false);
+	mutex_unlock(&nvhdcp->lock);
+	return;
+}
+
+static int tegra_nvhdcp_on(struct tegra_nvhdcp *nvhdcp)
+{
+	nvhdcp->state = STATE_UNAUTHENTICATED;
+	if (nvhdcp_is_plugged(nvhdcp)) {
+		nvhdcp->fail_count = 0;
+		queue_delayed_work(nvhdcp->downstream_wq, &nvhdcp->work,
+						msecs_to_jiffies(100));
+	}
+	return 0;
+}
+
+static int tegra_nvhdcp_off(struct tegra_nvhdcp *nvhdcp)
+{
+	mutex_lock(&nvhdcp->lock);
+	nvhdcp->state = STATE_OFF;
+	nvhdcp_set_plugged(nvhdcp, false);
+	mutex_unlock(&nvhdcp->lock);
+	wake_up_interruptible(&wq_worker);
+	flush_workqueue(nvhdcp->downstream_wq);
+	return 0;
+}
+
+void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp, bool hpd)
+{
+	nvhdcp_debug("hdmi hotplug detected (hpd = %d)\n", hpd);
+
+	if (hpd) {
+		nvhdcp_set_plugged(nvhdcp, true);
+		tegra_nvhdcp_on(nvhdcp);
+	} else {
+		tegra_nvhdcp_off(nvhdcp);
+	}
+}
+
+int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol)
+{
+	if (pol == TEGRA_NVHDCP_POLICY_ALWAYS_ON) {
+		nvhdcp_info("using \"always on\" policy.\n");
+		if (atomic_xchg(&nvhdcp->policy, pol) != pol) {
+			/* policy changed, start working */
+			tegra_nvhdcp_on(nvhdcp);
+		}
+	} else {
+		/* unsupported policy */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int tegra_nvhdcp_renegotiate(struct tegra_nvhdcp *nvhdcp)
+{
+	mutex_lock(&nvhdcp->lock);
+	nvhdcp->state = STATE_RENEGOTIATE;
+	mutex_unlock(&nvhdcp->lock);
+	tegra_nvhdcp_on(nvhdcp);
+	return 0;
+}
+
+void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp)
+{
+	if (!nvhdcp)
+		return;
+	tegra_nvhdcp_off(nvhdcp);
+}
+
+void tegra_nvhdcp_resume(struct tegra_nvhdcp *nvhdcp)
+{
+	if (!nvhdcp)
+		return;
+	tegra_nvhdcp_renegotiate(nvhdcp);
+}
+
+static long nvhdcp_dev_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg)
+{
+	struct tegra_nvhdcp *nvhdcp = filp->private_data;
+	struct tegra_nvhdcp_packet *pkt;
+	int e = -ENOTTY;
+
+	switch (cmd) {
+	case TEGRAIO_NVHDCP_ON:
+		return tegra_nvhdcp_on(nvhdcp);
+
+	case TEGRAIO_NVHDCP_OFF:
+		return tegra_nvhdcp_off(nvhdcp);
+
+	case TEGRAIO_NVHDCP_SET_POLICY:
+		return tegra_nvhdcp_set_policy(nvhdcp, arg);
+
+	case TEGRAIO_NVHDCP_READ_M:
+		pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+		if (!pkt)
+			return -ENOMEM;
+		if (copy_from_user(pkt, (void __user *)arg, sizeof(*pkt))) {
+			e = -EFAULT;
+			goto kfree_pkt;
+		}
+		e = get_m_prime(nvhdcp, pkt);
+		if (copy_to_user((void __user *)arg, pkt, sizeof(*pkt))) {
+			e = -EFAULT;
+			goto kfree_pkt;
+		}
+		kfree(pkt);
+		return e;
+
+	case TEGRAIO_NVHDCP_READ_S:
+		pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+		if (!pkt)
+			return -ENOMEM;
+		if (copy_from_user(pkt, (void __user *)arg, sizeof(*pkt))) {
+			e = -EFAULT;
+			goto kfree_pkt;
+		}
+		e = get_s_prime(nvhdcp, pkt);
+		if (copy_to_user((void __user *)arg, pkt, sizeof(*pkt))) {
+			e = -EFAULT;
+			goto kfree_pkt;
+		}
+		kfree(pkt);
+		return e;
+
+	case TEGRAIO_NVHDCP_RENEGOTIATE:
+		e = tegra_nvhdcp_renegotiate(nvhdcp);
+		break;
+	}
+
+	return e;
+kfree_pkt:
+	kfree(pkt);
+	return e;
+}
+
+static int nvhdcp_dev_open(struct inode *inode, struct file *filp)
+{
+	struct miscdevice *miscdev = filp->private_data;
+	struct tegra_nvhdcp *nvhdcp =
+		container_of(miscdev, struct tegra_nvhdcp, miscdev);
+	filp->private_data = nvhdcp;
+	return 0;
+}
+
+static int nvhdcp_dev_release(struct inode *inode, struct file *filp)
+{
+	filp->private_data = NULL;
+	return 0;
+}
+
+static const struct file_operations nvhdcp_fops = {
+	.owner          = THIS_MODULE,
+	.llseek         = no_llseek,
+	.unlocked_ioctl = nvhdcp_dev_ioctl,
+	.open           = nvhdcp_dev_open,
+	.release        = nvhdcp_dev_release,
+};
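+
+/*
+ * Sketch of the expected userspace usage (hypothetical values; the
+ * device node name assumes the controller was created with id 1):
+ *
+ *	int fd = open("/dev/nvhdcp1", O_RDWR);
+ *	struct tegra_nvhdcp_packet pkt = { 0 };
+ *
+ *	pkt.value_flags = TEGRA_NVHDCP_FLAG_CN | TEGRA_NVHDCP_FLAG_CKSV;
+ *	pkt.c_n = my_cn;
+ *	pkt.c_ksv = my_cksv;
+ *	ioctl(fd, TEGRAIO_NVHDCP_READ_S, &pkt);
+ *
+ * where my_cn and my_cksv stand in for the caller's upstream exchange
+ * random number and KSV.
+ */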
+
+/* we only support one AP right now, so this should only be called once. */
+struct tegra_nvhdcp *tegra_nvhdcp_create(struct tegra_dc_hdmi_data *hdmi,
+			int id, int bus)
+{
+	static struct tegra_nvhdcp *nvhdcp; /* prevent multiple calls */
+	struct i2c_adapter *adapter;
+	int e;
+
+	if (nvhdcp)
+		return ERR_PTR(-EMFILE);
+
+	nvhdcp = kzalloc(sizeof(*nvhdcp), GFP_KERNEL);
+	if (!nvhdcp)
+		return ERR_PTR(-ENOMEM);
+
+	nvhdcp->id = id;
+	snprintf(nvhdcp->name, sizeof(nvhdcp->name), "nvhdcp%u", id);
+	nvhdcp->hdmi = hdmi;
+	mutex_init(&nvhdcp->lock);
+
+	strlcpy(nvhdcp->info.type, nvhdcp->name, sizeof(nvhdcp->info.type));
+	nvhdcp->bus = bus;
+	nvhdcp->info.addr = 0x74 >> 1;
+	nvhdcp->info.platform_data = nvhdcp;
+	nvhdcp->fail_count = 0;
+
+	adapter = i2c_get_adapter(bus);
+	if (!adapter) {
+		nvhdcp_err("can't get adapter for bus %d\n", bus);
+		e = -EBUSY;
+		goto free_nvhdcp;
+	}
+
+	nvhdcp->client = i2c_new_device(adapter, &nvhdcp->info);
+	i2c_put_adapter(adapter);
+
+	if (!nvhdcp->client) {
+		nvhdcp_err("can't create new device\n");
+		e = -EBUSY;
+		goto free_nvhdcp;
+	}
+
+	nvhdcp->state = STATE_UNAUTHENTICATED;
+
+	nvhdcp->downstream_wq = create_singlethread_workqueue(nvhdcp->name);
+	INIT_DELAYED_WORK(&nvhdcp->work, nvhdcp_downstream_worker);
+
+	nvhdcp->miscdev.minor = MISC_DYNAMIC_MINOR;
+	nvhdcp->miscdev.name = nvhdcp->name;
+	nvhdcp->miscdev.fops = &nvhdcp_fops;
+
+	e = misc_register(&nvhdcp->miscdev);
+	if (e)
+		goto free_workqueue;
+
+	nvhdcp_vdbg("%s(): created misc device %s\n", __func__, nvhdcp->name);
+
+	return nvhdcp;
+free_workqueue:
+	destroy_workqueue(nvhdcp->downstream_wq);
+	i2c_release_client(nvhdcp->client);
+free_nvhdcp:
+	kfree(nvhdcp);
+	nvhdcp_err("unable to create device.\n");
+	return ERR_PTR(e);
+}
+
+void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp)
+{
+	misc_deregister(&nvhdcp->miscdev);
+	tegra_nvhdcp_off(nvhdcp);
+	destroy_workqueue(nvhdcp->downstream_wq);
+	i2c_release_client(nvhdcp->client);
+	kfree(nvhdcp);
+}
diff --git a/drivers/staging/tegra/video/dc/nvhdcp.h b/drivers/staging/tegra/video/dc/nvhdcp.h
new file mode 100644
index 000000000000..ce4c7f806745
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/nvhdcp.h
@@ -0,0 +1,46 @@
+/*
+ * drivers/video/tegra/dc/nvhdcp.h
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_NVHDCP_H
+#define __DRIVERS_VIDEO_TEGRA_DC_NVHDCP_H
+#include <video/nvhdcp.h>
+
+struct tegra_nvhdcp;
+#ifdef CONFIG_TEGRA_NVHDCP
+void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp, bool hpd);
+int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol);
+void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp);
+void tegra_nvhdcp_resume(struct tegra_nvhdcp *nvhdcp);
+struct tegra_nvhdcp *tegra_nvhdcp_create(struct tegra_dc_hdmi_data *hdmi,
+					int id, int bus);
+void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp);
+#else
+static inline void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp,
+					bool hpd) { }
+static inline int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol)
+{
+	return 0;
+}
+static inline void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp) { }
+static inline void tegra_nvhdcp_resume(struct tegra_nvhdcp *nvhdcp) { }
+static inline struct tegra_nvhdcp *tegra_nvhdcp_create(
+					struct tegra_dc_hdmi_data *hdmi,
+					int id, int bus)
+{
+	return NULL;
+}
+static inline void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp) { }
+#endif
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/nvsd.c b/drivers/staging/tegra/video/dc/nvsd.c
new file mode 100644
index 000000000000..2731847dbc0f
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/nvsd.c
@@ -0,0 +1,914 @@
+/*
+ * drivers/video/tegra/dc/nvsd.c
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/stat.h>
+
+#include "dc.h"
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "nvsd.h"
+
+/* Elements for sysfs access */
+#define NVSD_ATTR(__name) static struct kobj_attribute nvsd_attr_##__name = \
+	__ATTR(__name, S_IRUGO|S_IWUSR, nvsd_settings_show, nvsd_settings_store)
+#define NVSD_ATTRS_ENTRY(__name) (&nvsd_attr_##__name.attr)
+#define IS_NVSD_ATTR(__name) (attr == &nvsd_attr_##__name)
+
+static ssize_t nvsd_settings_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf);
+
+static ssize_t nvsd_settings_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count);
+
+static ssize_t nvsd_registers_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf);
+
+NVSD_ATTR(enable);
+NVSD_ATTR(aggressiveness);
+NVSD_ATTR(phase_in_settings);
+NVSD_ATTR(phase_in_adjustments);
+NVSD_ATTR(bin_width);
+NVSD_ATTR(hw_update_delay);
+NVSD_ATTR(use_vid_luma);
+NVSD_ATTR(coeff);
+NVSD_ATTR(blp_time_constant);
+NVSD_ATTR(blp_step);
+NVSD_ATTR(fc_time_limit);
+NVSD_ATTR(fc_threshold);
+NVSD_ATTR(lut);
+NVSD_ATTR(bltf);
+static struct kobj_attribute nvsd_attr_registers =
+	__ATTR(registers, S_IRUGO, nvsd_registers_show, NULL);
+
+static struct attribute *nvsd_attrs[] = {
+	NVSD_ATTRS_ENTRY(enable),
+	NVSD_ATTRS_ENTRY(aggressiveness),
+	NVSD_ATTRS_ENTRY(phase_in_settings),
+	NVSD_ATTRS_ENTRY(phase_in_adjustments),
+	NVSD_ATTRS_ENTRY(bin_width),
+	NVSD_ATTRS_ENTRY(hw_update_delay),
+	NVSD_ATTRS_ENTRY(use_vid_luma),
+	NVSD_ATTRS_ENTRY(coeff),
+	NVSD_ATTRS_ENTRY(blp_time_constant),
+	NVSD_ATTRS_ENTRY(blp_step),
+	NVSD_ATTRS_ENTRY(fc_time_limit),
+	NVSD_ATTRS_ENTRY(fc_threshold),
+	NVSD_ATTRS_ENTRY(lut),
+	NVSD_ATTRS_ENTRY(bltf),
+	NVSD_ATTRS_ENTRY(registers),
+	NULL,
+};
+
+static struct attribute_group nvsd_attr_group = {
+	.attrs = nvsd_attrs,
+};
+
+static struct kobject *nvsd_kobj;
+
+/* shared brightness variable */
+static atomic_t *sd_brightness;
+/* shared boolean for manual K workaround */
+static atomic_t man_k_until_blank = ATOMIC_INIT(0);
+
+static u8 nvsd_get_bw_idx(struct tegra_dc_sd_settings *settings)
+{
+	u8 bw;
+
+	switch (settings->bin_width) {
+	default:
+	case -1:
+		/* A -1 bin-width indicates 'automatic',
+		 * based upon aggressiveness. */
+		settings->bin_width = -1;
+		switch (settings->aggressiveness) {
+		default:
+		case 0:
+		case 1:
+			bw = SD_BIN_WIDTH_ONE;
+			break;
+		case 2:
+		case 3:
+		case 4:
+			bw = SD_BIN_WIDTH_TWO;
+			break;
+		case 5:
+			bw = SD_BIN_WIDTH_FOUR;
+			break;
+		}
+		break;
+	case 1:
+		bw = SD_BIN_WIDTH_ONE;
+		break;
+	case 2:
+		bw = SD_BIN_WIDTH_TWO;
+		break;
+	case 4:
+		bw = SD_BIN_WIDTH_FOUR;
+		break;
+	case 8:
+		bw = SD_BIN_WIDTH_EIGHT;
+		break;
+	}
+	return bw >> 3;
+}
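+/*
+ * nvsd_get_bw_idx() assumes the SD_BIN_WIDTH_* values encode the bin
+ * width as (index << 3) within DC_DISP_SD_CONTROL: shifting right by 3
+ * yields a 0..3 index into the lut/bltf tables, and callers shift left
+ * by 3 again when programming the register.
+ */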
+
+static bool nvsd_phase_in_adjustments(struct tegra_dc *dc,
+	struct tegra_dc_sd_settings *settings)
+{
+	u8 step, cur_sd_brightness;
+	u16 target_k, cur_k;
+	u32 man_k, val;
+
+	cur_sd_brightness = atomic_read(sd_brightness);
+
+	target_k = tegra_dc_readl(dc, DC_DISP_SD_HW_K_VALUES);
+	target_k = SD_HW_K_R(target_k);
+	cur_k = tegra_dc_readl(dc, DC_DISP_SD_MAN_K_VALUES);
+	cur_k = SD_HW_K_R(cur_k);
+
+	/* read brightness value */
+	val = tegra_dc_readl(dc, DC_DISP_SD_BL_CONTROL);
+	val = SD_BLC_BRIGHTNESS(val);
+
+	step = settings->phase_adj_step;
+	if (cur_sd_brightness != val || target_k != cur_k) {
+		if (!step)
+			step = ADJ_PHASE_STEP;
+
+			/* Phase in Backlight and Pixel K
+			 * every ADJ_PHASE_STEP frames */
+		if ((step-- & ADJ_PHASE_STEP) == ADJ_PHASE_STEP) {
+
+			if (val != cur_sd_brightness) {
+				if (val > cur_sd_brightness)
+					cur_sd_brightness++;
+				else
+					cur_sd_brightness--;
+			}
+
+			if (target_k != cur_k) {
+				if (target_k > cur_k)
+					cur_k += K_STEP;
+				else
+					cur_k -= K_STEP;
+			}
+
+			/* Set manual k value */
+			man_k = SD_MAN_K_R(cur_k) |
+				SD_MAN_K_G(cur_k) | SD_MAN_K_B(cur_k);
+			tegra_dc_writel(dc, man_k, DC_DISP_SD_MAN_K_VALUES);
+			/* Set manual brightness value */
+			atomic_set(sd_brightness, cur_sd_brightness);
+		}
+		settings->phase_adj_step = step;
+		return true;
+	} else
+		return false;
+}
+
+/* phase in the luts based on the current and max step */
+static void nvsd_phase_in_luts(struct tegra_dc_sd_settings *settings,
+	struct tegra_dc *dc)
+{
+	u32 val;
+	u8 bw_idx;
+	int i;
+	u16 phase_settings_step = settings->phase_settings_step;
+	u16 num_phase_in_steps = settings->num_phase_in_steps;
+
+	bw_idx = nvsd_get_bw_idx(settings);
+
+	/* Phase in Final LUT */
+	for (i = 0; i < DC_DISP_SD_LUT_NUM; i++) {
+		val = SD_LUT_R((settings->lut[bw_idx][i].r *
+				phase_settings_step)/num_phase_in_steps) |
+			SD_LUT_G((settings->lut[bw_idx][i].g *
+				phase_settings_step)/num_phase_in_steps) |
+			SD_LUT_B((settings->lut[bw_idx][i].b *
+				phase_settings_step)/num_phase_in_steps);
+
+		tegra_dc_writel(dc, val, DC_DISP_SD_LUT(i));
+	}
+	/* Phase in Final BLTF */
+	for (i = 0; i < DC_DISP_SD_BL_TF_NUM; i++) {
+		val = SD_BL_TF_POINT_0(255-((255-settings->bltf[bw_idx][i][0])
+				* phase_settings_step)/num_phase_in_steps) |
+			SD_BL_TF_POINT_1(255-((255-settings->bltf[bw_idx][i][1])
+				* phase_settings_step)/num_phase_in_steps) |
+			SD_BL_TF_POINT_2(255-((255-settings->bltf[bw_idx][i][2])
+				* phase_settings_step)/num_phase_in_steps) |
+			SD_BL_TF_POINT_3(255-((255-settings->bltf[bw_idx][i][3])
+				* phase_settings_step)/num_phase_in_steps);
+
+		tegra_dc_writel(dc, val, DC_DISP_SD_BL_TF(i));
+	}
+}
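+/*
+ * The phase-in above is a linear ramp: each LUT entry moves from 0 to
+ * its target as target * step / num_steps, and each BLTF point moves
+ * down from the 255 identity value as 255 - (255 - target) * step /
+ * num_steps.
+ */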
+
+/* handle the commands that may be invoked for phase_in_settings */
+static void nvsd_cmd_handler(struct tegra_dc_sd_settings *settings,
+	struct tegra_dc *dc)
+{
+	u32 val;
+	u8 bw_idx, bw;
+
+	if (settings->cmd & ENABLE) {
+		settings->phase_settings_step++;
+		if (settings->phase_settings_step >=
+				settings->num_phase_in_steps)
+			settings->cmd &= ~ENABLE;
+
+		nvsd_phase_in_luts(settings, dc);
+	}
+	if (settings->cmd & DISABLE) {
+		settings->phase_settings_step--;
+		nvsd_phase_in_luts(settings, dc);
+		if (settings->phase_settings_step == 0) {
+			/* finish up aggressiveness phase in */
+			if (settings->cmd & AGG_CHG)
+				settings->aggressiveness = settings->final_agg;
+			settings->cmd = NO_CMD;
+			settings->enable = 0;
+			nvsd_init(dc, settings);
+		}
+	}
+	if (settings->cmd & AGG_CHG) {
+		if (settings->aggressiveness == settings->final_agg)
+			settings->cmd &= ~AGG_CHG;
+		if ((settings->cur_agg_step++ & (STEPS_PER_AGG_CHG - 1)) == 0) {
+			if (settings->final_agg > settings->aggressiveness)
+				settings->aggressiveness++;
+			else
+				settings->aggressiveness--;
+
+			/* Update aggressiveness value in HW */
+			val = tegra_dc_readl(dc, DC_DISP_SD_CONTROL);
+			val &= ~SD_AGGRESSIVENESS(0x7);
+			val |= SD_AGGRESSIVENESS(settings->aggressiveness);
+
+			/* Adjust bin_width for automatic setting */
+			if (settings->bin_width == -1) {
+				bw_idx = nvsd_get_bw_idx(settings);
+
+				bw = bw_idx << 3;
+
+				val &= ~SD_BIN_WIDTH_MASK;
+				val |= bw;
+			}
+			tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);
+
+			nvsd_phase_in_luts(settings, dc);
+		}
+	}
+}
+
+static bool nvsd_update_enable(struct tegra_dc_sd_settings *settings,
+	int enable_val)
+{
+	if (enable_val != 1 && enable_val != 0)
+		return false;
+
+	if (!settings->cmd && settings->enable != enable_val) {
+		settings->num_phase_in_steps =
+			STEPS_PER_AGG_LVL*settings->aggressiveness;
+		settings->phase_settings_step = enable_val ?
+			0 : settings->num_phase_in_steps;
+	}
+
+	if (settings->enable != enable_val || settings->cmd & DISABLE) {
+		settings->cmd &= ~(ENABLE | DISABLE);
+		if (!settings->enable && enable_val)
+			settings->cmd |= PHASE_IN;
+		settings->cmd |= enable_val ? ENABLE : DISABLE;
+		return true;
+	}
+
+	return false;
+}
+
+static bool nvsd_update_agg(struct tegra_dc_sd_settings *settings, int agg_val)
+{
+	int i;
+	int pri_lvl = SD_AGG_PRI_LVL(agg_val);
+	int agg_lvl = SD_GET_AGG(agg_val);
+	struct tegra_dc_sd_agg_priorities *sd_agg_priorities =
+		&settings->agg_priorities;
+
+	if (agg_lvl > 5 || agg_lvl < 0)
+		return false;
+	else if (agg_lvl == 0 && pri_lvl == 0)
+		return false;
+
+	if (pri_lvl >= 0 && pri_lvl < 4)
+		sd_agg_priorities->agg[pri_lvl] = agg_lvl;
+
+	for (i = NUM_AGG_PRI_LVLS - 1; i >= 0; i--) {
+		if (sd_agg_priorities->agg[i])
+			break;
+	}
+
+	sd_agg_priorities->pri_lvl = i;
+	pri_lvl = i;
+	agg_lvl = sd_agg_priorities->agg[i];
+
+	if (settings->phase_in_settings && settings->enable &&
+		settings->aggressiveness != agg_lvl) {
+
+		settings->final_agg = agg_lvl;
+		settings->cmd |= AGG_CHG;
+		settings->cur_agg_step = 0;
+		return true;
+	} else if (settings->aggressiveness != agg_lvl) {
+		settings->aggressiveness = agg_lvl;
+		return true;
+	}
+
+	return false;
+}
+
+/* Functional initialization */
+void nvsd_init(struct tegra_dc *dc, struct tegra_dc_sd_settings *settings)
+{
+	u32 i = 0;
+	u32 val = 0;
+	u32 bw_idx = 0;
+	/* TODO: check if HW says SD's available */
+
+	/* If SD's not present or disabled, clear the register and return. */
+	if (!settings || settings->enable == 0) {
+		/* clear the brightness val, too. */
+		if (sd_brightness)
+			atomic_set(sd_brightness, 255);
+
+		sd_brightness = NULL;
+
+		if (settings)
+			settings->phase_settings_step = 0;
+		tegra_dc_writel(dc, 0, DC_DISP_SD_CONTROL);
+		return;
+	}
+
+	dev_dbg(&dc->ndev->dev, "NVSD Init:\n");
+
+	/* init agg_priorities */
+	if (!settings->agg_priorities.agg[0])
+		settings->agg_priorities.agg[0] = settings->aggressiveness;
+
+	/* WAR: Settings will not be valid until the next flip.
+	 * Thus, set manual K to either HW's current value (if
+	 * we're already enabled) or a non-effective value (if
+	 * we're about to enable). */
+	val = tegra_dc_readl(dc, DC_DISP_SD_CONTROL);
+
+	if (val & SD_ENABLE_NORMAL) {
+		if (settings->phase_in_adjustments)
+			i = tegra_dc_readl(dc, DC_DISP_SD_MAN_K_VALUES);
+		else
+			i = tegra_dc_readl(dc, DC_DISP_SD_HW_K_VALUES);
+	} else {
+		i = 0; /* 0 values for RGB = 1.0, i.e. non-affected */
+	}
+
+	tegra_dc_writel(dc, i, DC_DISP_SD_MAN_K_VALUES);
+	/* Enable manual correction mode here so that changing the
+	 * settings won't immediately impact display behavior. */
+	val |= SD_CORRECTION_MODE_MAN;
+	tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);
+
+	bw_idx = nvsd_get_bw_idx(settings);
+
+	/* Write LUT */
+	if (!settings->cmd) {
+		dev_dbg(&dc->ndev->dev, "  LUT:\n");
+
+		for (i = 0; i < DC_DISP_SD_LUT_NUM; i++) {
+			val = SD_LUT_R(settings->lut[bw_idx][i].r) |
+				SD_LUT_G(settings->lut[bw_idx][i].g) |
+				SD_LUT_B(settings->lut[bw_idx][i].b);
+			tegra_dc_writel(dc, val, DC_DISP_SD_LUT(i));
+
+			dev_dbg(&dc->ndev->dev, "    %d: 0x%08x\n", i, val);
+		}
+	}
+
+	/* Write BL TF */
+	if (!settings->cmd) {
+		dev_dbg(&dc->ndev->dev, "  BL_TF:\n");
+
+		for (i = 0; i < DC_DISP_SD_BL_TF_NUM; i++) {
+			val = SD_BL_TF_POINT_0(settings->bltf[bw_idx][i][0]) |
+				SD_BL_TF_POINT_1(settings->bltf[bw_idx][i][1]) |
+				SD_BL_TF_POINT_2(settings->bltf[bw_idx][i][2]) |
+				SD_BL_TF_POINT_3(settings->bltf[bw_idx][i][3]);
+
+			tegra_dc_writel(dc, val, DC_DISP_SD_BL_TF(i));
+
+			dev_dbg(&dc->ndev->dev, "    %d: 0x%08x\n", i, val);
+		}
+	} else if ((settings->cmd & PHASE_IN)) {
+		settings->cmd &= ~PHASE_IN;
+		/* Write NO_OP values for BLTF */
+		for (i = 0; i < DC_DISP_SD_BL_TF_NUM; i++) {
+			val = SD_BL_TF_POINT_0(0xFF) |
+				SD_BL_TF_POINT_1(0xFF) |
+				SD_BL_TF_POINT_2(0xFF) |
+				SD_BL_TF_POINT_3(0xFF);
+
+			tegra_dc_writel(dc, val, DC_DISP_SD_BL_TF(i));
+
+			dev_dbg(&dc->ndev->dev, "    %d: 0x%08x\n", i, val);
+		}
+	}
+
+	/* Set step correctly on init */
+	if (!settings->cmd && settings->phase_in_settings) {
+		settings->num_phase_in_steps = STEPS_PER_AGG_LVL *
+			settings->aggressiveness;
+		settings->phase_settings_step = settings->enable ?
+			settings->num_phase_in_steps : 0;
+	}
+
+	/* Write Coeff */
+	val = SD_CSC_COEFF_R(settings->coeff.r) |
+		SD_CSC_COEFF_G(settings->coeff.g) |
+		SD_CSC_COEFF_B(settings->coeff.b);
+	tegra_dc_writel(dc, val, DC_DISP_SD_CSC_COEFF);
+	dev_dbg(&dc->ndev->dev, "  COEFF: 0x%08x\n", val);
+
+	/* Write BL Params */
+	val = SD_BLP_TIME_CONSTANT(settings->blp.time_constant) |
+		SD_BLP_STEP(settings->blp.step);
+	tegra_dc_writel(dc, val, DC_DISP_SD_BL_PARAMETERS);
+	dev_dbg(&dc->ndev->dev, "  BLP: 0x%08x\n", val);
+
+	/* Write Auto/Manual PWM */
+	val = (settings->use_auto_pwm) ? SD_BLC_MODE_AUTO : SD_BLC_MODE_MAN;
+	tegra_dc_writel(dc, val, DC_DISP_SD_BL_CONTROL);
+	dev_dbg(&dc->ndev->dev, "  BL_CONTROL: 0x%08x\n", val);
+
+	/* Write Flicker Control */
+	val = SD_FC_TIME_LIMIT(settings->fc.time_limit) |
+		SD_FC_THRESHOLD(settings->fc.threshold);
+	tegra_dc_writel(dc, val, DC_DISP_SD_FLICKER_CONTROL);
+	dev_dbg(&dc->ndev->dev, "  FLICKER_CONTROL: 0x%08x\n", val);
+
+	/* Manage SD Control */
+	val = 0;
+	/* Stay in manual correction mode until the next flip. */
+	val |= SD_CORRECTION_MODE_MAN;
+	/* Enable / One-Shot */
+	val |= (settings->enable == 2) ?
+		(SD_ENABLE_ONESHOT | SD_ONESHOT_ENABLE) :
+		SD_ENABLE_NORMAL;
+	/* HW Update Delay */
+	val |= SD_HW_UPDATE_DLY(settings->hw_update_delay);
+	/* Video Luma */
+	val |= (settings->use_vid_luma) ? SD_USE_VID_LUMA : 0;
+	/* Aggressiveness */
+	val |= SD_AGGRESSIVENESS(settings->aggressiveness);
+	/* Bin Width (value derived from bw_idx) */
+	val |= bw_idx << 3;
+	/* Finally, Write SD Control */
+	tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);
+	dev_dbg(&dc->ndev->dev, "  SD_CONTROL: 0x%08x\n", val);
+
+	/* set the brightness pointer */
+	sd_brightness = settings->sd_brightness;
+
+	/* note that we're in manual K until the next flip */
+	atomic_set(&man_k_until_blank, 1);
+}
+
+/* Periodic update */
+bool nvsd_update_brightness(struct tegra_dc *dc)
+{
+	u32 val = 0;
+	int cur_sd_brightness;
+	struct tegra_dc_sd_settings *settings = dc->out->sd_settings;
+
+	if (sd_brightness) {
+		if (atomic_read(&man_k_until_blank) &&
+					!settings->phase_in_adjustments) {
+			val = tegra_dc_readl(dc, DC_DISP_SD_CONTROL);
+			val &= ~SD_CORRECTION_MODE_MAN;
+			tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);
+			atomic_set(&man_k_until_blank, 0);
+		}
+
+		if (settings->cmd)
+			nvsd_cmd_handler(settings, dc);
+
+		/* nvsd_cmd_handler may turn off didim */
+		if (!settings->enable)
+			return true;
+
+		cur_sd_brightness = atomic_read(sd_brightness);
+
+		/* read brightness value */
+		val = tegra_dc_readl(dc, DC_DISP_SD_BL_CONTROL);
+		val = SD_BLC_BRIGHTNESS(val);
+
+		if (settings->phase_in_adjustments) {
+			return nvsd_phase_in_adjustments(dc, settings);
+		} else if (val != (u32)cur_sd_brightness) {
+			/* set brightness value and note the update */
+			atomic_set(sd_brightness, (int)val);
+			return true;
+		}
+	}
+
+	/* No update needed. */
+	return false;
+}
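
Context for the polling contract above: nvsd_update_brightness() is driven from the display's frame-end path, and a true return means the smartdimmer produced a new brightness value that the caller must push to the backlight. A minimal caller sketch; tegra_dc_frame_end() and tegra_dc_set_backlight() are hypothetical names used only for illustration:

/* Hypothetical caller sketch; neither function name below is part of
 * this patch. */
static void tegra_dc_frame_end(struct tegra_dc *dc)
{
	if (nvsd_update_brightness(dc)) {
		struct tegra_dc_sd_settings *sd = dc->out->sd_settings;

		/* sd_brightness carries the 0-255 value computed above */
		tegra_dc_set_backlight(dc, atomic_read(sd->sd_brightness));
	}
}
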
+
+static ssize_t nvsd_lut_show(struct tegra_dc_sd_settings *sd_settings,
+	char *buf, ssize_t res)
+{
+	u32 i;
+	u32 j;
+
+	/* scnprintf keeps res <= PAGE_SIZE; snprintf's would-be length
+	 * could push PAGE_SIZE - res negative on later iterations. */
+	for (i = 0; i < NUM_BIN_WIDTHS; i++) {
+		res += scnprintf(buf + res, PAGE_SIZE - res,
+			"Bin Width: %d\n", 1 << i);
+
+		for (j = 0; j < DC_DISP_SD_LUT_NUM; j++) {
+			res += scnprintf(buf + res,
+				PAGE_SIZE - res,
+				"%d: R: %3d / G: %3d / B: %3d\n",
+				j,
+				sd_settings->lut[i][j].r,
+				sd_settings->lut[i][j].g,
+				sd_settings->lut[i][j].b);
+		}
+	}
+	return res;
+}
+
+static ssize_t nvsd_bltf_show(struct tegra_dc_sd_settings *sd_settings,
+	char *buf, ssize_t res)
+{
+	u32 i;
+	u32 j;
+
+	for (i = 0; i < NUM_BIN_WIDTHS; i++) {
+		res += scnprintf(buf + res, PAGE_SIZE - res,
+			"Bin Width: %d\n", 1 << i);
+
+		for (j = 0; j < DC_DISP_SD_BL_TF_NUM; j++) {
+			res += scnprintf(buf + res,
+				PAGE_SIZE - res,
+				"%d: 0: %3d / 1: %3d / 2: %3d / 3: %3d\n",
+				j,
+				sd_settings->bltf[i][j][0],
+				sd_settings->bltf[i][j][1],
+				sd_settings->bltf[i][j][2],
+				sd_settings->bltf[i][j][3]);
+		}
+	}
+	return res;
+}
+
+/* Sysfs accessors */
+static ssize_t nvsd_settings_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	struct device *dev = container_of((kobj->parent), struct device, kobj);
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings;
+	ssize_t res = 0;
+
+	if (sd_settings) {
+		if (IS_NVSD_ATTR(enable))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->enable);
+		else if (IS_NVSD_ATTR(aggressiveness))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->aggressiveness);
+		else if (IS_NVSD_ATTR(phase_in_settings))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->phase_in_settings);
+		else if (IS_NVSD_ATTR(phase_in_adjustments))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->phase_in_adjustments);
+		else if (IS_NVSD_ATTR(bin_width))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->bin_width);
+		else if (IS_NVSD_ATTR(hw_update_delay))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->hw_update_delay);
+		else if (IS_NVSD_ATTR(use_vid_luma))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->use_vid_luma);
+		else if (IS_NVSD_ATTR(coeff))
+			res = snprintf(buf, PAGE_SIZE,
+				"R: %d / G: %d / B: %d\n",
+				sd_settings->coeff.r,
+				sd_settings->coeff.g,
+				sd_settings->coeff.b);
+		else if (IS_NVSD_ATTR(blp_time_constant))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->blp.time_constant);
+		else if (IS_NVSD_ATTR(blp_step))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->blp.step);
+		else if (IS_NVSD_ATTR(fc_time_limit))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->fc.time_limit);
+		else if (IS_NVSD_ATTR(fc_threshold))
+			res = snprintf(buf, PAGE_SIZE, "%d\n",
+				sd_settings->fc.threshold);
+		else if (IS_NVSD_ATTR(lut))
+			res = nvsd_lut_show(sd_settings, buf, res);
+		else if (IS_NVSD_ATTR(bltf))
+			res = nvsd_bltf_show(sd_settings, buf, res);
+		else
+			res = -EINVAL;
+	} else {
+		/* This shouldn't be reachable. But just in case... */
+		res = -EINVAL;
+	}
+
+	return res;
+}
+
+#define nvsd_check_and_update(_min, _max, _varname) do { \
+	int val = simple_strtol(buf, NULL, 10); \
+	if (val >= _min && val <= _max) { \
+		sd_settings->_varname = val; \
+		settings_updated = true; \
+	} } while (0)
+
+#define nvsd_get_multi(_ele, _num, _act, _min, _max) do { \
+	char *b, *c, *orig_b; \
+	b = orig_b = kstrdup(buf, GFP_KERNEL); \
+	for (_act = 0; _act < _num; _act++) { \
+		if (!b) \
+			break; \
+		b = strim(b); \
+		c = strsep(&b, " "); \
+		if (!strlen(c)) \
+			break; \
+		_ele[_act] = simple_strtol(c, NULL, 10); \
+		if (_ele[_act] < _min || _ele[_act] > _max) \
+			break; \
+	} \
+	kfree(orig_b); \
+} while (0)
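
The nvsd_get_multi() macro tokenizes a space-separated sysfs write into an integer array and stops at the first malformed or out-of-range element; callers then compare the loop counter against the expected count to distinguish a complete parse from a truncated one. A standalone userspace rendition of the same tokenization, for illustration only (it skips the strim() trimming, so consecutive spaces end the parse early):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse up to num space-separated integers in [min, max] from buf,
 * returning how many were accepted, like the macro's loop counter. */
static int parse_multi(const char *buf, int *ele, int num, int min, int max)
{
	char *dup = strdup(buf), *b = dup, *c;
	int n;

	for (n = 0; n < num && b; n++) {
		c = strsep(&b, " \n");
		if (!c || !*c)
			break;
		ele[n] = (int)strtol(c, NULL, 10);
		if (ele[n] < min || ele[n] > max)
			break;
	}
	free(dup);
	return n;
}

int main(void)
{
	int ele[3];
	int n = parse_multi("4 8 12", ele, 3, 0, 15);

	printf("parsed %d: %d %d %d\n", n, ele[0], ele[1], ele[2]);
	return 0;
}
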
+
+static int nvsd_lut_store(struct tegra_dc_sd_settings *sd_settings,
+	const char *buf)
+{
+	int ele[3 * DC_DISP_SD_LUT_NUM * NUM_BIN_WIDTHS];
+	int i = 0;
+	int j = 0;
+	int num = 3 * DC_DISP_SD_LUT_NUM * NUM_BIN_WIDTHS;
+
+	nvsd_get_multi(ele, num, i, 0, 255);
+
+	if (i != num)
+		return -EINVAL;
+
+	for (i = 0; i < NUM_BIN_WIDTHS; i++) {
+		for (j = 0; j < DC_DISP_SD_LUT_NUM; j++) {
+			sd_settings->lut[i][j].r =
+				ele[(i * DC_DISP_SD_LUT_NUM + j) * 3 + 0];
+			sd_settings->lut[i][j].g =
+				ele[(i * DC_DISP_SD_LUT_NUM + j) * 3 + 1];
+			sd_settings->lut[i][j].b =
+				ele[(i * DC_DISP_SD_LUT_NUM + j) * 3 + 2];
+		}
+	}
+	return 0;
+}
+
+static int nvsd_bltf_store(struct tegra_dc_sd_settings *sd_settings,
+	const char *buf)
+{
+	int ele[4 * DC_DISP_SD_BL_TF_NUM * NUM_BIN_WIDTHS];
+	int i = 0, j = 0, num = ARRAY_SIZE(ele);
+
+	nvsd_get_multi(ele, num, i, 0, 255);
+
+	if (i != num)
+		return -EINVAL;
+
+	for (i = 0; i < NUM_BIN_WIDTHS; i++) {
+		for (j = 0; j < DC_DISP_SD_BL_TF_NUM; j++) {
+			size_t base = (i * DC_DISP_SD_BL_TF_NUM + j) * 4;
+			sd_settings->bltf[i][j][0] = ele[base + 0];
+			sd_settings->bltf[i][j][1] = ele[base + 1];
+			sd_settings->bltf[i][j][2] = ele[base + 2];
+			sd_settings->bltf[i][j][3] = ele[base + 3];
+		}
+	}
+
+	return 0;
+}
+
+static ssize_t nvsd_settings_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct device *dev = container_of((kobj->parent), struct device, kobj);
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings;
+	ssize_t res = count;
+	bool settings_updated = false;
+	long int result;
+	int err;
+
+	if (sd_settings) {
+		if (IS_NVSD_ATTR(enable)) {
+			if (sd_settings->phase_in_settings) {
+				err = kstrtol(buf, 10, &result);
+				if (err)
+					return err;
+
+				if (nvsd_update_enable(sd_settings, result))
+					nvsd_check_and_update(1, 1, enable);
+
+			} else {
+				nvsd_check_and_update(0, 1, enable);
+			}
+		} else if (IS_NVSD_ATTR(aggressiveness)) {
+			err = kstrtol(buf, 10, &result);
+			if (err)
+				return err;
+
+			if (nvsd_update_agg(sd_settings, result) &&
+					!sd_settings->phase_in_settings)
+				settings_updated = true;
+
+		} else if (IS_NVSD_ATTR(phase_in_settings)) {
+			nvsd_check_and_update(0, 1, phase_in_settings);
+		} else if (IS_NVSD_ATTR(phase_in_adjustments)) {
+			nvsd_check_and_update(0, 1, phase_in_adjustments);
+		} else if (IS_NVSD_ATTR(bin_width)) {
+			nvsd_check_and_update(0, 8, bin_width);
+		} else if (IS_NVSD_ATTR(hw_update_delay)) {
+			nvsd_check_and_update(0, 2, hw_update_delay);
+		} else if (IS_NVSD_ATTR(use_vid_luma)) {
+			nvsd_check_and_update(0, 1, use_vid_luma);
+		} else if (IS_NVSD_ATTR(coeff)) {
+			int ele[3], i = 0, num = 3;
+			nvsd_get_multi(ele, num, i, 0, 15);
+
+			if (i == num) {
+				sd_settings->coeff.r = ele[0];
+				sd_settings->coeff.g = ele[1];
+				sd_settings->coeff.b = ele[2];
+				settings_updated = true;
+			} else {
+				res = -EINVAL;
+			}
+		} else if (IS_NVSD_ATTR(blp_time_constant)) {
+			nvsd_check_and_update(0, 1024, blp.time_constant);
+		} else if (IS_NVSD_ATTR(blp_step)) {
+			nvsd_check_and_update(0, 255, blp.step);
+		} else if (IS_NVSD_ATTR(fc_time_limit)) {
+			nvsd_check_and_update(0, 255, fc.time_limit);
+		} else if (IS_NVSD_ATTR(fc_threshold)) {
+			nvsd_check_and_update(0, 255, fc.threshold);
+		} else if (IS_NVSD_ATTR(lut)) {
+			if (nvsd_lut_store(sd_settings, buf))
+				res = -EINVAL;
+			else
+				settings_updated = true;
+		} else if (IS_NVSD_ATTR(bltf)) {
+			if (nvsd_bltf_store(sd_settings, buf))
+				res = -EINVAL;
+			else
+				settings_updated = true;
+		} else {
+			res = -EINVAL;
+		}
+
+		/* Re-init if our settings were updated. */
+		if (settings_updated) {
+			mutex_lock(&dc->lock);
+			if (!dc->enabled) {
+				mutex_unlock(&dc->lock);
+				return -ENODEV;
+			}
+
+			tegra_dc_hold_dc_out(dc);
+			nvsd_init(dc, sd_settings);
+			tegra_dc_release_dc_out(dc);
+
+			mutex_unlock(&dc->lock);
+
+			/* Update backlight state IFF we're disabling! */
+			if (!sd_settings->enable && sd_settings->bl_device) {
+				/* Do the actual brightness update outside of
+				 * the mutex */
+				struct platform_device *pdev =
+					sd_settings->bl_device;
+				struct backlight_device *bl =
+					platform_get_drvdata(pdev);
+
+				if (bl)
+					backlight_update_status(bl);
+			}
+		}
+	} else {
+		/* This shouldn't be reachable. But just in case... */
+		res = -EINVAL;
+	}
+
+	return res;
+}
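
The store path above makes every smartdimmer knob writable from userspace through the attribute files under the "smartdimmer" kobject created in nvsd_create_sysfs() below. A small userspace sketch; the sysfs path is an example only, since the real location depends on where the nvhost device lands in the device hierarchy:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Example path only; adjust to the platform's actual device node. */
#define SD_DIR "/sys/devices/platform/host1x/tegradc.0/smartdimmer/"

static int sd_write(const char *attr, const char *val)
{
	char path[128];
	int fd, ret;

	snprintf(path, sizeof(path), SD_DIR "%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, val, strlen(val)) < 0 ? -1 : 0;
	close(fd);
	return ret;
}

int main(void)
{
	/* Raise aggressiveness first, then enable so phase-in uses it. */
	sd_write("aggressiveness", "3");
	return sd_write("enable", "1");
}
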
+
+#define NVSD_PRINT_REG(__name) { \
+	u32 val = tegra_dc_readl(dc, __name); \
+	res += scnprintf(buf + res, PAGE_SIZE - res, #__name ": 0x%08x\n", \
+		val); \
+}
+
+#define NVSD_PRINT_REG_ARRAY(__name) { \
+	u32 val = 0, i = 0; \
+	res += scnprintf(buf + res, PAGE_SIZE - res, #__name ":\n"); \
+	for (i = 0; i < __name##_NUM; i++) { \
+		val = tegra_dc_readl(dc, __name(i)); \
+		res += scnprintf(buf + res, PAGE_SIZE - res, "  %d: 0x%08x\n", \
+			i, val); \
+	} \
+}
+
+static ssize_t nvsd_registers_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	struct device *dev = container_of((kobj->parent), struct device, kobj);
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+	ssize_t res = 0;
+
+	mutex_lock(&dc->lock);
+	if (!dc->enabled) {
+		mutex_unlock(&dc->lock);
+		return -ENODEV;
+	}
+
+	mutex_unlock(&dc->lock);
+	NVSD_PRINT_REG(DC_DISP_SD_CONTROL);
+	NVSD_PRINT_REG(DC_DISP_SD_CSC_COEFF);
+	NVSD_PRINT_REG_ARRAY(DC_DISP_SD_LUT);
+	NVSD_PRINT_REG(DC_DISP_SD_FLICKER_CONTROL);
+	NVSD_PRINT_REG(DC_DISP_SD_PIXEL_COUNT);
+	NVSD_PRINT_REG_ARRAY(DC_DISP_SD_HISTOGRAM);
+	NVSD_PRINT_REG(DC_DISP_SD_BL_PARAMETERS);
+	NVSD_PRINT_REG_ARRAY(DC_DISP_SD_BL_TF);
+	NVSD_PRINT_REG(DC_DISP_SD_BL_CONTROL);
+	NVSD_PRINT_REG(DC_DISP_SD_HW_K_VALUES);
+	NVSD_PRINT_REG(DC_DISP_SD_MAN_K_VALUES);
+
+	return res;
+}
+
+/* Sysfs initializer */
+int nvsd_create_sysfs(struct device *dev)
+{
+	int retval = 0;
+
+	nvsd_kobj = kobject_create_and_add("smartdimmer", &dev->kobj);
+
+	if (!nvsd_kobj)
+		return -ENOMEM;
+
+	retval = sysfs_create_group(nvsd_kobj, &nvsd_attr_group);
+
+	if (retval) {
+		kobject_put(nvsd_kobj);
+		dev_err(dev, "%s: failed to create attributes\n", __func__);
+	}
+
+	return retval;
+}
+
+/* Sysfs destructor */
+void nvsd_remove_sysfs(struct device *dev)
+{
+	if (nvsd_kobj) {
+		sysfs_remove_group(nvsd_kobj, &nvsd_attr_group);
+		kobject_put(nvsd_kobj);
+	}
+}
diff --git a/drivers/staging/tegra/video/dc/nvsd.h b/drivers/staging/tegra/video/dc/nvsd.h
new file mode 100644
index 000000000000..06f302354c62
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/nvsd.h
@@ -0,0 +1,25 @@
+/*
+ * drivers/video/tegra/dc/nvsd.h
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_NVSD_H
+#define __DRIVERS_VIDEO_TEGRA_DC_NVSD_H
+
+void nvsd_init(struct tegra_dc *dc, struct tegra_dc_sd_settings *settings);
+bool nvsd_update_brightness(struct tegra_dc *dc);
+int nvsd_create_sysfs(struct device *dev);
+void nvsd_remove_sysfs(struct device *dev);
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/overlay.c b/drivers/staging/tegra/video/dc/overlay.c
new file mode 100644
index 000000000000..810c3b07adf2
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/overlay.c
@@ -0,0 +1,893 @@
+/*
+ * drivers/video/tegra/overlay/overlay.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/tegra_overlay.h>
+#include <linux/uaccess.h>
+#include <drm/drm_fixed.h>
+
+#include <linux/atomic.h>
+
+#include <linux/nvhost.h>
+
+#include "dc.h"
+#include "fb.h"
+#include "dc_priv.h"
+#include "../nvmap/nvmap.h"
+#include "../host/dev.h"
+#include "overlay.h"
+
+/* Minimum extra shot for DIDIM if n shot is enabled. */
+#define TEGRA_DC_DIDIM_MIN_SHOT	1
+
+static DEFINE_MUTEX(tegra_flip_lock);
+
+struct overlay_client;
+
+struct overlay {
+	struct overlay_client	*owner;
+};
+
+struct tegra_overlay_info {
+	struct miscdevice	dev;
+
+	struct list_head	clients;
+	spinlock_t		clients_lock;
+
+	struct overlay		overlays[DC_N_WINDOWS];
+	struct mutex		overlays_lock;
+
+	struct nvhost_device	*ndev;
+
+	struct nvmap_client	*overlay_nvmap;
+
+	struct tegra_dc		*dc;
+
+	struct tegra_dc_blend	blend;
+
+	u32			n_shot;
+	u32			overlay_ref;
+	struct mutex		lock;
+	struct workqueue_struct	*flip_wq;
+
+	/* Big enough for tegra_dc%u when %u < 10 */
+	char			name[10];
+};
+
+struct overlay_client {
+	struct tegra_overlay_info	*dev;
+	struct list_head		list;
+	struct task_struct		*task;
+	struct nvmap_client		*user_nvmap;
+};
+
+struct tegra_overlay_flip_win {
+	struct tegra_overlay_windowattr	attr;
+	struct nvmap_handle_ref		*handle;
+	dma_addr_t			phys_addr;
+};
+
+struct tegra_overlay_flip_data {
+	bool				didim_work;
+	u32				flags;
+	u32				nr_unpin;
+	u32				syncpt_max;
+	struct work_struct		work;
+	struct tegra_overlay_info	*overlay;
+	struct nvmap_handle_ref		*unpin_handles[TEGRA_FB_FLIP_N_WINDOWS];
+	struct tegra_overlay_flip_win	win[TEGRA_FB_FLIP_N_WINDOWS];
+};
+
+static void tegra_overlay_flip_worker(struct work_struct *work);
+
+/* Overlay window manipulation */
+static int tegra_overlay_pin_window(struct tegra_overlay_info *overlay,
+				    struct tegra_overlay_flip_win *flip_win,
+				    struct nvmap_client *user_nvmap)
+{
+	struct nvmap_handle_ref *win_dupe;
+	struct nvmap_handle *win_handle;
+	unsigned long buff_id = flip_win->attr.buff_id;
+
+	if (!buff_id)
+		return 0;
+
+	win_handle = nvmap_get_handle_id(user_nvmap, buff_id);
+	if (win_handle == NULL) {
+		dev_err(&overlay->ndev->dev,
+			"%s: flip invalid handle %08lx\n",
+			current->comm, buff_id);
+		return -EPERM;
+	}
+
+	/* duplicate the new framebuffer's handle into the fb driver's
+	 * nvmap context, to ensure that the handle won't be freed as
+	 * long as it is in-use by the fb driver */
+	win_dupe = nvmap_duplicate_handle_id(overlay->overlay_nvmap, buff_id);
+	nvmap_handle_put(win_handle);
+
+	if (IS_ERR(win_dupe)) {
+		dev_err(&overlay->ndev->dev, "couldn't duplicate handle\n");
+		return PTR_ERR(win_dupe);
+	}
+
+	flip_win->handle = win_dupe;
+
+	flip_win->phys_addr = nvmap_pin(overlay->overlay_nvmap, win_dupe);
+	if (IS_ERR((void *)flip_win->phys_addr)) {
+		dev_err(&overlay->ndev->dev, "couldn't pin handle\n");
+		nvmap_free(overlay->overlay_nvmap, win_dupe);
+		return PTR_ERR((void *)flip_win->phys_addr);
+	}
+
+	return 0;
+}
+
+static int tegra_overlay_set_windowattr(struct tegra_overlay_info *overlay,
+					struct tegra_dc_win *win,
+					const struct tegra_overlay_flip_win *flip_win)
+{
+	int xres, yres;
+	if (flip_win->handle == NULL) {
+		win->flags = 0;
+		win->cur_handle = NULL;
+		return 0;
+	}
+
+	xres = overlay->dc->mode.h_active;
+	yres = overlay->dc->mode.v_active;
+
+	win->flags = TEGRA_WIN_FLAG_ENABLED;
+	if (flip_win->attr.blend == TEGRA_FB_WIN_BLEND_PREMULT)
+		win->flags |= TEGRA_WIN_FLAG_BLEND_PREMULT;
+	else if (flip_win->attr.blend == TEGRA_FB_WIN_BLEND_COVERAGE)
+		win->flags |= TEGRA_WIN_FLAG_BLEND_COVERAGE;
+	if (flip_win->attr.flags & TEGRA_FB_WIN_FLAG_INVERT_H)
+		win->flags |= TEGRA_WIN_FLAG_INVERT_H;
+	if (flip_win->attr.flags & TEGRA_FB_WIN_FLAG_INVERT_V)
+		win->flags |= TEGRA_WIN_FLAG_INVERT_V;
+	if (flip_win->attr.flags & TEGRA_FB_WIN_FLAG_TILED)
+		win->flags |= TEGRA_WIN_FLAG_TILED;
+
+	win->fmt = flip_win->attr.pixformat;
+	win->x.full = dfixed_const(flip_win->attr.x);
+	win->y.full = dfixed_const(flip_win->attr.y);
+	win->w.full = dfixed_const(flip_win->attr.w);
+	win->h.full = dfixed_const(flip_win->attr.h);
+	win->out_x = flip_win->attr.out_x;
+	win->out_y = flip_win->attr.out_y;
+	win->out_w = flip_win->attr.out_w;
+	win->out_h = flip_win->attr.out_h;
+
+	WARN_ONCE(win->out_x >= xres,
+		"%s:application window x offset(%d) exceeds display width(%d)\n",
+		dev_name(&win->dc->ndev->dev), win->out_x, xres);
+	WARN_ONCE(win->out_y >= yres,
+		"%s:application window y offset(%d) exceeds display height(%d)\n",
+		dev_name(&win->dc->ndev->dev), win->out_y, yres);
+	WARN_ONCE(win->out_x + win->out_w > xres && win->out_x < xres,
+		"%s:application window width(%d) exceeds display width(%d)\n",
+		dev_name(&win->dc->ndev->dev), win->out_x + win->out_w, xres);
+	WARN_ONCE(win->out_y + win->out_h > yres && win->out_y < yres,
+		"%s:application window height(%d) exceeds display height(%d)\n",
+		dev_name(&win->dc->ndev->dev), win->out_y + win->out_h, yres);
+
+	if (((win->out_x + win->out_w) > xres) && (win->out_x < xres)) {
+		long new_w = xres - win->out_x;
+		u64 in_w = win->w.full * new_w;
+		do_div(in_w, win->out_w);
+		win->w.full = lower_32_bits(in_w);
+		win->out_w = new_w;
+	}
+	if (((win->out_y + win->out_h) > yres) && (win->out_y < yres)) {
+		long new_h = yres - win->out_y;
+		u64 in_h = win->h.full * new_h;
+		do_div(in_h, win->out_h);
+		win->h.full = lower_32_bits(in_h);
+		win->out_h = new_h;
+	}
+
+	win->z = flip_win->attr.z;
+	win->cur_handle = flip_win->handle;
+
+	/* STOPSHIP verify that this won't read outside of the surface */
+	win->phys_addr = flip_win->phys_addr + flip_win->attr.offset;
+	win->phys_addr_u = flip_win->phys_addr + flip_win->attr.offset_u;
+	win->phys_addr_v = flip_win->phys_addr + flip_win->attr.offset_v;
+	win->stride = flip_win->attr.stride;
+	win->stride_uv = flip_win->attr.stride_uv;
+
+	if ((s32)flip_win->attr.pre_syncpt_id >= 0) {
+		nvhost_syncpt_wait_timeout(&nvhost_get_host(overlay->ndev)->syncpt,
+					   flip_win->attr.pre_syncpt_id,
+					   flip_win->attr.pre_syncpt_val,
+					   msecs_to_jiffies(500),
+					   NULL);
+	}
+
+	/* Store the blend state in case we need to reorder later */
+	overlay->blend.z[win->idx] = win->z;
+	overlay->blend.flags[win->idx] = win->flags & TEGRA_WIN_BLEND_FLAGS_MASK;
+
+	return 0;
+}
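
The clipping arithmetic in tegra_overlay_set_windowattr() preserves the scale factor when a window hangs off the display edge: the source extent is shrunk in the same proportion as the clipped output extent. A worked check with concrete numbers (standalone, using plain integers for the 20.12 fixed-point values):

#include <stdint.h>
#include <stdio.h>

#define FIX(x) ((uint32_t)(x) << 12)	/* 20.12 fixed point */

int main(void)
{
	uint32_t xres = 1024, out_x = 900, out_w = 200;
	uint32_t w = FIX(400);		/* 400 src px onto 200 out px: 2:1 */

	uint32_t new_w = xres - out_x;	/* only 124 output px fit */
	uint64_t in_w = (uint64_t)w * new_w;

	in_w /= out_w;			/* 248 source px stay visible */
	printf("out %u -> %u, in %u -> %u\n",
	       out_w, new_w, w >> 12, (uint32_t)(in_w >> 12));
	return 0;
}
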
+
+/*
+ * Overlay policy for premult is dst alpha, which needs reassignment
+ * of blend settings for the DC.
+ */
+static void tegra_overlay_blend_reorder(struct tegra_dc_blend *blend,
+					struct tegra_dc_win *windows[])
+{
+	int idx, below;
+
+	/* Copy across the original blend state to each window */
+	for (idx = 0; idx < DC_N_WINDOWS; idx++) {
+		windows[idx]->z = blend->z[idx];
+		windows[idx]->flags &= ~TEGRA_WIN_BLEND_FLAGS_MASK;
+		windows[idx]->flags |= blend->flags[idx];
+	}
+
+	/* Find a window with PreMult */
+	for (idx = 0; idx < DC_N_WINDOWS; idx++) {
+		if (blend->flags[idx] == TEGRA_WIN_FLAG_BLEND_PREMULT)
+			break;
+	}
+	if (idx == DC_N_WINDOWS)
+		return;
+
+	/* Find the window directly below it */
+	for (below = 0; below < DC_N_WINDOWS; below++) {
+		if (below == idx)
+			continue;
+		if (blend->z[below] > blend->z[idx])
+			break;
+	}
+	if (below == DC_N_WINDOWS)
+		return;
+
+	/* Switch the flags and the ordering */
+	windows[idx]->z = blend->z[below];
+	windows[idx]->flags &= ~TEGRA_WIN_BLEND_FLAGS_MASK;
+	windows[idx]->flags |= blend->flags[below];
+	windows[below]->z = blend->z[idx];
+	windows[below]->flags &= ~TEGRA_WIN_BLEND_FLAGS_MASK;
+	windows[below]->flags |= blend->flags[idx];
+}
+
+static int tegra_overlay_flip_didim(struct tegra_overlay_flip_data *data)
+{
+	mutex_lock(&tegra_flip_lock);
+	INIT_WORK(&data->work, tegra_overlay_flip_worker);
+
+	queue_work(data->overlay->flip_wq, &data->work);
+
+	mutex_unlock(&tegra_flip_lock);
+
+	return 0;
+}
+
+static void tegra_overlay_n_shot(struct tegra_overlay_flip_data *data,
+			struct nvmap_handle_ref **unpin_handles, int *nr_unpin)
+{
+	int i;
+	struct tegra_overlay_info *overlay = data->overlay;
+	u32 didim_delay = overlay->dc->out->sd_settings->hw_update_delay;
+	u32 didim_enable = overlay->dc->out->sd_settings->enable;
+
+	mutex_lock(&overlay->lock);
+
+	if (data->didim_work) {
+		/* Increment sync point if we finish n shot;
+		 * otherwise send overlay flip request. */
+		if (overlay->n_shot)
+			overlay->n_shot--;
+
+		if (overlay->n_shot && didim_enable) {
+			tegra_overlay_flip_didim(data);
+			mutex_unlock(&overlay->lock);
+			return;
+		} else {
+			*nr_unpin = data->nr_unpin;
+			for (i = 0; i < *nr_unpin; i++)
+				unpin_handles[i] = data->unpin_handles[i];
+			tegra_dc_incr_syncpt_min(overlay->dc, 0,
+						data->syncpt_max);
+		}
+	} else {
+		overlay->overlay_ref--;
+		/* If no new flip request in the queue, we will send
+		 * the last frame n times for DIDIM */
+		if (!overlay->overlay_ref && didim_enable)
+			overlay->n_shot = TEGRA_DC_DIDIM_MIN_SHOT + didim_delay;
+
+		if (overlay->n_shot && didim_enable) {
+			data->nr_unpin = *nr_unpin;
+			data->didim_work = true;
+			for (i = 0; i < *nr_unpin; i++)
+				data->unpin_handles[i] = unpin_handles[i];
+			tegra_overlay_flip_didim(data);
+			mutex_unlock(&overlay->lock);
+			return;
+		} else {
+			tegra_dc_incr_syncpt_min(overlay->dc, 0,
+						data->syncpt_max);
+		}
+	}
+
+	mutex_unlock(&overlay->lock);
+
+	/* unpin and deref previous front buffers */
+	for (i = 0; i < *nr_unpin; i++) {
+		nvmap_unpin(overlay->overlay_nvmap, unpin_handles[i]);
+		nvmap_free(overlay->overlay_nvmap, unpin_handles[i]);
+	}
+
+	kfree(data);
+}
+
+static void tegra_overlay_flip_worker(struct work_struct *work)
+{
+	struct tegra_overlay_flip_data *data =
+		container_of(work, struct tegra_overlay_flip_data, work);
+	struct tegra_overlay_info *overlay = data->overlay;
+	struct tegra_dc_win *win;
+	struct tegra_dc_win *wins[TEGRA_FB_FLIP_N_WINDOWS];
+	struct nvmap_handle_ref *unpin_handles[TEGRA_FB_FLIP_N_WINDOWS];
+	int i, nr_win = 0, nr_unpin = 0;
+
+	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
+		struct tegra_overlay_flip_win *flip_win = &data->win[i];
+		int idx = flip_win->attr.index;
+
+		if (idx == -1)
+			continue;
+
+		win = tegra_dc_get_window(overlay->dc, idx);
+
+		if (!win)
+			continue;
+
+		if (win->flags && win->cur_handle && !data->didim_work)
+			unpin_handles[nr_unpin++] = win->cur_handle;
+
+		tegra_overlay_set_windowattr(overlay, win, &data->win[i]);
+
+		wins[nr_win++] = win;
+
+#if 0
+		if (flip_win->attr.pre_syncpt_id < 0)
+			continue;
+		printk("%08x %08x\n",
+		       flip_win->attr.pre_syncpt_id,
+		       flip_win->attr.pre_syncpt_val);
+
+		nvhost_syncpt_wait_timeout(&overlay->ndev->host->syncpt,
+					   flip_win->attr.pre_syncpt_id,
+					   flip_win->attr.pre_syncpt_val,
+					   msecs_to_jiffies(500));
+#endif
+	}
+
+	if (data->flags & TEGRA_OVERLAY_FLIP_FLAG_BLEND_REORDER) {
+		struct tegra_dc_win *dcwins[DC_N_WINDOWS];
+
+		for (i = 0; i < DC_N_WINDOWS; i++)
+			dcwins[i] = tegra_dc_get_window(overlay->dc, i);
+
+		tegra_overlay_blend_reorder(&overlay->blend, dcwins);
+		tegra_dc_update_windows(dcwins, DC_N_WINDOWS);
+		tegra_dc_sync_windows(dcwins, DC_N_WINDOWS);
+	} else {
+		tegra_dc_update_windows(wins, nr_win);
+		/* TODO: implement swapinterval here */
+		tegra_dc_sync_windows(wins, nr_win);
+	}
+
+	if ((overlay->dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) &&
+		(overlay->dc->out->flags & TEGRA_DC_OUT_N_SHOT_MODE)) {
+		tegra_overlay_n_shot(data, unpin_handles, &nr_unpin);
+	} else {
+		tegra_dc_incr_syncpt_min(overlay->dc, 0, data->syncpt_max);
+
+		/* unpin and deref previous front buffers */
+		for (i = 0; i < nr_unpin; i++) {
+			nvmap_unpin(overlay->overlay_nvmap, unpin_handles[i]);
+			nvmap_free(overlay->overlay_nvmap, unpin_handles[i]);
+		}
+
+		kfree(data);
+	}
+}
+
+static int tegra_overlay_flip(struct tegra_overlay_info *overlay,
+			      struct tegra_overlay_flip_args *args,
+			      struct nvmap_client *user_nvmap)
+{
+	struct tegra_overlay_flip_data *data;
+	struct tegra_overlay_flip_win *flip_win;
+	u32 syncpt_max;
+	int i, err;
+
+	if (WARN_ON(!overlay->ndev))
+		return -EFAULT;
+
+	mutex_lock(&tegra_flip_lock);
+	mutex_lock(&overlay->dc->lock);
+	if (!overlay->dc->enabled) {
+		mutex_unlock(&overlay->dc->lock);
+		mutex_unlock(&tegra_flip_lock);
+		return -EFAULT;
+	}
+	mutex_unlock(&overlay->dc->lock);
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (data == NULL) {
+		dev_err(&overlay->ndev->dev,
+			"can't allocate memory for flip\n");
+		mutex_unlock(&tegra_flip_lock);
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&data->work, tegra_overlay_flip_worker);
+	data->overlay = overlay;
+	data->flags = args->flags;
+	data->didim_work = false;
+
+	if ((overlay->dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) &&
+		(overlay->dc->out->flags & TEGRA_DC_OUT_N_SHOT_MODE)) {
+		mutex_lock(&overlay->lock);
+		overlay->overlay_ref++;
+		overlay->n_shot = 0;
+		mutex_unlock(&overlay->lock);
+	}
+
+	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
+		flip_win = &data->win[i];
+
+		memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));
+
+		if (flip_win->attr.index == -1)
+			continue;
+
+		err = tegra_overlay_pin_window(overlay, flip_win, user_nvmap);
+		if (err < 0) {
+			dev_err(&overlay->ndev->dev,
+				"error setting window attributes\n");
+			goto surf_err;
+		}
+	}
+
+	syncpt_max = tegra_dc_incr_syncpt_max(overlay->dc, 0);
+	data->syncpt_max = syncpt_max;
+
+	queue_work(overlay->flip_wq, &data->work);
+
+	args->post_syncpt_val = syncpt_max;
+	args->post_syncpt_id = tegra_dc_get_syncpt_id(overlay->dc, 0);
+	mutex_unlock(&tegra_flip_lock);
+
+	return 0;
+
+surf_err:
+	while (i--) {
+		if (data->win[i].handle) {
+			nvmap_unpin(overlay->overlay_nvmap,
+				    data->win[i].handle);
+			nvmap_free(overlay->overlay_nvmap,
+				   data->win[i].handle);
+		}
+	}
+	kfree(data);
+	mutex_unlock(&tegra_flip_lock);
+	return err;
+}
+
+static void tegra_overlay_set_emc_freq(struct tegra_overlay_info *dev)
+{
+	struct tegra_dc_win *win;
+	struct tegra_dc_win *wins[DC_N_WINDOWS];
+	int i;
+
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		win = tegra_dc_get_window(dev->dc, i);
+		wins[i] = win;
+	}
+
+	tegra_dc_set_dynamic_emc(wins, dev->dc->n_windows);
+	tegra_dc_program_bandwidth(dev->dc, false);
+}
+
+/* Overlay functions */
+static bool tegra_overlay_get(struct overlay_client *client, int idx)
+{
+	struct tegra_overlay_info *dev = client->dev;
+	bool ret = false;
+
+	if (idx < 0 || idx >= dev->dc->n_windows)
+		return ret;
+
+	mutex_lock(&dev->overlays_lock);
+	if (dev->overlays[idx].owner == NULL) {
+		dev->overlays[idx].owner = client;
+		ret = true;
+		if (dev->dc->mode.pclk != 0)
+			tegra_overlay_set_emc_freq(dev);
+
+		dev_dbg(&client->dev->ndev->dev,
+			"%s(): idx=%d pid=%d comm=%s\n",
+			__func__, idx, client->task->pid, client->task->comm);
+	}
+	mutex_unlock(&dev->overlays_lock);
+
+	return ret;
+}
+
+static void tegra_overlay_put_locked(struct overlay_client *client, int idx)
+{
+	struct tegra_overlay_flip_args flip_args;
+	struct tegra_overlay_info *dev = client->dev;
+
+	if (idx < 0 || idx >= dev->dc->n_windows)
+		return;
+
+	if (dev->overlays[idx].owner != client)
+		return;
+
+	dev_dbg(&client->dev->ndev->dev,
+		"%s(): idx=%d pid=%d comm=%s\n",
+		__func__, idx, client->task->pid, client->task->comm);
+
+	dev->overlays[idx].owner = NULL;
+
+	flip_args.win[0].index = idx;
+	flip_args.win[0].buff_id = 0;
+	flip_args.win[1].index = -1;
+	flip_args.win[2].index = -1;
+	flip_args.flags = 0;
+
+	tegra_overlay_flip(dev, &flip_args, NULL);
+	if (dev->dc->mode.pclk != 0)
+		tegra_overlay_set_emc_freq(dev);
+}
+
+static void tegra_overlay_put(struct overlay_client *client, int idx)
+{
+	mutex_lock(&client->dev->overlays_lock);
+	tegra_overlay_put_locked(client, idx);
+	mutex_unlock(&client->dev->overlays_lock);
+}
+
+/* Ioctl implementations */
+static int tegra_overlay_ioctl_open(struct overlay_client *client,
+				    void __user *arg)
+{
+	int idx = -1;
+
+	if (copy_from_user(&idx, arg, sizeof(idx)))
+		return -EFAULT;
+
+	if (!tegra_overlay_get(client, idx))
+		return -EBUSY;
+
+	if (copy_to_user(arg, &idx, sizeof(idx))) {
+		tegra_overlay_put(client, idx);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int tegra_overlay_ioctl_close(struct overlay_client *client,
+				     void __user *arg)
+{
+	int err = 0;
+	int idx;
+
+	if (copy_from_user(&idx, arg, sizeof(idx)))
+		return -EFAULT;
+
+	if (idx < 0 || idx >= client->dev->dc->n_windows)
+		return -EINVAL;
+
+	mutex_lock(&client->dev->overlays_lock);
+	if (client->dev->overlays[idx].owner == client)
+		tegra_overlay_put_locked(client, idx);
+	else
+		err = -EINVAL;
+	mutex_unlock(&client->dev->overlays_lock);
+
+	return err;
+}
+
+static int tegra_overlay_ioctl_flip(struct overlay_client *client,
+				    void __user *arg)
+{
+	int i = 0;
+	int idx = 0;
+	int err;
+	bool found_one = false;
+	struct tegra_overlay_flip_args flip_args;
+
+	mutex_lock(&client->dev->dc->lock);
+	if (!client->dev->dc->enabled) {
+		mutex_unlock(&client->dev->dc->lock);
+		return -EPIPE;
+	}
+	mutex_unlock(&client->dev->dc->lock);
+
+	if (copy_from_user(&flip_args, arg, sizeof(flip_args)))
+		return -EFAULT;
+
+	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
+		idx = flip_args.win[i].index;
+		if (idx == -1) {
+			flip_args.win[i].buff_id = 0;
+			continue;
+		}
+
+		if (idx < 0 || idx >= client->dev->dc->n_windows) {
+			dev_err(&client->dev->ndev->dev,
+				"Flipping an invalid overlay! %d\n", idx);
+			flip_args.win[i].index = -1;
+			flip_args.win[i].buff_id = 0;
+			continue;
+		}
+
+		if (client->dev->overlays[idx].owner != client) {
+			dev_err(&client->dev->ndev->dev,
+				"Flipping a non-owned overlay! %d\n", idx);
+			flip_args.win[i].index = -1;
+			flip_args.win[i].buff_id = 0;
+			continue;
+		}
+
+		found_one = true;
+	}
+
+	if (!found_one)
+		return -EFAULT;
+
+	err = tegra_overlay_flip(client->dev, &flip_args, client->user_nvmap);
+
+	if (err)
+		return err;
+
+	if (copy_to_user(arg, &flip_args, sizeof(flip_args)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int tegra_overlay_ioctl_set_nvmap_fd(struct overlay_client *client,
+					    void __user *arg)
+{
+	int fd;
+	struct nvmap_client *nvmap = NULL;
+
+	if (copy_from_user(&fd, arg, sizeof(fd)))
+		return -EFAULT;
+
+	if (fd < 0)
+		return -EINVAL;
+
+	nvmap = nvmap_client_get_file(fd);
+	if (IS_ERR(nvmap))
+		return PTR_ERR(nvmap);
+
+	if (client->user_nvmap)
+		nvmap_client_put(client->user_nvmap);
+
+	client->user_nvmap = nvmap;
+
+	return 0;
+}
+
+/* File operations */
+static int tegra_overlay_open(struct inode *inode, struct file *filp)
+{
+	struct miscdevice *miscdev = filp->private_data;
+	struct tegra_overlay_info *dev = container_of(miscdev,
+						      struct tegra_overlay_info,
+						      dev);
+	struct overlay_client *priv;
+	unsigned long flags;
+	int ret;
+
+	ret = nonseekable_open(inode, filp);
+	if (unlikely(ret))
+		return ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+
+	get_task_struct(current);
+	priv->task = current;
+
+	spin_lock_irqsave(&dev->clients_lock, flags);
+	list_add(&priv->list, &dev->clients);
+	spin_unlock_irqrestore(&dev->clients_lock, flags);
+
+	filp->private_data = priv;
+	return 0;
+}
+
+static int tegra_overlay_release(struct inode *inode, struct file *filp)
+{
+	struct overlay_client *client = filp->private_data;
+	unsigned long flags;
+	int i;
+
+	mutex_lock(&client->dev->overlays_lock);
+	for (i = 0; i < client->dev->dc->n_windows; i++)
+		if (client->dev->overlays[i].owner == client)
+			tegra_overlay_put_locked(client, i);
+	mutex_unlock(&client->dev->overlays_lock);
+
+	spin_lock_irqsave(&client->dev->clients_lock, flags);
+	list_del(&client->list);
+	spin_unlock_irqrestore(&client->dev->clients_lock, flags);
+
+	nvmap_client_put(client->user_nvmap);
+	put_task_struct(client->task);
+
+	kfree(client);
+	return 0;
+}
+
+static long tegra_overlay_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	struct overlay_client *client = filp->private_data;
+	int err = 0;
+	void __user *uarg = (void __user *)arg;
+
+	if (_IOC_TYPE(cmd) != TEGRA_OVERLAY_IOCTL_MAGIC)
+		return -ENOTTY;
+
+	if (_IOC_NR(cmd) < TEGRA_OVERLAY_IOCTL_MIN_NR)
+		return -ENOTTY;
+
+	if (_IOC_NR(cmd) > TEGRA_OVERLAY_IOCTL_MAX_NR)
+		return -ENOTTY;
+
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+	if (_IOC_DIR(cmd) & _IOC_WRITE)
+		err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+	if (err)
+		return -EFAULT;
+
+	switch (cmd) {
+	case TEGRA_OVERLAY_IOCTL_OPEN_WINDOW:
+		err = tegra_overlay_ioctl_open(client, uarg);
+		break;
+	case TEGRA_OVERLAY_IOCTL_CLOSE_WINDOW:
+		err = tegra_overlay_ioctl_close(client, uarg);
+		break;
+	case TEGRA_OVERLAY_IOCTL_FLIP:
+		err = tegra_overlay_ioctl_flip(client, uarg);
+		break;
+	case TEGRA_OVERLAY_IOCTL_SET_NVMAP_FD:
+		err = tegra_overlay_ioctl_set_nvmap_fd(client, uarg);
+		break;
+	default:
+		return -ENOTTY;
+	}
+	return err;
+}
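
Taken together, the handlers above define the userspace sequence: bind an nvmap client, claim a window, flip buffers into it, and release it. A condensed sketch, omitting error handling; /dev/tegra_dc0 follows from the misc device name set during registration below, while the /dev/nvmap node name and a real buff_id are assumptions:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tegra_overlay.h>

int main(void)
{
	struct tegra_overlay_flip_args flip;
	int fd = open("/dev/tegra_dc0", O_RDWR);
	int nvmap_fd = open("/dev/nvmap", O_RDWR);	/* assumed node */
	int win = 1;			/* request overlay window 1 */

	/* Flip buffers will be resolved against this nvmap client. */
	ioctl(fd, TEGRA_OVERLAY_IOCTL_SET_NVMAP_FD, &nvmap_fd);

	/* Claim the window; the driver writes back the granted index. */
	ioctl(fd, TEGRA_OVERLAY_IOCTL_OPEN_WINDOW, &win);

	memset(&flip, 0, sizeof(flip));
	flip.win[0].index = win;
	flip.win[0].buff_id = 0;	/* a real nvmap handle id goes here */
	flip.win[1].index = -1;		/* unused slots are marked -1 */
	flip.win[2].index = -1;
	ioctl(fd, TEGRA_OVERLAY_IOCTL_FLIP, &flip);

	ioctl(fd, TEGRA_OVERLAY_IOCTL_CLOSE_WINDOW, &win);
	close(nvmap_fd);
	close(fd);
	return 0;
}
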
+
+static const struct file_operations overlay_fops = {
+	.owner		= THIS_MODULE,
+	.open		= tegra_overlay_open,
+	.release	= tegra_overlay_release,
+	.unlocked_ioctl = tegra_overlay_ioctl,
+};
+
+/* Registration */
+struct tegra_overlay_info *tegra_overlay_register(struct nvhost_device *ndev,
+						  struct tegra_dc *dc)
+{
+	struct tegra_overlay_info *dev;
+	int e;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&ndev->dev, "out of memory for device\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	snprintf(dev->name, sizeof(dev->name), "tegra_dc%u", ndev->id);
+
+	dev->ndev = ndev;
+	dev->dev.minor = MISC_DYNAMIC_MINOR;
+	dev->dev.name = dev->name;
+	dev->dev.fops = &overlay_fops;
+	dev->dev.parent = &ndev->dev;
+
+	spin_lock_init(&dev->clients_lock);
+	INIT_LIST_HEAD(&dev->clients);
+
+	mutex_init(&dev->overlays_lock);
+
+	e = misc_register(&dev->dev);
+	if (e) {
+		dev_err(&ndev->dev, "unable to register miscdevice %s\n",
+			dev->dev.name);
+		goto fail;
+	}
+
+	dev->overlay_nvmap = nvmap_create_client(nvmap_dev, "overlay");
+	if (!dev->overlay_nvmap) {
+		dev_err(&ndev->dev, "couldn't create nvmap client\n");
+		e = -ENOMEM;
+		goto err_free;
+	}
+
+	dev->flip_wq = create_singlethread_workqueue(dev_name(&ndev->dev));
+	if (!dev->flip_wq) {
+		dev_err(&ndev->dev, "couldn't create flip work-queue\n");
+		e = -ENOMEM;
+		goto err_put_client;
+	}
+	mutex_init(&dev->lock);
+	dev->overlay_ref = 0;
+	dev->n_shot = 0;
+
+	dev->dc = dc;
+
+	dev_info(&ndev->dev, "registered overlay\n");
+
+	return dev;
+
+err_put_client:
+	/* release the nvmap client if the workqueue could not be created */
+	nvmap_client_put(dev->overlay_nvmap);
+err_free:
+fail:
+	if (dev->dev.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&dev->dev);
+	kfree(dev);
+	return ERR_PTR(e);
+}
+
+void tegra_overlay_unregister(struct tegra_overlay_info *info)
+{
+	misc_deregister(&info->dev);
+
+	kfree(info);
+}
+
+void tegra_overlay_disable(struct tegra_overlay_info *overlay_info)
+{
+	mutex_lock(&tegra_flip_lock);
+	mutex_lock(&overlay_info->lock);
+	overlay_info->n_shot = 0;
+	flush_workqueue(overlay_info->flip_wq);
+	mutex_unlock(&overlay_info->lock);
+	mutex_unlock(&tegra_flip_lock);
+}
diff --git a/drivers/staging/tegra/video/dc/overlay.h b/drivers/staging/tegra/video/dc/overlay.h
new file mode 100644
index 000000000000..812bc0237562
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/overlay.h
@@ -0,0 +1,43 @@
+/*
+ * drivers/video/tegra/dc/overlay.h
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_OVERLAY_H
+#define __DRIVERS_VIDEO_TEGRA_OVERLAY_H
+
+struct tegra_overlay_info;
+
+#ifdef CONFIG_TEGRA_OVERLAY
+struct tegra_overlay_info *tegra_overlay_register(struct nvhost_device *ndev,
+						  struct tegra_dc *dc);
+void tegra_overlay_unregister(struct tegra_overlay_info *overlay_info);
+void tegra_overlay_disable(struct tegra_overlay_info *overlay_info);
+#else
+static inline struct tegra_overlay_info *tegra_overlay_register(struct nvhost_device *ndev,
+								struct tegra_dc *dc)
+{
+	return NULL;
+}
+
+static inline void tegra_overlay_unregister(struct tegra_overlay_info *overlay_info)
+{
+}
+
+static inline void tegra_overlay_disable(struct tegra_overlay_info *overlay_info)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/staging/tegra/video/dc/rgb.c b/drivers/staging/tegra/video/dc/rgb.c
new file mode 100644
index 000000000000..0f1c884a8c34
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/rgb.c
@@ -0,0 +1,158 @@
+/*
+ * drivers/video/tegra/dc/rgb.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "dc.h"
+#include "dc_reg.h"
+#include "dc_priv.h"
+
+
+static const u32 tegra_dc_rgb_enable_partial_pintable[] = {
+	DC_COM_PIN_OUTPUT_ENABLE0,	0x00000000,
+	DC_COM_PIN_OUTPUT_ENABLE1,	0x00000000,
+	DC_COM_PIN_OUTPUT_ENABLE2,	0x00000000,
+	DC_COM_PIN_OUTPUT_ENABLE3,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY0,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY2,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA0,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA1,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA2,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA3,	0x00000000,
+};
+
+static const u32 tegra_dc_rgb_enable_pintable[] = {
+	DC_COM_PIN_OUTPUT_ENABLE0,	0x00000000,
+	DC_COM_PIN_OUTPUT_ENABLE1,	0x00000000,
+	DC_COM_PIN_OUTPUT_ENABLE2,	0x00000000,
+	DC_COM_PIN_OUTPUT_ENABLE3,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY0,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY1,	0x01000000,
+	DC_COM_PIN_OUTPUT_POLARITY2,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY3,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA0,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA1,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA2,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA3,	0x00000000,
+};
+
+static const u32 tegra_dc_rgb_enable_out_sel_pintable[] = {
+	DC_COM_PIN_OUTPUT_SELECT0,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT1,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT2,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT3,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT4,	0x00210222,
+	DC_COM_PIN_OUTPUT_SELECT5,	0x00002200,
+	DC_COM_PIN_OUTPUT_SELECT6,	0x00020000,
+};
+
+static const u32 tegra_dc_rgb_disable_pintable[] = {
+	DC_COM_PIN_OUTPUT_ENABLE0,	0x55555555,
+	DC_COM_PIN_OUTPUT_ENABLE1,	0x55150005,
+	DC_COM_PIN_OUTPUT_ENABLE2,	0x55555555,
+	DC_COM_PIN_OUTPUT_ENABLE3,	0x55555555,
+	DC_COM_PIN_OUTPUT_POLARITY0,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY1,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY2,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY3,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA0,	0xaaaaaaaa,
+	DC_COM_PIN_OUTPUT_DATA1,	0xaaaaaaaa,
+	DC_COM_PIN_OUTPUT_DATA2,	0xaaaaaaaa,
+	DC_COM_PIN_OUTPUT_DATA3,	0xaaaaaaaa,
+	DC_COM_PIN_OUTPUT_SELECT0,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT1,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT2,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT3,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT4,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT5,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT6,	0x00000000,
+};
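
These pintables are flat arrays of register/value pairs handed to tegra_dc_write_table(). Its definition lives in dc_priv.h, but conceptually it is just a paired walk; a sketch of the assumed shape:

/* Assumed shape of the pintable consumer, not the patch's actual
 * tegra_dc_write_table() implementation. */
static void write_pintable(struct tegra_dc *dc, const u32 *table, size_t len)
{
	size_t i;

	for (i = 0; i < len; i += 2)
		tegra_dc_writel(dc, table[i + 1], table[i]);
}
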
+
+static void tegra_dc_rgb_enable(struct tegra_dc *dc)
+{
+	int i;
+	u32 out_sel_pintable[ARRAY_SIZE(tegra_dc_rgb_enable_out_sel_pintable)];
+
+	tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+			PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
+			DC_CMD_DISPLAY_POWER_CONTROL);
+
+	tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
+
+	if (dc->out->out_pins) {
+		tegra_dc_set_out_pin_polars(dc, dc->out->out_pins,
+			dc->out->n_out_pins);
+		tegra_dc_write_table(dc, tegra_dc_rgb_enable_partial_pintable);
+	} else {
+		tegra_dc_write_table(dc, tegra_dc_rgb_enable_pintable);
+	}
+
+	memcpy(out_sel_pintable, tegra_dc_rgb_enable_out_sel_pintable,
+		sizeof(tegra_dc_rgb_enable_out_sel_pintable));
+
+	if (dc->out && dc->out->out_sel_configs) {
+		u8 *out_sels = dc->out->out_sel_configs;
+		for (i = 0; i < dc->out->n_out_sel_configs; i++) {
+			switch (out_sels[i]) {
+			case TEGRA_PIN_OUT_CONFIG_SEL_LM1_M1:
+				out_sel_pintable[5*2+1] =
+					(out_sel_pintable[5*2+1] &
+					~PIN5_LM1_LCD_M1_OUTPUT_MASK) |
+					PIN5_LM1_LCD_M1_OUTPUT_M1;
+				break;
+			case TEGRA_PIN_OUT_CONFIG_SEL_LM1_LD21:
+				out_sel_pintable[5*2+1] =
+					(out_sel_pintable[5*2+1] &
+					~PIN5_LM1_LCD_M1_OUTPUT_MASK) |
+					PIN5_LM1_LCD_M1_OUTPUT_LD21;
+				break;
+			case TEGRA_PIN_OUT_CONFIG_SEL_LM1_PM1:
+				out_sel_pintable[5*2+1] =
+					(out_sel_pintable[5*2+1] &
+					~PIN5_LM1_LCD_M1_OUTPUT_MASK) |
+					PIN5_LM1_LCD_M1_OUTPUT_PM1;
+				break;
+			default:
+				dev_err(&dc->ndev->dev,
+					"Invalid pin config[%d]: %d\n",
+					 i, out_sels[i]);
+				break;
+			}
+		}
+	}
+
+	tegra_dc_write_table(dc, out_sel_pintable);
+
+	/* Inform DC register updated */
+	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_dc_rgb_disable(struct tegra_dc *dc)
+{
+	tegra_dc_writel(dc, 0x00000000, DC_CMD_DISPLAY_POWER_CONTROL);
+
+	tegra_dc_write_table(dc, tegra_dc_rgb_disable_pintable);
+}
+
+struct tegra_dc_out_ops tegra_dc_rgb_ops = {
+	.enable = tegra_dc_rgb_enable,
+	.disable = tegra_dc_rgb_disable,
+};
diff --git a/drivers/staging/tegra/video/dc/tegra_dc_ext.h b/drivers/staging/tegra/video/dc/tegra_dc_ext.h
new file mode 100644
index 000000000000..321bfe6170dd
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/tegra_dc_ext.h
@@ -0,0 +1,78 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_dc_ext.h
+ *
+ * Copyright (C) 2011, NVIDIA Corporation
+ *
+ * Author: Robert Morell <rmorell@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MACH_TEGRA_DC_EXT_H
+#define __MACH_TEGRA_DC_EXT_H
+
+#include <linux/nvhost.h>
+
+struct tegra_dc_ext;
+struct tegra_dc;
+
+#ifdef CONFIG_TEGRA_DC_EXTENSIONS
+int __init tegra_dc_ext_module_init(void);
+void __exit tegra_dc_ext_module_exit(void);
+
+struct tegra_dc_ext *tegra_dc_ext_register(struct nvhost_device *ndev,
+					   struct tegra_dc *dc);
+void tegra_dc_ext_unregister(struct tegra_dc_ext *dc_ext);
+
+/* called by display controller on enable/disable */
+void tegra_dc_ext_enable(struct tegra_dc_ext *dc_ext);
+void tegra_dc_ext_disable(struct tegra_dc_ext *dc_ext);
+
+int tegra_dc_ext_process_hotplug(int output);
+
+#else /* CONFIG_TEGRA_DC_EXTENSIONS */
+
+static inline
+int tegra_dc_ext_module_init(void)
+{
+	return 0;
+}
+static inline
+void tegra_dc_ext_module_exit(void)
+{
+}
+
+static inline
+struct tegra_dc_ext *tegra_dc_ext_register(struct nvhost_device *ndev,
+					   struct tegra_dc *dc)
+{
+	return NULL;
+}
+static inline
+void tegra_dc_ext_unregister(struct tegra_dc_ext *dc_ext)
+{
+}
+static inline
+void tegra_dc_ext_enable(struct tegra_dc_ext *dc_ext)
+{
+}
+static inline
+void tegra_dc_ext_disable(struct tegra_dc_ext *dc_ext)
+{
+}
+static inline
+int tegra_dc_ext_process_hotplug(int output)
+{
+	return 0;
+}
+#endif /* CONFIG_TEGRA_DC_EXTENSIONS */
+
+#endif /* __MACH_TEGRA_DC_EXT_H */
diff --git a/drivers/staging/tegra/video/dc/tegra_fb.h b/drivers/staging/tegra/video/dc/tegra_fb.h
new file mode 100644
index 000000000000..84ae8869b247
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/tegra_fb.h
@@ -0,0 +1,27 @@
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_fb.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Platform data structure to be passed to the driver */
+struct tegra_fb_lcd_data {
+	int	fb_xres;
+	int	fb_yres;
+	/* Resolution of the output to the LCD.  If different from the
+	   framebuffer resolution, the Tegra display block will scale it */
+	int	lcd_xres;
+	int	lcd_yres;
+	int	bits_per_pixel;
+};
diff --git a/drivers/staging/tegra/video/dc/window.c b/drivers/staging/tegra/video/dc/window.c
new file mode 100644
index 000000000000..892b146f954a
--- /dev/null
+++ b/drivers/staging/tegra/video/dc/window.c
@@ -0,0 +1,469 @@
+/*
+ * drivers/video/tegra/dc/window.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include "dc.h"
+#include "dc_reg.h"
+#include "dc_config.h"
+#include "dc_priv.h"
+
+static int no_vsync;
+static atomic_t frame_end_ref = ATOMIC_INIT(0);
+
+module_param_named(no_vsync, no_vsync, int, S_IRUGO | S_IWUSR);
+
+static bool tegra_dc_windows_are_clean(struct tegra_dc_win *windows[],
+					     int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++) {
+		if (windows[i]->dirty)
+			return false;
+	}
+
+	return true;
+}
+
+int tegra_dc_config_frame_end_intr(struct tegra_dc *dc, bool enable)
+{
+	tegra_dc_writel(dc, FRAME_END_INT, DC_CMD_INT_STATUS);
+	if (enable) {
+		atomic_inc(&frame_end_ref);
+		tegra_dc_unmask_interrupt(dc, FRAME_END_INT);
+	} else if (!atomic_dec_return(&frame_end_ref)) {
+		tegra_dc_mask_interrupt(dc, FRAME_END_INT);
+	}
+	return 0;
+}
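
tegra_dc_config_frame_end_intr() refcounts the FRAME_END interrupt so independent consumers can request it without stepping on each other; only the final disable masks it again. A usage fragment with hypothetical consumers:

/* Two hypothetical consumers sharing the frame-end interrupt. */
tegra_dc_config_frame_end_intr(dc, true);	/* vsync wait starts */
tegra_dc_config_frame_end_intr(dc, true);	/* smartdimmer polls */
tegra_dc_config_frame_end_intr(dc, false);	/* ref 2 -> 1, still unmasked */
tegra_dc_config_frame_end_intr(dc, false);	/* ref 1 -> 0, masked again */
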
+
+static int get_topmost_window(u32 *depths, unsigned long *wins)
+{
+	int idx, best = -1;
+
+	for_each_set_bit(idx, wins, DC_N_WINDOWS) {
+		if (best == -1 || depths[idx] < depths[best])
+			best = idx;
+	}
+	clear_bit(best, wins);
+	return best;
+}
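
get_topmost_window() treats a smaller depth as closer to the viewer and consumes the winner's bit from the mask, so repeated calls enumerate windows front to back. A standalone userspace rendition:

#include <stdio.h>

/* Same walk as get_topmost_window(): smallest depth wins, and the
 * winner's bit is cleared so the next call returns the next layer. */
static int topmost(const unsigned depths[], unsigned long *wins, int n)
{
	int idx, best = -1;

	for (idx = 0; idx < n; idx++)
		if (((*wins >> idx) & 1) &&
		    (best == -1 || depths[idx] < depths[best]))
			best = idx;
	*wins &= ~(1UL << best);
	return best;
}

int main(void)
{
	unsigned depths[3] = { 2, 0, 1 };
	unsigned long mask = 0x7;

	while (mask)			/* prints 1, 2, 0: front to back */
		printf("%d\n", topmost(depths, &mask, 3));
	return 0;
}
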
+
+static u32 blend_topwin(u32 flags)
+{
+	if (flags & TEGRA_WIN_FLAG_BLEND_COVERAGE)
+		return BLEND(NOKEY, ALPHA, 0xff, 0xff);
+	else if (flags & TEGRA_WIN_FLAG_BLEND_PREMULT)
+		return BLEND(NOKEY, PREMULT, 0xff, 0xff);
+	else
+		return BLEND(NOKEY, FIX, 0xff, 0xff);
+}
+
+static u32 blend_2win(int idx, unsigned long behind_mask, u32 *flags, int xy)
+{
+	int other;
+
+	for (other = 0; other < DC_N_WINDOWS; other++) {
+		if (other != idx && (xy-- == 0))
+			break;
+	}
+	if (BIT(other) & behind_mask)
+		return blend_topwin(flags[idx]);
+	else if (flags[other])
+		return BLEND(NOKEY, DEPENDANT, 0x00, 0x00);
+	else
+		return BLEND(NOKEY, FIX, 0x00, 0x00);
+}
+
+static u32 blend_3win(int idx, unsigned long behind_mask, u32 *flags)
+{
+	unsigned long infront_mask;
+	int first;
+
+	infront_mask = ~(behind_mask | BIT(idx));
+	infront_mask &= (BIT(DC_N_WINDOWS) - 1);
+	first = ffs(infront_mask) - 1;
+
+	if (!infront_mask)
+		return blend_topwin(flags[idx]);
+	else if (behind_mask && first != -1 && flags[first])
+		return BLEND(NOKEY, DEPENDANT, 0x00, 0x00);
+	else
+		return BLEND(NOKEY, FIX, 0x0, 0x0);
+}
+
+static void tegra_dc_set_blending(struct tegra_dc *dc,
+	struct tegra_dc_blend *blend)
+{
+	unsigned long mask = BIT(DC_N_WINDOWS) - 1;
+
+	while (mask) {
+		int idx = get_topmost_window(blend->z, &mask);
+
+		tegra_dc_writel(dc, WINDOW_A_SELECT << idx,
+				DC_CMD_DISPLAY_WINDOW_HEADER);
+		tegra_dc_writel(dc, BLEND(NOKEY, FIX, 0xff, 0xff),
+				DC_WIN_BLEND_NOKEY);
+		tegra_dc_writel(dc, BLEND(NOKEY, FIX, 0xff, 0xff),
+				DC_WIN_BLEND_1WIN);
+		tegra_dc_writel(dc, blend_2win(idx, mask, blend->flags, 0),
+				DC_WIN_BLEND_2WIN_X);
+		tegra_dc_writel(dc, blend_2win(idx, mask, blend->flags, 1),
+				DC_WIN_BLEND_2WIN_Y);
+		tegra_dc_writel(dc, blend_3win(idx, mask, blend->flags),
+				DC_WIN_BLEND_3WIN_XY);
+	}
+}
+
+/* does not support syncing windows on multiple dcs in one call */
+int tegra_dc_sync_windows(struct tegra_dc_win *windows[], int n)
+{
+	int ret;
+	if (n < 1 || n > DC_N_WINDOWS)
+		return -EINVAL;
+
+	if (!windows[0]->dc->enabled)
+		return -EFAULT;
+
+	trace_printk("%s:Before wait_event_interruptible_timeout\n",
+		windows[0]->dc->ndev->name);
+	ret = wait_event_interruptible_timeout(windows[0]->dc->wq,
+		tegra_dc_windows_are_clean(windows, n),
+		HZ);
+	trace_printk("%s:After wait_event_interruptible_timeout\n",
+		windows[0]->dc->ndev->name);
+
+	return ret;
+}
+EXPORT_SYMBOL(tegra_dc_sync_windows);
+
+static inline u32 compute_dda_inc(fixed20_12 in, unsigned out_int,
+				  bool v, unsigned Bpp)
+{
+	/*
+	 * min(round((prescaled_size_in_pixels - 1) * 0x1000 /
+	 *	     (post_scaled_size_in_pixels - 1)), MAX)
+	 * Where the value of MAX is as follows:
+	 * For V_DDA_INCREMENT: 15.0 (0xF000)
+	 * For H_DDA_INCREMENT:  4.0 (0x4000) for 4 Bytes/pix formats.
+	 *			 8.0 (0x8000) for 2 Bytes/pix formats.
+	 */
+
+	fixed20_12 out = dfixed_init(out_int);
+	u32 dda_inc;
+	int max;
+
+	if (v) {
+		max = 15;
+	} else {
+		switch (Bpp) {
+		default:
+			WARN_ON_ONCE(1);
+			/* fallthrough */
+		case 4:
+			max = 4;
+			break;
+		case 2:
+			max = 8;
+			break;
+		}
+	}
+
+	out.full = max_t(u32, out.full - dfixed_const(1), dfixed_const(1));
+	in.full -= dfixed_const(1);
+
+	dda_inc = dfixed_div(in, out);
+
+	dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+	return dda_inc;
+}
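
To make the fixed-point arithmetic concrete: scaling a 32-pixel source span onto 64 output pixels yields a step just under one half, well inside every clamp. dfixed_div() in drm_fixed.h rounds to nearest, which the check below reproduces:

#include <stdint.h>
#include <stdio.h>

/* Worked instance of compute_dda_inc(): 32 src px -> 64 out px. */
int main(void)
{
	uint32_t in = (32u - 1) << 12;		/* (in - 1) in 20.12 */
	uint32_t out = (64u - 1) << 12;		/* (out - 1) in 20.12 */
	uint64_t dda = ((((uint64_t)in << 13) / out) + 1) / 2;

	/* 31/63 = 0.492 source px per output px = 0x7df in 4.12 */
	printf("dda_inc = 0x%llx\n", (unsigned long long)dda);
	return 0;
}
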
+
+static inline u32 compute_initial_dda(fixed20_12 in)
+{
+	return dfixed_frac(in);
+}
+
+/* does not support updating windows on multiple dcs in one call */
+int tegra_dc_update_windows(struct tegra_dc_win *windows[], int n)
+{
+	struct tegra_dc *dc;
+	unsigned long update_mask = GENERAL_ACT_REQ;
+	unsigned long val;
+	bool update_blend = false;
+	int i;
+
+	dc = windows[0]->dc;
+
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) {
+		/* Acquire one_shot_lock to avoid race condition between
+		 * cancellation of old delayed work and schedule of new
+		 * delayed work. */
+		mutex_lock(&dc->one_shot_lock);
+		cancel_delayed_work_sync(&dc->one_shot_work);
+	}
+	mutex_lock(&dc->lock);
+
+	if (!dc->enabled) {
+		mutex_unlock(&dc->lock);
+		if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+			mutex_unlock(&dc->one_shot_lock);
+		return -EFAULT;
+	}
+
+	tegra_dc_hold_dc_out(dc);
+
+	if (no_vsync)
+		tegra_dc_writel(dc, WRITE_MUX_ACTIVE | READ_MUX_ACTIVE,
+			DC_CMD_STATE_ACCESS);
+	else
+		tegra_dc_writel(dc, WRITE_MUX_ASSEMBLY | READ_MUX_ASSEMBLY,
+			DC_CMD_STATE_ACCESS);
+
+	for (i = 0; i < n; i++) {
+		struct tegra_dc_win *win = windows[i];
+		unsigned h_dda;
+		unsigned v_dda;
+		fixed20_12 h_offset, v_offset;
+		bool invert_h = (win->flags & TEGRA_WIN_FLAG_INVERT_H) != 0;
+		bool invert_v = (win->flags & TEGRA_WIN_FLAG_INVERT_V) != 0;
+		bool yuv = tegra_dc_is_yuv(win->fmt);
+		bool yuvp = tegra_dc_is_yuv_planar(win->fmt);
+		unsigned Bpp = tegra_dc_fmt_bpp(win->fmt) / 8;
+		/* bytes per pixel used for the dda_inc calculation;
+		 * doubled for planar YUV formats */
+		unsigned Bpp_bw = Bpp * (yuvp ? 2 : 1);
+		const bool filter_h = win_use_h_filter(dc, win);
+		const bool filter_v = win_use_v_filter(dc, win);
+
+		if (win->z != dc->blend.z[win->idx]) {
+			dc->blend.z[win->idx] = win->z;
+			update_blend = true;
+		}
+		if ((win->flags & TEGRA_WIN_BLEND_FLAGS_MASK) !=
+			dc->blend.flags[win->idx]) {
+			dc->blend.flags[win->idx] =
+				win->flags & TEGRA_WIN_BLEND_FLAGS_MASK;
+			update_blend = true;
+		}
+
+		tegra_dc_writel(dc, WINDOW_A_SELECT << win->idx,
+				DC_CMD_DISPLAY_WINDOW_HEADER);
+
+		if (!no_vsync)
+			update_mask |= WIN_A_ACT_REQ << win->idx;
+
+		if (!WIN_IS_ENABLED(win)) {
+			dc->windows[i].dirty = 1;
+			tegra_dc_writel(dc, 0, DC_WIN_WIN_OPTIONS);
+			continue;
+		}
+
+		tegra_dc_writel(dc, win->fmt & 0x1f, DC_WIN_COLOR_DEPTH);
+		tegra_dc_writel(dc, win->fmt >> 6, DC_WIN_BYTE_SWAP);
+
+		tegra_dc_writel(dc,
+			V_POSITION(win->out_y) | H_POSITION(win->out_x),
+			DC_WIN_POSITION);
+		tegra_dc_writel(dc,
+			V_SIZE(win->out_h) | H_SIZE(win->out_w),
+			DC_WIN_SIZE);
+
+		if (tegra_dc_feature_has_scaling(dc, win->idx)) {
+			tegra_dc_writel(dc,
+				V_PRESCALED_SIZE(dfixed_trunc(win->h)) |
+				H_PRESCALED_SIZE(dfixed_trunc(win->w) * Bpp),
+				DC_WIN_PRESCALED_SIZE);
+
+			h_dda = compute_dda_inc(win->w, win->out_w, false,
+				Bpp_bw);
+			v_dda = compute_dda_inc(win->h, win->out_h, true,
+				Bpp_bw);
+			tegra_dc_writel(dc, V_DDA_INC(v_dda) |
+				H_DDA_INC(h_dda), DC_WIN_DDA_INCREMENT);
+			h_dda = compute_initial_dda(win->x);
+			v_dda = compute_initial_dda(win->y);
+			tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+			tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+		}
+
+		tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+		tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+		tegra_dc_writel(dc, (unsigned long)win->phys_addr,
+			DC_WINBUF_START_ADDR);
+
+		if (!yuvp) {
+			tegra_dc_writel(dc, win->stride, DC_WIN_LINE_STRIDE);
+		} else {
+			tegra_dc_writel(dc,
+				(unsigned long)win->phys_addr_u,
+				DC_WINBUF_START_ADDR_U);
+			tegra_dc_writel(dc,
+				(unsigned long)win->phys_addr_v,
+				DC_WINBUF_START_ADDR_V);
+			tegra_dc_writel(dc,
+				LINE_STRIDE(win->stride) |
+				UV_LINE_STRIDE(win->stride_uv),
+				DC_WIN_LINE_STRIDE);
+		}
+
+		h_offset = win->x;
+		if (invert_h) {
+			h_offset.full += win->w.full - dfixed_const(1);
+		}
+
+		v_offset = win->y;
+		if (invert_v) {
+			v_offset.full += win->h.full - dfixed_const(1);
+		}
+
+		tegra_dc_writel(dc, dfixed_trunc(h_offset) * Bpp,
+				DC_WINBUF_ADDR_H_OFFSET);
+		tegra_dc_writel(dc, dfixed_trunc(v_offset),
+				DC_WINBUF_ADDR_V_OFFSET);
+
+		if (tegra_dc_feature_has_tiling(dc, win->idx)) {
+			if (WIN_IS_TILED(win))
+				tegra_dc_writel(dc,
+					DC_WIN_BUFFER_ADDR_MODE_TILE |
+					DC_WIN_BUFFER_ADDR_MODE_TILE_UV,
+					DC_WIN_BUFFER_ADDR_MODE);
+			else
+				tegra_dc_writel(dc,
+					DC_WIN_BUFFER_ADDR_MODE_LINEAR |
+					DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV,
+					DC_WIN_BUFFER_ADDR_MODE);
+		}
+
+		val = WIN_ENABLE;
+		if (yuv)
+			val |= CSC_ENABLE;
+		else if (tegra_dc_fmt_bpp(win->fmt) < 24)
+			val |= COLOR_EXPAND;
+
+		if (win->ppflags & TEGRA_WIN_PPFLAG_CP_ENABLE)
+			val |= CP_ENABLE;
+
+		if (filter_h)
+			val |= H_FILTER_ENABLE;
+		if (filter_v)
+			val |= V_FILTER_ENABLE;
+
+		if (invert_h)
+			val |= H_DIRECTION_DECREMENT;
+		if (invert_v)
+			val |= V_DIRECTION_DECREMENT;
+
+		tegra_dc_writel(dc, val, DC_WIN_WIN_OPTIONS);
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+		if (win->global_alpha == 255)
+			tegra_dc_writel(dc, 0, DC_WIN_GLOBAL_ALPHA);
+		else
+			tegra_dc_writel(dc, GLOBAL_ALPHA_ENABLE |
+				win->global_alpha, DC_WIN_GLOBAL_ALPHA);
+#endif
+
+		win->dirty = no_vsync ? 0 : 1;
+
+		dev_dbg(&dc->ndev->dev, "%s():idx=%d z=%d x=%d y=%d w=%d h=%d "
+			"out_x=%u out_y=%u out_w=%u out_h=%u "
+			"fmt=%d yuvp=%d Bpp=%u filter_h=%d filter_v=%d",
+			__func__, win->idx, win->z,
+			dfixed_trunc(win->x), dfixed_trunc(win->y),
+			dfixed_trunc(win->w), dfixed_trunc(win->h),
+			win->out_x, win->out_y, win->out_w, win->out_h,
+			win->fmt, yuvp, Bpp, filter_h, filter_v);
+		trace_printk("%s:win%u in:%ux%u out:%ux%u fmt=%d\n",
+			dc->ndev->name, win->idx, dfixed_trunc(win->w),
+			dfixed_trunc(win->h), win->out_w, win->out_h, win->fmt);
+	}
+
+	if (update_blend) {
+		tegra_dc_set_blending(dc, &dc->blend);
+		for (i = 0; i < DC_N_WINDOWS; i++) {
+			if (!no_vsync)
+				dc->windows[i].dirty = 1;
+			update_mask |= WIN_A_ACT_REQ << i;
+		}
+	}
+
+	tegra_dc_set_dynamic_emc(windows, n);
+
+	tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+
+	tegra_dc_writel(dc, FRAME_END_INT | V_BLANK_INT, DC_CMD_INT_STATUS);
+	if (!no_vsync) {
+		set_bit(V_BLANK_FLIP, &dc->vblank_ref_count);
+		tegra_dc_unmask_interrupt(dc,
+			FRAME_END_INT | V_BLANK_INT | ALL_UF_INT);
+	} else {
+		clear_bit(V_BLANK_FLIP, &dc->vblank_ref_count);
+		tegra_dc_mask_interrupt(dc, V_BLANK_INT | ALL_UF_INT);
+		if (!atomic_read(&frame_end_ref))
+			tegra_dc_mask_interrupt(dc, FRAME_END_INT);
+	}
+
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+		schedule_delayed_work(&dc->one_shot_work,
+				msecs_to_jiffies(dc->one_shot_delay_ms));
+
+	/* update EMC clock if calculated bandwidth has changed */
+	tegra_dc_program_bandwidth(dc, false);
+
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+		update_mask |= NC_HOST_TRIG;
+
+	tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+	trace_printk("%s:update_mask=%#lx\n", dc->ndev->name, update_mask);
+
+	tegra_dc_release_dc_out(dc);
+	mutex_unlock(&dc->lock);
+	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+		mutex_unlock(&dc->one_shot_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_dc_update_windows);
+
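+/*
+ * Mark every window whose ACT_REQ bit the hardware has consumed as clean,
+ * mask the frame-end interrupt once nothing is left dirty (except in
+ * one-shot mode or while frame-end references are held), and wake up any
+ * tegra_dc_sync_windows() waiters.
+ */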
+void tegra_dc_trigger_windows(struct tegra_dc *dc)
+{
+	u32 val, i;
+	u32 completed = 0;
+	u32 dirty = 0;
+
+	val = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+	for (i = 0; i < DC_N_WINDOWS; i++) {
+		if (!(val & (WIN_A_ACT_REQ << i))) {
+			dc->windows[i].dirty = 0;
+			completed = 1;
+		} else {
+			dirty = 1;
+		}
+	}
+
+	if (!dirty) {
+		if (!(dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+			&& !atomic_read(&frame_end_ref))
+			tegra_dc_mask_interrupt(dc, FRAME_END_INT);
+	}
+
+	if (completed)
+		wake_up(&dc->wq);
+}
+
diff --git a/drivers/staging/tegra/video/fb.c b/drivers/staging/tegra/video/fb.c
new file mode 100644
index 000000000000..e0fae11aa418
--- /dev/null
+++ b/drivers/staging/tegra/video/fb.c
@@ -0,0 +1,776 @@
+/*
+ * drivers/video/tegra/fb.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *         Colin Cross <ccross@android.com>
+ *         Travis Geiselbrecht <travis@palm.com>
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/workqueue.h>
+#include <linux/memblock.h>
+#include <linux/delay.h>
+
+#include <asm/atomic.h>
+
+#include <video/tegrafb.h>
+
+#include <linux/nvhost.h>
+#include <linux/nvmap.h>
+#include <linux/console.h>
+
+#include "dc/dc.h"
+#include "dc/fb.h"
+#include "host/dev.h"
+#include "nvmap/nvmap.h"
+#include "dc/dc_priv.h"
+
+/* Pad the pitch to a 32-byte boundary. */
+#define TEGRA_LINEAR_PITCH_ALIGNMENT 32
+
+struct tegra_fb_info {
+	struct tegra_dc_win	*win;
+	struct nvhost_device	*ndev;
+	struct fb_info		*info;
+	bool			valid;
+
+	struct resource		*fb_mem;
+
+	int			xres;
+	int			yres;
+};
+
+/* palette array used by the fbcon */
+static u32 pseudo_palette[16];
+
+static int tegra_fb_check_var(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+	struct tegra_dc *dc = tegra_fb->win->dc;
+	struct tegra_dc_out_ops *ops = dc->out_ops;
+	struct fb_videomode mode;
+
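+	/*
+	 * Reject modes that do not leave room for two full frames in the
+	 * framebuffer, since yres_virtual is doubled below to allow
+	 * panning-based double buffering.
+	 */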
+	if ((var->yres * var->xres * var->bits_per_pixel / 8 * 2) >
+	    info->screen_size)
+		return -EINVAL;
+
+	/* Apply the mode filter for HDMI only - LVDS supports only a fixed mode */
+	if (ops && ops->mode_filter) {
+
+		fb_var_to_videomode(&mode, var);
+		if (!ops->mode_filter(dc, &mode))
+			return -EINVAL;
+
+		/* Mode filter may have modified the mode */
+		fb_videomode_to_var(var, &mode);
+	}
+
+	/* Double yres_virtual to allow double buffering through pan_display */
+	var->yres_virtual = var->yres * 2;
+
+	return 0;
+}
+
+static int tegra_fb_set_par(struct fb_info *info)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+	struct fb_var_screeninfo *var = &info->var;
+	struct tegra_dc *dc = tegra_fb->win->dc;
+
+	if (var->bits_per_pixel) {
+		/* we only support RGB ordering for now */
+		switch (var->bits_per_pixel) {
+		case 32:
+			var->red.offset = 0;
+			var->red.length = 8;
+			var->green.offset = 8;
+			var->green.length = 8;
+			var->blue.offset = 16;
+			var->blue.length = 8;
+			var->transp.offset = 24;
+			var->transp.length = 8;
+			tegra_fb->win->fmt = TEGRA_WIN_FMT_R8G8B8A8;
+			break;
+		case 16:
+			var->red.offset = 11;
+			var->red.length = 5;
+			var->green.offset = 5;
+			var->green.length = 6;
+			var->blue.offset = 0;
+			var->blue.length = 5;
+			tegra_fb->win->fmt = TEGRA_WIN_FMT_B5G6R5;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+		/* if line_length unset, then pad the stride */
+		if (!info->fix.line_length) {
+			info->fix.line_length = var->xres * var->bits_per_pixel
+				/ 8;
+			info->fix.line_length = round_up(info->fix.line_length,
+						TEGRA_LINEAR_PITCH_ALIGNMENT);
+		}
+		tegra_fb->win->stride = info->fix.line_length;
+		tegra_fb->win->stride_uv = 0;
+		tegra_fb->win->phys_addr_u = 0;
+		tegra_fb->win->phys_addr_v = 0;
+	}
+
+	if (var->pixclock) {
+		bool stereo;
+		unsigned old_len = 0;
+		struct fb_videomode m;
+		struct fb_videomode *old_mode;
+
+		fb_var_to_videomode(&m, var);
+
+		/* Load the framebuffer info with the new mode details */
+		old_mode = info->mode;
+		old_len  = info->fix.line_length;
+
+		info->mode = (struct fb_videomode *)
+			fb_find_nearest_mode(&m, &info->modelist);
+		if (!info->mode) {
+			dev_warn(&tegra_fb->ndev->dev, "can't match video mode\n");
+			info->mode = old_mode;
+			return -EINVAL;
+		}
+
+		if (old_mode == info->mode)
+			return 0;
+
+		/* Update fix line_length and window stride as per new mode */
+		info->fix.line_length = var->xres * var->bits_per_pixel / 8;
+		info->fix.line_length = round_up(info->fix.line_length,
+			TEGRA_LINEAR_PITCH_ALIGNMENT);
+		tegra_fb->win->stride = info->fix.line_length;
+
+		/*
+		 * only enable stereo if the mode supports it and
+		 * client requests it
+		 */
+		stereo = !!(var->vmode & info->mode->vmode &
+#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
+					FB_VMODE_STEREO_FRAME_PACK);
+#else
+					FB_VMODE_STEREO_LEFT_RIGHT);
+#endif
+
+		/* Configure DC with new mode */
+		if (tegra_dc_set_fb_mode(dc, info->mode, stereo)) {
+			/* Error while configuring DC, fallback to old mode */
+			dev_warn(&tegra_fb->ndev->dev, "can't configure dc with mode %ux%u\n",
+				info->mode->xres, info->mode->yres);
+			info->mode = old_mode;
+			info->fix.line_length = old_len;
+			tegra_fb->win->stride = old_len;
+			return -EINVAL;
+		}
+
+		/* Reflect the mode change on the DC HW */
+		if (dc->enabled)
+			tegra_dc_disable(dc);
+		tegra_dc_enable(dc);
+
+		tegra_fb->win->w.full = dfixed_const(info->mode->xres);
+		tegra_fb->win->h.full = dfixed_const(info->mode->yres);
+		tegra_fb->win->out_w = info->mode->xres;
+		tegra_fb->win->out_h = info->mode->yres;
+	}
+	return 0;
+}
+
+static int tegra_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+	unsigned blue, unsigned transp, struct fb_info *info)
+{
+	struct fb_var_screeninfo *var = &info->var;
+
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		u32 v;
+
+		if (regno >= 16)
+			return -EINVAL;
+
+		red = (red >> (16 - info->var.red.length));
+		green = (green >> (16 - info->var.green.length));
+		blue = (blue >> (16 - info->var.blue.length));
+
+		v = (red << var->red.offset) |
+			(green << var->green.offset) |
+			(blue << var->blue.offset);
+
+		((u32 *)info->pseudo_palette)[regno] = v;
+	}
+
+	return 0;
+}
+
+
+static int tegra_fb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+	struct tegra_dc *dc = tegra_fb->win->dc;
+	int i;
+	u16 *red = cmap->red;
+	u16 *green = cmap->green;
+	u16 *blue = cmap->blue;
+	int start = cmap->start;
+
+	if (((unsigned)start > 255) || ((start + cmap->len) > 256))
+		return -EINVAL;
+
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+		info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		/*
+		 * For now, color schemes with cmap->len <= 16 are treated
+		 * as a special case of the basic color scheme in order to
+		 * support fbconsole. For DirectColor visuals (like the one
+		 * we actually have, which includes a HW LUT), the intended
+		 * behaviour is to program the LUT hardware with the real
+		 * values, even for small color maps with 16 or fewer
+		 * entries, and to program the pseudo_palette with the
+		 * identity transform.
+		 */
+		if (cmap->len <= 16) {
+			/* Low-color schemes like fbconsole */
+			u16 *transp = cmap->transp;
+			u_int vtransp = 0xffff;
+
+			for (i = 0; i < cmap->len; i++) {
+				if (transp)
+					vtransp = *transp++;
+				if (tegra_fb_setcolreg(start++, *red++,
+					*green++, *blue++,
+					vtransp, info))
+						return -EINVAL;
+			}
+		} else {
+			/* High-color schemes */
+			for (i = 0; i < cmap->len; i++) {
+				dc->fb_lut.r[start+i] = *red++ >> 8;
+				dc->fb_lut.g[start+i] = *green++ >> 8;
+				dc->fb_lut.b[start+i] = *blue++ >> 8;
+			}
+			tegra_dc_update_lut(dc, -1, -1);
+		}
+	}
+	return 0;
+}
+
+static int tegra_fb_blank(int blank, struct fb_info *info)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+	struct tegra_dc *dc = tegra_fb->win->dc;
+
+	switch (blank) {
+	case FB_BLANK_UNBLANK:
+		dev_dbg(&tegra_fb->ndev->dev, "unblank\n");
+		tegra_fb->win->flags = TEGRA_WIN_FLAG_ENABLED;
+		tegra_dc_enable(dc);
+		tegra_enable_backlight(dc);
+		tegra_dc_update_windows(&tegra_fb->win, 1);
+		tegra_dc_sync_windows(&tegra_fb->win, 1);
+		return 0;
+
+	case FB_BLANK_NORMAL:
+		dev_dbg(&tegra_fb->ndev->dev, "blank - normal\n");
+		tegra_dc_blank(dc);
+		return 0;
+
+	case FB_BLANK_VSYNC_SUSPEND:
+	case FB_BLANK_HSYNC_SUSPEND:
+	case FB_BLANK_POWERDOWN:
+		dev_dbg(&tegra_fb->ndev->dev, "blank - powerdown\n");
+		tegra_disable_backlight(dc);
+		schedule_delayed_work(&dc->disable_work, 5 * HZ);
+		return 0;
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static int tegra_fb_pan_display(struct fb_var_screeninfo *var,
+				struct fb_info *info)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+	u32 addr;
+
+	if (!tegra_fb->win->cur_handle) {
+
+		info->var.xoffset = var->xoffset;
+		info->var.yoffset = var->yoffset;
+
+		addr = info->fix.smem_start + (var->yoffset * info->fix.line_length) +
+			(var->xoffset * (var->bits_per_pixel/8));
+
+		tegra_fb->win->phys_addr = addr;
+		tegra_fb->win->flags = TEGRA_WIN_FLAG_ENABLED;
+		tegra_fb->win->virt_addr = info->screen_base;
+
+		tegra_dc_update_windows(&tegra_fb->win, 1);
+		tegra_dc_sync_windows(&tegra_fb->win, 1);
+	}
+
+	return 0;
+}
+
+static void tegra_fb_fillrect(struct fb_info *info,
+			      const struct fb_fillrect *rect)
+{
+	cfb_fillrect(info, rect);
+}
+
+static void tegra_fb_copyarea(struct fb_info *info,
+			      const struct fb_copyarea *region)
+{
+	cfb_copyarea(info, region);
+}
+
+static void tegra_fb_imageblit(struct fb_info *info,
+			       const struct fb_image *image)
+{
+	cfb_imageblit(info, image);
+}
+
+static int tegra_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+	struct tegra_fb_info *tegra_fb = (struct tegra_fb_info *)info->par;
+	struct tegra_fb_modedb modedb;
+	struct fb_modelist *modelist;
+	struct fb_vblank vblank = {};
+	int i;
+
+	switch (cmd) {
+	case FBIO_TEGRA_GET_MODEDB:
+		if (copy_from_user(&modedb, (void __user *)arg, sizeof(modedb)))
+			return -EFAULT;
+
+		i = 0;
+		list_for_each_entry(modelist, &info->modelist, list) {
+			struct fb_var_screeninfo var;
+
+			if (i >= modedb.modedb_len)
+				break;
+
+			/*
+			 * fb_videomode_to_var() doesn't fill out all the
+			 * members of fb_var_screeninfo
+			 */
+			memset(&var, 0x0, sizeof(var));
+
+			fb_videomode_to_var(&var, &modelist->mode);
+
+			if (copy_to_user((void __user *)&modedb.modedb[i],
+					 &var, sizeof(var)))
+				return -EFAULT;
+			i++;
+
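+			/*
+			 * Report each stereo mode a second time with the
+			 * stereo bits cleared so that clients also see its
+			 * 2D variant.
+			 */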
+			if (var.vmode & FB_VMODE_STEREO_MASK) {
+				if (i >= modedb.modedb_len)
+					break;
+				var.vmode &= ~FB_VMODE_STEREO_MASK;
+				if (copy_to_user(
+					(void __user *)&modedb.modedb[i],
+					 &var, sizeof(var)))
+					return -EFAULT;
+				i++;
+			}
+		}
+		modedb.modedb_len = i;
+
+		if (copy_to_user((void __user *)arg, &modedb, sizeof(modedb)))
+			return -EFAULT;
+		break;
+
+	case FBIOGET_VBLANK:
+		tegra_dc_get_fbvblank(tegra_fb->win->dc, &vblank);
+
+		if (copy_to_user(
+			(void __user *)arg, &vblank, sizeof(vblank)))
+			return -EFAULT;
+		break;
+
+	case FBIO_WAITFORVSYNC:
+		return tegra_dc_wait_for_vsync(tegra_fb->win->dc);
+
+	default:
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+int tegra_fb_get_mode(struct tegra_dc *dc)
+{
+	return dc->fb->info->mode->refresh;
+}
+
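+/*
+ * Switch the framebuffer to the mode whose refresh rate is closest to,
+ * but not below, the requested fps; returns -EIO when the modelist holds
+ * no such mode.
+ */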
+int tegra_fb_set_mode(struct tegra_dc *dc, int fps)
+{
+	bool stereo;
+	struct list_head *pos;
+	struct fb_videomode *best_mode = NULL;
+	int curr_diff = INT_MAX; /* refresh-rate difference of best_mode vs fps */
+	struct fb_modelist *modelist;
+	struct fb_info *info = dc->fb->info;
+
+	list_for_each(pos, &info->modelist) {
+		struct fb_videomode *mode;
+
+		modelist = list_entry(pos, struct fb_modelist, list);
+		mode = &modelist->mode;
+		if (fps <= mode->refresh && curr_diff > (mode->refresh - fps)) {
+			curr_diff = mode->refresh - fps;
+			best_mode = mode;
+		}
+	}
+	if (best_mode) {
+		info->mode = best_mode;
+		stereo = !!(info->var.vmode & info->mode->vmode &
+#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
+				FB_VMODE_STEREO_FRAME_PACK);
+#else
+				FB_VMODE_STEREO_LEFT_RIGHT);
+#endif
+		return tegra_dc_set_fb_mode(dc, best_mode, stereo);
+	}
+	return -EIO;
+}
+
+static struct fb_ops tegra_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = tegra_fb_check_var,
+	.fb_set_par = tegra_fb_set_par,
+	.fb_setcmap = tegra_fb_setcmap,
+	.fb_blank = tegra_fb_blank,
+	.fb_pan_display = tegra_fb_pan_display,
+	.fb_fillrect = tegra_fb_fillrect,
+	.fb_copyarea = tegra_fb_copyarea,
+	.fb_imageblit = tegra_fb_imageblit,
+	.fb_ioctl = tegra_fb_ioctl,
+};
+
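+/*
+ * Return the largest mode in the list that covers the requested
+ * resolution, preferring the higher refresh rate when two candidates
+ * tie; with a 0x0 request this selects the largest mode overall.
+ * Returns NULL when no mode is big enough.
+ */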
+const struct fb_videomode *tegra_fb_find_best_mode(
+	struct fb_var_screeninfo *var,
+	struct list_head *head)
+{
+	struct list_head *pos;
+	struct fb_modelist *modelist;
+	struct fb_videomode *mode, *best = NULL;
+	int diff = 0;
+
+	list_for_each(pos, head) {
+		int d;
+
+		modelist = list_entry(pos, struct fb_modelist, list);
+		mode = &modelist->mode;
+
+		if (mode->xres >= var->xres && mode->yres >= var->yres) {
+			d = (mode->xres - var->xres) +
+				(mode->yres - var->yres);
+			if (diff < d) {
+				diff = d;
+				best = mode;
+			} else if (diff == d && best &&
+				   mode->refresh > best->refresh)
+				best = mode;
+		}
+	}
+	return best;
+}
+
+static int tegra_fb_activate_mode(struct tegra_fb_info *fb_info,
+				struct fb_var_screeninfo *var)
+{
+	int err;
+	struct fb_info *info = fb_info->info;
+
+	var->activate |= FB_ACTIVATE_FORCE;
+	console_lock();
+	info->flags |= FBINFO_MISC_USEREVENT;
+	err = fb_set_var(info, var);
+	info->flags &= ~FBINFO_MISC_USEREVENT;
+	console_unlock();
+	if (err)
+		return err;
+	return 0;
+}
+
+void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+			      struct fb_monspecs *specs,
+			      bool (*mode_filter)(const struct tegra_dc *dc,
+						  struct fb_videomode *mode))
+{
+	int i;
+	int ret = 0;
+	struct fb_event event;
+	struct fb_info *info = fb_info->info;
+	const struct fb_videomode *best_mode = NULL;
+	struct fb_var_screeninfo var = {0,};
+
+	mutex_lock(&fb_info->info->lock);
+	fb_destroy_modedb(fb_info->info->monspecs.modedb);
+
+	fb_destroy_modelist(&fb_info->info->modelist);
+
+	if (specs == NULL) {
+		struct tegra_dc_mode mode;
+		memset(&fb_info->info->monspecs, 0x0,
+		       sizeof(fb_info->info->monspecs));
+		memset(&mode, 0x0, sizeof(mode));
+
+		/*
+		 * Reset the video mode properties to prevent garbage from
+		 * being displayed on the 'mode' device.
+		 */
+		fb_info->info->mode = NULL;
+
+		tegra_dc_set_mode(fb_info->win->dc, &mode);
+		mutex_unlock(&fb_info->info->lock);
+		return;
+	}
+
+	memcpy(&fb_info->info->monspecs, specs,
+	       sizeof(fb_info->info->monspecs));
+	fb_info->info->mode = specs->modedb;
+
+	/* Prepare a mode db */
+	for (i = 0; i < specs->modedb_len; i++) {
+		if (info->fbops->fb_check_var) {
+			struct fb_videomode m;
+
+			/* Call mode filter to check mode */
+			fb_videomode_to_var(&var, &specs->modedb[i]);
+			if (!(info->fbops->fb_check_var(&var, info))) {
+				fb_var_to_videomode(&m, &var);
+				fb_add_videomode(&m,
+						 &fb_info->info->modelist);
+			}
+		} else {
+			fb_add_videomode(&specs->modedb[i],
+					 &fb_info->info->modelist);
+		}
+	}
+
+	/* Get the best mode from modedb and apply on fb */
+	var.xres = 0;
+	var.yres = 0;
+	best_mode = tegra_fb_find_best_mode(&var, &info->modelist);
+
+	/* Update the framebuffer with the best mode */
+	if (!best_mode) {
+		mutex_unlock(&fb_info->info->lock);
+		return;
+	}
+	fb_videomode_to_var(&var, best_mode);
+
+	/* TODO: Get proper way of getting rid of a 0 bpp */
+	if (!var.bits_per_pixel)
+		var.bits_per_pixel = 32;
+
+	memcpy(&info->var, &var, sizeof(struct fb_var_screeninfo));
+
+	ret = tegra_fb_activate_mode(fb_info, &var);
+	if (ret) {
+		mutex_unlock(&fb_info->info->lock);
+		return;
+	}
+
+	event.info = fb_info->info;
+
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
+	/*
+	 * Lock the console before sending the notification; the fbconsole
+	 * on HDMI might be using it.
+	 */
+	console_lock();
+#endif
+	fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
+	/* Unlock the console */
+	console_unlock();
+#endif
+
+	mutex_unlock(&fb_info->info->lock);
+}
+
+struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
+					struct tegra_dc *dc,
+					struct tegra_fb_data *fb_data,
+					struct resource *fb_mem)
+{
+	struct tegra_dc_win *win;
+	struct fb_info *info;
+	struct tegra_fb_info *tegra_fb;
+	void __iomem *fb_base = NULL;
+	unsigned long fb_size = 0;
+	unsigned long fb_phys = 0;
+	int ret = 0;
+	unsigned stride;
+
+	win = tegra_dc_get_window(dc, fb_data->win);
+	if (!win) {
+		dev_err(&ndev->dev, "dc does not have a window at index %d\n",
+			fb_data->win);
+		return ERR_PTR(-ENOENT);
+	}
+
+	info = framebuffer_alloc(sizeof(struct tegra_fb_info), &ndev->dev);
+	if (!info) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	tegra_fb = info->par;
+	tegra_fb->win = win;
+	tegra_fb->ndev = ndev;
+	tegra_fb->fb_mem = fb_mem;
+	tegra_fb->xres = fb_data->xres;
+	tegra_fb->yres = fb_data->yres;
+
+	if (fb_mem) {
+		fb_size = resource_size(fb_mem);
+		fb_phys = fb_mem->start;
+		memblock_remove(fb_phys, fb_size);
+		fb_base = ioremap_nocache(fb_phys, fb_size);
+		if (!fb_base) {
+			dev_err(&ndev->dev, "fb can't be mapped\n");
+			ret = -EBUSY;
+			goto err_free;
+		}
+		tegra_fb->valid = true;
+	}
+
+	info->fix.line_length = fb_data->xres * fb_data->bits_per_pixel / 8;
+
+	stride = tegra_dc_get_stride(dc, 0);
+	if (!stride) /* pad the stride by default */
+		stride = round_up(info->fix.line_length,
+			TEGRA_LINEAR_PITCH_ALIGNMENT);
+
+	info->fbops = &tegra_fb_ops;
+	info->pseudo_palette = pseudo_palette;
+	info->screen_base = fb_base;
+	info->screen_size = fb_size;
+
+	strlcpy(info->fix.id, "tegra_fb", sizeof(info->fix.id));
+	info->fix.type		= FB_TYPE_PACKED_PIXELS;
+	info->fix.visual	= FB_VISUAL_TRUECOLOR;
+	info->fix.xpanstep	= 1;
+	info->fix.ypanstep	= 1;
+	info->fix.accel		= FB_ACCEL_NONE;
+	info->fix.smem_start	= fb_phys;
+	info->fix.smem_len	= fb_size;
+	info->fix.line_length = stride;
+
+	info->var.xres			= fb_data->xres;
+	info->var.yres			= fb_data->yres;
+	info->var.xres_virtual		= fb_data->xres;
+	info->var.yres_virtual		= fb_data->yres * 2;
+	info->var.bits_per_pixel	= fb_data->bits_per_pixel;
+	info->var.activate		= FB_ACTIVATE_VBL;
+	info->var.height		= tegra_dc_get_out_height(dc);
+	info->var.width			= tegra_dc_get_out_width(dc);
+	info->var.pixclock		= 0;
+	info->var.left_margin		= 0;
+	info->var.right_margin		= 0;
+	info->var.upper_margin		= 0;
+	info->var.lower_margin		= 0;
+	info->var.hsync_len		= 0;
+	info->var.vsync_len		= 0;
+	info->var.vmode			= FB_VMODE_NONINTERLACED;
+
+	win->x.full = dfixed_const(0);
+	win->y.full = dfixed_const(0);
+	win->w.full = dfixed_const(fb_data->xres);
+	win->h.full = dfixed_const(fb_data->yres);
+	/* TODO: set to the DC output resolution */
+	win->out_x = 0;
+	win->out_y = 0;
+	win->out_w = fb_data->xres;
+	win->out_h = fb_data->yres;
+	win->z = 0;
+	win->phys_addr = fb_phys;
+	win->virt_addr = fb_base;
+	win->phys_addr_u = 0;
+	win->phys_addr_v = 0;
+	win->stride = info->fix.line_length;
+	win->stride_uv = 0;
+	win->flags = TEGRA_WIN_FLAG_ENABLED;
+
+	if (fb_mem)
+		tegra_fb_set_par(info);
+
+	if (register_framebuffer(info)) {
+		dev_err(&ndev->dev, "failed to register framebuffer\n");
+		ret = -ENODEV;
+		goto err_iounmap_fb;
+	}
+
+	tegra_fb->info = info;
+
+	dev_info(&ndev->dev, "fb probed\n");
+
+	if (fb_data->flags & TEGRA_FB_FLIP_ON_PROBE) {
+		tegra_dc_update_windows(&tegra_fb->win, 1);
+		tegra_dc_sync_windows(&tegra_fb->win, 1);
+	}
+
+	if (dc->mode.pclk > 1000) {
+		struct tegra_dc_mode *mode = &dc->mode;
+		struct fb_videomode vmode;
+
+		if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
+			info->var.pixclock = KHZ2PICOS(mode->rated_pclk / 1000);
+		else
+			info->var.pixclock = KHZ2PICOS(mode->pclk / 1000);
+		info->var.left_margin = mode->h_back_porch;
+		info->var.right_margin = mode->h_front_porch;
+		info->var.upper_margin = mode->v_back_porch;
+		info->var.lower_margin = mode->v_front_porch;
+		info->var.hsync_len = mode->h_sync_width;
+		info->var.vsync_len = mode->v_sync_width;
+
+		/* Keep info->var consistent with info->modelist. */
+		fb_var_to_videomode(&vmode, &info->var);
+		fb_add_videomode(&vmode, &info->modelist);
+	}
+
+	return tegra_fb;
+
+err_iounmap_fb:
+	if (fb_base)
+		iounmap(fb_base);
+err_free:
+	framebuffer_release(info);
+err:
+	return ERR_PTR(ret);
+}
+
+void tegra_fb_unregister(struct tegra_fb_info *fb_info)
+{
+	struct fb_info *info = fb_info->info;
+
+	unregister_framebuffer(info);
+
+	iounmap(info->screen_base);
+	framebuffer_release(info);
+}
diff --git a/drivers/staging/tegra/video/host/Makefile b/drivers/staging/tegra/video/host/Makefile
new file mode 100644
index 000000000000..f6edb2850f6e
--- /dev/null
+++ b/drivers/staging/tegra/video/host/Makefile
@@ -0,0 +1,31 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+nvhost-objs = \
+	nvhost_acm.o \
+	nvhost_syncpt.o \
+	nvhost_cdma.o \
+	nvhost_intr.o \
+	nvhost_channel.o \
+	nvhost_job.o \
+	bus.o \
+	dev.o \
+	debug.o \
+	bus_client.o \
+	chip_support.o \
+	nvhost_memmgr.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += mpe/
+obj-$(CONFIG_TEGRA_GRHOST) += gr3d/
+obj-$(CONFIG_TEGRA_GRHOST) += host1x/
+obj-$(CONFIG_TEGRA_GRHOST) += t20/
+obj-$(CONFIG_TEGRA_GRHOST) += t30/
+obj-$(CONFIG_TEGRA_GRHOST) += gr2d/
+obj-$(CONFIG_TEGRA_GRHOST) += isp/
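+# the vi/ host client is only built when the in-tree CONFIG_VIDEO_TEGRA
+# camera driver is not built in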
+ifneq ($(CONFIG_VIDEO_TEGRA),y)
+obj-$(CONFIG_TEGRA_GRHOST) += vi/
+endif
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost.o
+
+obj-$(CONFIG_TEGRA_GRHOST_USE_NVMAP) += nvmap.o
diff --git a/drivers/staging/tegra/video/host/bus.c b/drivers/staging/tegra/video/host/bus.c
new file mode 100644
index 000000000000..f0784ab091ff
--- /dev/null
+++ b/drivers/staging/tegra/video/host/bus.c
@@ -0,0 +1,715 @@
+/*
+ * drivers/video/tegra/host/bus.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@google.com>
+ *
+ * Copyright (C) 2010-2012 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/nvhost.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include "bus.h"
+#include "dev.h"
+
+struct nvhost_bus *nvhost_bus_inst;
+struct nvhost_master *nvhost;
+
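+/*
+ * Resource accessors analogous to the platform bus helpers: look up the
+ * num'th resource of the given type on an nvhost device.
+ */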
+struct resource *nvhost_get_resource(struct nvhost_device *dev,
+				       unsigned int type, unsigned int num)
+{
+	int i;
+
+	for (i = 0; i < dev->num_resources; i++) {
+		struct resource *r = &dev->resource[i];
+
+		if (type == resource_type(r) && num-- == 0)
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource);
+
+int nvhost_get_irq(struct nvhost_device *dev, unsigned int num)
+{
+	struct resource *r = nvhost_get_resource(dev, IORESOURCE_IRQ, num);
+
+	return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq);
+
+struct resource *nvhost_get_resource_byname(struct nvhost_device *dev,
+					      unsigned int type,
+					      const char *name)
+{
+	int i;
+
+	for (i = 0; i < dev->num_resources; i++) {
+		struct resource *r = &dev->resource[i];
+
+		if (type == resource_type(r) && !strcmp(r->name, name))
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource_byname);
+
+int nvhost_get_irq_byname(struct nvhost_device *dev, const char *name)
+{
+	struct resource *r = nvhost_get_resource_byname(dev, IORESOURCE_IRQ,
+							  name);
+
+	return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq_byname);
+
+static struct nvhost_device_id *nvhost_bus_match_id(struct nvhost_device *dev,
+	struct nvhost_device_id *id_table)
+{
+	while (id_table->name[0]) {
+		if (strcmp(dev->name, id_table->name) == 0
+				&& dev->version == id_table->version)
+			return id_table;
+		id_table++;
+	}
+	return NULL;
+}
+
+static int nvhost_bus_match(struct device *_dev, struct device_driver *drv)
+{
+	struct nvhost_device *dev = to_nvhost_device(_dev);
+	struct nvhost_driver *ndrv = to_nvhost_driver(drv);
+
+	/* Attempt an OF style match first */
+	if (of_driver_match_device(_dev, drv))
+		return 1;
+
+	/* check if the driver supports multiple devices through an id_table */
+	if (ndrv->id_table)
+		return nvhost_bus_match_id(dev, ndrv->id_table) != NULL;
+	else /* driver does not support id_table */
+		return !strcmp(dev->name, drv->name);
+}
+
+static int nvhost_drv_probe(struct device *_dev)
+{
+	struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+	struct nvhost_device *dev = to_nvhost_device(_dev);
+
+	if (drv && drv->probe) {
+		if (drv->id_table)
+			return drv->probe(dev,
+				nvhost_bus_match_id(dev, drv->id_table));
+		else
+			return drv->probe(dev, NULL);
+	} else {
+		return -ENODEV;
+	}
+}
+
+static int nvhost_drv_remove(struct device *_dev)
+{
+	struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+	struct nvhost_device *dev = to_nvhost_device(_dev);
+
+	return drv->remove(dev);
+}
+
+static void nvhost_drv_shutdown(struct device *_dev)
+{
+	struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+	struct nvhost_device *dev = to_nvhost_device(_dev);
+
+	drv->shutdown(dev);
+}
+
+int nvhost_driver_register(struct nvhost_driver *drv)
+{
+	if (!nvhost_bus_inst)
+		return -ENODEV;
+
+	drv->driver.bus = nvhost_bus_inst->nvhost_bus_type;
+	if (drv->probe)
+		drv->driver.probe = nvhost_drv_probe;
+	if (drv->remove)
+		drv->driver.remove = nvhost_drv_remove;
+	if (drv->shutdown)
+		drv->driver.shutdown = nvhost_drv_shutdown;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(nvhost_driver_register);
+
+void nvhost_driver_unregister(struct nvhost_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(nvhost_driver_unregister);
+
+int nvhost_add_devices(struct nvhost_device **devs, int num)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < num; i++) {
+		ret = nvhost_device_register(devs[i]);
+		if (ret) {
+			while (--i >= 0)
+				nvhost_device_unregister(devs[i]);
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvhost_add_devices);
+
+static int of_nvhost_device_register(struct nvhost_device *dev,
+				     const char *bus_id,
+				     struct device_node *node)
+{
+	int i, ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	device_initialize(&dev->dev);
+
+	/*  If the dev does not have a parent, assign host1x as parent */
+	if (!dev->dev.parent && nvhost && nvhost->dev != dev)
+		dev->dev.parent = &nvhost->dev->dev;
+
+	dev->dev.bus = nvhost_bus_inst->nvhost_bus_type;
+	dev->dev.of_node = of_node_get(node);
+
+	if (node && !dev->name) {
+		if (bus_id)
+			dev_set_name(&dev->dev, "%s", bus_id);
+		else
+			of_device_make_bus_id(&dev->dev);
+		dev->name = dev_name(&dev->dev);
+	} else {
+		if (dev->id != -1)
+			dev_set_name(&dev->dev, "%s.%d", dev->name,  dev->id);
+		else
+			dev_set_name(&dev->dev, "%s", dev->name);
+	}
+
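+	/*
+	 * Claim every resource under its parent (falling back to the global
+	 * iomem/ioport trees); on failure, release whatever was already
+	 * inserted before bailing out.
+	 */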
+	for (i = 0; i < dev->num_resources; i++) {
+		struct resource *p, *r = &dev->resource[i];
+
+		if (r->name == NULL)
+			r->name = dev_name(&dev->dev);
+
+		p = r->parent;
+		if (!p) {
+			if (resource_type(r) == IORESOURCE_MEM)
+				p = &iomem_resource;
+			else if (resource_type(r) == IORESOURCE_IO)
+				p = &ioport_resource;
+		}
+
+		if (p && insert_resource(p, r)) {
+			pr_err("%s: failed to claim resource %d\n",
+			       dev_name(&dev->dev), i);
+			ret = -EBUSY;
+			goto failed;
+		}
+	}
+
+	if (dev->dev.of_node) {
+		dev->dev.id = -1;
+
+		if (!dev->dev.parent)
+			set_dev_node(&dev->dev, of_node_to_nid(dev->dev.of_node));
+	}
+
+	ret = device_add(&dev->dev);
+	if (ret == 0)
+		return ret;
+
+failed:
+	while (--i >= 0) {
+		struct resource *r = &dev->resource[i];
+		unsigned long type = resource_type(r);
+
+		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+			release_resource(r);
+	}
+
+	return ret;
+}
+
+int nvhost_device_register(struct nvhost_device *dev)
+{
+	return of_nvhost_device_register(dev, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(nvhost_device_register);
+
+int of_nvhost_device_create(struct device_node *np, const char *bus_id,
+			    void *aux_dev)
+{
+	struct nvhost_device *dev;
+	int rc, i, num_reg = 0, num_irq;
+	struct resource *res, temp_res;
+
+	if (!of_device_is_available(np))
+		return -ENODEV;
+
+	if (!nvhost_bus_inst && nvhost_bus_init())
+		return -ENODEV;
+
+	if (!aux_dev) {
+		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+		if (!dev)
+			return -ENOMEM;
+		dev->is_dynamic = true;
+	} else {
+		dev = aux_dev;
+	}
+
+	/* count the io and irq resources */
+	while (of_address_to_resource(np, num_reg, &temp_res) == 0)
+		num_reg++;
+	num_irq = of_irq_count(np);
+
+	/* Populate the resource table */
+	if (num_irq || num_reg) {
+		res = kcalloc(num_irq + num_reg, sizeof(*res), GFP_KERNEL);
+		if (!res) {
+			if (dev->is_dynamic)
+				kfree(dev);
+			return -ENOMEM;
+		}
+
+		dev->num_resources = num_reg + num_irq;
+		dev->resource = res;
+		for (i = 0; i < num_reg; i++, res++) {
+			rc = of_address_to_resource(np, i, res);
+			WARN_ON(rc);
+		}
+		WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq);
+	}
+
+	return of_nvhost_device_register(dev, bus_id, np);
+}
+
+void nvhost_device_unregister(struct nvhost_device *dev)
+{
+	int i;
+	if (dev) {
+		device_del(&dev->dev);
+
+		for (i = 0; i < dev->num_resources; i++) {
+			struct resource *r = &dev->resource[i];
+			unsigned long type = resource_type(r);
+
+			if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+				release_resource(r);
+		}
+
+		if (dev->dev.of_node)
+			kfree(dev->resource);
+
+		of_node_put(dev->dev.of_node);
+		put_device(&dev->dev);
+
+		if (dev->is_dynamic)
+			kfree(dev);
+	}
+}
+EXPORT_SYMBOL_GPL(nvhost_device_unregister);
+
+#ifdef CONFIG_PM_SLEEP
+
+static int nvhost_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+	struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+	struct nvhost_device *pdev = to_nvhost_device(dev);
+	int ret = 0;
+
+	if (dev->driver && pdrv->suspend)
+		ret = pdrv->suspend(pdev, mesg);
+
+	return ret;
+}
+
+static int nvhost_legacy_resume(struct device *dev)
+{
+	struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+	struct nvhost_device *pdev = to_nvhost_device(dev);
+	int ret = 0;
+
+	if (dev->driver && pdrv->resume)
+		ret = pdrv->resume(pdev);
+
+	return ret;
+}
+
+static int nvhost_pm_prepare(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (drv && drv->pm && drv->pm->prepare)
+		ret = drv->pm->prepare(dev);
+
+	return ret;
+}
+
+static void nvhost_pm_complete(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+
+	if (drv && drv->pm && drv->pm->complete)
+		drv->pm->complete(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define nvhost_pm_prepare		NULL
+#define nvhost_pm_complete		NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+int __weak nvhost_pm_suspend(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->suspend)
+			ret = drv->pm->suspend(dev);
+	} else {
+		ret = nvhost_legacy_suspend(dev, PMSG_SUSPEND);
+	}
+
+	return ret;
+}
+
+int __weak nvhost_pm_suspend_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->suspend_noirq)
+			ret = drv->pm->suspend_noirq(dev);
+	}
+
+	return ret;
+}
+
+int __weak nvhost_pm_resume(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->resume)
+			ret = drv->pm->resume(dev);
+	} else {
+		ret = nvhost_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+int __weak nvhost_pm_resume_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->resume_noirq)
+			ret = drv->pm->resume_noirq(dev);
+	}
+
+	return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define nvhost_pm_suspend		NULL
+#define nvhost_pm_resume		NULL
+#define nvhost_pm_suspend_noirq	NULL
+#define nvhost_pm_resume_noirq	NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int nvhost_pm_freeze(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->freeze)
+			ret = drv->pm->freeze(dev);
+	} else {
+		ret = nvhost_legacy_suspend(dev, PMSG_FREEZE);
+	}
+
+	return ret;
+}
+
+static int nvhost_pm_freeze_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->freeze_noirq)
+			ret = drv->pm->freeze_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int nvhost_pm_thaw(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->thaw)
+			ret = drv->pm->thaw(dev);
+	} else {
+		ret = nvhost_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+static int nvhost_pm_thaw_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->thaw_noirq)
+			ret = drv->pm->thaw_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int nvhost_pm_poweroff(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->poweroff)
+			ret = drv->pm->poweroff(dev);
+	} else {
+		ret = nvhost_legacy_suspend(dev, PMSG_HIBERNATE);
+	}
+
+	return ret;
+}
+
+static int nvhost_pm_poweroff_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->poweroff_noirq)
+			ret = drv->pm->poweroff_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int nvhost_pm_restore(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->restore)
+			ret = drv->pm->restore(dev);
+	} else {
+		ret = nvhost_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+static int nvhost_pm_restore_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->restore_noirq)
+			ret = drv->pm->restore_noirq(dev);
+	}
+
+	return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define nvhost_pm_freeze		NULL
+#define nvhost_pm_thaw		NULL
+#define nvhost_pm_poweroff		NULL
+#define nvhost_pm_restore		NULL
+#define nvhost_pm_freeze_noirq	NULL
+#define nvhost_pm_thaw_noirq		NULL
+#define nvhost_pm_poweroff_noirq	NULL
+#define nvhost_pm_restore_noirq	NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+#ifdef CONFIG_PM_RUNTIME
+
+int __weak nvhost_pm_runtime_suspend(struct device *dev)
+{
+	return pm_generic_runtime_suspend(dev);
+}
+
+int __weak nvhost_pm_runtime_resume(struct device *dev)
+{
+	return pm_generic_runtime_resume(dev);
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define nvhost_pm_runtime_suspend NULL
+#define nvhost_pm_runtime_resume NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
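+/*
+ * Every callback defers to the driver's own dev_pm_ops when one is set
+ * and falls back to the legacy nvhost suspend/resume hooks otherwise;
+ * the NULL stubs above drop the callbacks on kernels built without the
+ * corresponding PM feature.
+ */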
+static const struct dev_pm_ops nvhost_dev_pm_ops = {
+	.prepare = nvhost_pm_prepare,
+	.complete = nvhost_pm_complete,
+	.suspend = nvhost_pm_suspend,
+	.resume = nvhost_pm_resume,
+	.freeze = nvhost_pm_freeze,
+	.thaw = nvhost_pm_thaw,
+	.poweroff = nvhost_pm_poweroff,
+	.restore = nvhost_pm_restore,
+	.suspend_noirq = nvhost_pm_suspend_noirq,
+	.resume_noirq = nvhost_pm_resume_noirq,
+	.freeze_noirq = nvhost_pm_freeze_noirq,
+	.thaw_noirq = nvhost_pm_thaw_noirq,
+	.poweroff_noirq = nvhost_pm_poweroff_noirq,
+	.restore_noirq = nvhost_pm_restore_noirq,
+	.runtime_suspend = nvhost_pm_runtime_suspend,
+	.runtime_resume = nvhost_pm_runtime_resume,
+};
+
+static int set_parent(struct device *dev, void *data)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	struct nvhost_master *host = data;
+
+	if (!dev->parent && ndev != host->dev)
+		dev->parent = &host->dev->dev;
+	return 0;
+}
+
+int nvhost_bus_add_host(struct nvhost_master *host)
+{
+	nvhost = host;
+
+	/*  Assign host1x as parent to all devices in nvhost bus */
+	bus_for_each_dev(nvhost_bus_inst->nvhost_bus_type, NULL, host, set_parent);
+
+	return 0;
+}
+
+struct nvhost_bus *nvhost_bus_get(void)
+{
+	return nvhost_bus_inst;
+}
+
+static struct bus_type nvhost_bus_type = {
+	.name		= "nvhost",
+	.match		= nvhost_bus_match,
+	.pm		= &nvhost_dev_pm_ops,
+};
+
+int nvhost_bus_init(void)
+{
+	int err;
+	struct nvhost_chip_support *chip_ops;
+
+	pr_info("host1x bus init\n");
+
+	nvhost_bus_inst = kzalloc(sizeof(*nvhost_bus_inst), GFP_KERNEL);
+	if (nvhost_bus_inst == NULL) {
+		pr_err("%s: Cannot allocate nvhost_bus\n", __func__);
+		return -ENOMEM;
+	}
+
+	chip_ops = kzalloc(sizeof(*chip_ops), GFP_KERNEL);
+	if (chip_ops == NULL) {
+		pr_err("%s: Cannot allocate nvhost_chip_support\n", __func__);
+		kfree(nvhost_bus_inst);
+		nvhost_bus_inst = NULL;
+		return -ENOMEM;
+	}
+
+	nvhost_bus_inst->nvhost_bus_type = &nvhost_bus_type;
+	nvhost_bus_inst->nvhost_chip_ops = chip_ops;
+
+	err = bus_register(nvhost_bus_inst->nvhost_bus_type);
+
+	if (err) {
+		kfree(nvhost_bus_inst);
+		kfree(chip_ops);
+		nvhost_bus_inst = NULL;
+		chip_ops = NULL;
+	}
+
+	return err;
+}
diff --git a/drivers/staging/tegra/video/host/bus.h b/drivers/staging/tegra/video/host/bus.h
new file mode 100644
index 000000000000..ec542f23f513
--- /dev/null
+++ b/drivers/staging/tegra/video/host/bus.h
@@ -0,0 +1,38 @@
+/*
+ * drivers/video/tegra/host/bus.h
+ *
+ * Tegra Graphics Host bus API header
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_BUS_H
+#define __NVHOST_BUS_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+#include "chip_support.h"
+
+struct nvhost_bus {
+	struct nvhost_chip_support *nvhost_chip_ops;
+	struct bus_type *nvhost_bus_type;
+};
+
+struct nvhost_bus *nvhost_bus_get(void);
+
+extern struct nvhost_bus *nvhost_bus_inst;
+
+#endif
diff --git a/drivers/staging/tegra/video/host/bus_client.c b/drivers/staging/tegra/video/host/bus_client.c
new file mode 100644
index 000000000000..c91145788f36
--- /dev/null
+++ b/drivers/staging/tegra/video/host/bus_client.c
@@ -0,0 +1,669 @@
+/*
+ * drivers/video/tegra/host/bus_client.c
+ *
+ * Tegra Graphics Host Client Module
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/clk.h>
+#include <linux/hrtimer.h>
+
+#include <trace/events/nvhost.h>
+
+#include <linux/io.h>
+
+#include <linux/nvhost.h>
+#include <linux/nvhost_ioctl.h>
+
+#include "debug.h"
+#include "bus_client.h"
+#include "dev.h"
+#include "nvhost_memmgr.h"
+#include "chip_support.h"
+#include "nvhost_acm.h"
+
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+#include "nvhost_hwctx.h"
+
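+/*
+ * Check that a 'count'-word register access starting at 'offset' fits
+ * inside the device aperture; the second comparison guards against
+ * arithmetic wraparound.
+ */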
+static int validate_reg(struct nvhost_device *ndev, u32 offset, int count)
+{
+	struct resource *r = nvhost_get_resource(ndev, IORESOURCE_MEM, 0);
+	int err = 0;
+
+	if (offset + 4 * count > resource_size(r)
+			|| (offset + 4 * count < offset))
+		err = -EPERM;
+
+	return err;
+}
+
+int nvhost_read_module_regs(struct nvhost_device *ndev,
+			u32 offset, int count, u32 *values)
+{
+	void __iomem *p = ndev->aperture + offset;
+	int err;
+
+	/* verify offset */
+	err = validate_reg(ndev, offset, count);
+	if (err)
+		return err;
+
+	nvhost_module_busy(ndev);
+	while (count--) {
+		*(values++) = readl(p);
+		p += 4;
+	}
+	rmb();
+	nvhost_module_idle(ndev);
+
+	return 0;
+}
+
+int nvhost_write_module_regs(struct nvhost_device *ndev,
+			u32 offset, int count, const u32 *values)
+{
+	void __iomem *p = ndev->aperture + offset;
+	int err;
+
+	/* verify offset */
+	err = validate_reg(ndev, offset, count);
+	if (err)
+		return err;
+
+	nvhost_module_busy(ndev);
+	while (count--) {
+		writel(*(values++), p);
+		p += 4;
+	}
+	wmb();
+	nvhost_module_idle(ndev);
+
+	return 0;
+}
+
+struct nvhost_channel_userctx {
+	struct nvhost_channel *ch;
+	struct nvhost_hwctx *hwctx;
+	struct nvhost_submit_hdr_ext hdr;
+	int num_relocshifts;
+	struct nvhost_job *job;
+	struct mem_mgr *memmgr;
+	u32 timeout;
+	u32 priority;
+	int clientid;
+};
+
+static int nvhost_channelrelease(struct inode *inode, struct file *filp)
+{
+	struct nvhost_channel_userctx *priv = filp->private_data;
+
+	trace_nvhost_channel_release(priv->ch->dev->name);
+
+	filp->private_data = NULL;
+
+	nvhost_module_remove_client(priv->ch->dev, priv);
+	nvhost_putchannel(priv->ch, priv->hwctx);
+
+	if (priv->hwctx)
+		priv->ch->ctxhandler->put(priv->hwctx);
+
+	if (priv->job)
+		nvhost_job_put(priv->job);
+
+	mem_op().put_mgr(priv->memmgr);
+	kfree(priv);
+	return 0;
+}
+
+static int nvhost_channelopen(struct inode *inode, struct file *filp)
+{
+	struct nvhost_channel_userctx *priv;
+	struct nvhost_channel *ch;
+
+	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
+	ch = nvhost_getchannel(ch);
+	if (!ch)
+		return -ENOMEM;
+	trace_nvhost_channel_open(ch->dev->name);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		nvhost_putchannel(ch, NULL);
+		return -ENOMEM;
+	}
+	filp->private_data = priv;
+	priv->ch = ch;
+	if (nvhost_module_add_client(ch->dev, priv))
+		goto fail;
+
+	if (ch->ctxhandler && ch->ctxhandler->alloc) {
+		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
+		if (!priv->hwctx)
+			goto fail;
+	}
+	priv->priority = NVHOST_PRIORITY_MEDIUM;
+	priv->clientid = atomic_add_return(1,
+			&nvhost_get_host(ch->dev)->clientid);
+	priv->timeout = CONFIG_TEGRA_GRHOST_DEFAULT_TIMEOUT;
+
+	return 0;
+fail:
+	nvhost_channelrelease(inode, filp);
+	return -ENOMEM;
+}
+
+static int set_submit(struct nvhost_channel_userctx *ctx)
+{
+	struct nvhost_device *ndev = ctx->ch->dev;
+	struct nvhost_master *host = nvhost_get_host(ndev);
+
+	/* submit should have at least 1 cmdbuf */
+	if (!ctx->hdr.num_cmdbufs ||
+			!nvhost_syncpt_is_valid(&host->syncpt,
+				ctx->hdr.syncpt_id))
+		return -EIO;
+
+	if (!ctx->memmgr) {
+		dev_err(&ndev->dev, "no nvmap context set\n");
+		return -EFAULT;
+	}
+
+	ctx->job = nvhost_job_alloc(ctx->ch,
+			ctx->hwctx,
+			&ctx->hdr,
+			ctx->memmgr,
+			ctx->priority,
+			ctx->clientid);
+	if (!ctx->job)
+		return -ENOMEM;
+	ctx->job->timeout = ctx->timeout;
+
+	if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
+		ctx->num_relocshifts = ctx->hdr.num_relocs;
+
+	return 0;
+}
+
+static void reset_submit(struct nvhost_channel_userctx *ctx)
+{
+	ctx->hdr.num_cmdbufs = 0;
+	ctx->hdr.num_relocs = 0;
+	ctx->num_relocshifts = 0;
+	ctx->hdr.num_waitchks = 0;
+
+	if (ctx->job) {
+		nvhost_job_put(ctx->job);
+		ctx->job = NULL;
+	}
+}
+
+static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
+				size_t count, loff_t *offp)
+{
+	struct nvhost_channel_userctx *priv = filp->private_data;
+	size_t remaining = count;
+	int err = 0;
+	struct nvhost_job *job = priv->job;
+	struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
+	const char *chname = priv->ch->dev->name;
+
+	if (!job)
+		return -EIO;
+
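+	/*
+	 * Consume the write stream as a small state machine: a submit
+	 * header first, then the announced numbers of cmdbufs, relocations
+	 * and wait checks, and finally the relocation shifts of a v2
+	 * submit, in that order.
+	 */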
+	while (remaining) {
+		size_t consumed;
+		if (!hdr->num_relocs &&
+		    !priv->num_relocshifts &&
+		    !hdr->num_cmdbufs &&
+		    !hdr->num_waitchks) {
+			consumed = sizeof(struct nvhost_submit_hdr);
+			if (remaining < consumed)
+				break;
+			if (copy_from_user(hdr, buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
+			err = set_submit(priv);
+			if (err)
+				break;
+			trace_nvhost_channel_write_submit(chname,
+			  count, hdr->num_cmdbufs, hdr->num_relocs,
+			  hdr->syncpt_id, hdr->syncpt_incrs);
+		} else if (hdr->num_cmdbufs) {
+			struct nvhost_cmdbuf cmdbuf;
+			consumed = sizeof(cmdbuf);
+			if (remaining < consumed)
+				break;
+			if (copy_from_user(&cmdbuf, buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			trace_nvhost_channel_write_cmdbuf(chname,
+				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
+			nvhost_job_add_gather(job,
+				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
+			hdr->num_cmdbufs--;
+		} else if (hdr->num_relocs) {
+			int numrelocs = remaining / sizeof(struct nvhost_reloc);
+			if (!numrelocs)
+				break;
+			numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
+			consumed = numrelocs * sizeof(struct nvhost_reloc);
+			if (copy_from_user(&job->relocarray[job->num_relocs],
+					buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			while (numrelocs) {
+				struct nvhost_reloc *reloc =
+					&job->relocarray[job->num_relocs];
+				trace_nvhost_channel_write_reloc(chname,
+					reloc->cmdbuf_mem,
+					reloc->cmdbuf_offset,
+					reloc->target,
+					reloc->target_offset);
+				job->num_relocs++;
+				hdr->num_relocs--;
+				numrelocs--;
+			}
+		} else if (hdr->num_waitchks) {
+			int numwaitchks =
+				(remaining / sizeof(struct nvhost_waitchk));
+			if (!numwaitchks)
+				break;
+			numwaitchks = min_t(int,
+				numwaitchks, hdr->num_waitchks);
+			consumed = numwaitchks * sizeof(struct nvhost_waitchk);
+			if (copy_from_user(&job->waitchk[job->num_waitchk],
+					buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			trace_nvhost_channel_write_waitchks(
+			  chname, numwaitchks,
+			  hdr->waitchk_mask);
+			job->num_waitchk += numwaitchks;
+			hdr->num_waitchks -= numwaitchks;
+		} else if (priv->num_relocshifts) {
+			int next_shift =
+				job->num_relocs - priv->num_relocshifts;
+			int num =
+				(remaining / sizeof(struct nvhost_reloc_shift));
+			if (!num)
+				break;
+			num = min_t(int, num, priv->num_relocshifts);
+			consumed = num * sizeof(struct nvhost_reloc_shift);
+			if (copy_from_user(&job->relocshiftarray[next_shift],
+					buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			priv->num_relocshifts -= num;
+		} else {
+			err = -EFAULT;
+			break;
+		}
+		remaining -= consumed;
+		buf += consumed;
+	}
+
+	if (err < 0) {
+		dev_err(&priv->ch->dev->dev, "channel write error\n");
+		reset_submit(priv);
+		return err;
+	}
+
+	return count - remaining;
+}
+
+static int nvhost_ioctl_channel_flush(
+	struct nvhost_channel_userctx *ctx,
+	struct nvhost_get_param_args *args,
+	int null_kickoff)
+{
+	struct nvhost_device *ndev = ctx->ch->dev;
+	int err;
+
+	trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);
+
+	if (!ctx->job ||
+	    ctx->hdr.num_relocs ||
+	    ctx->hdr.num_cmdbufs ||
+	    ctx->hdr.num_waitchks) {
+		reset_submit(ctx);
+		dev_err(&ndev->dev, "channel submit out of sync\n");
+		return -EFAULT;
+	}
+
+	err = nvhost_job_pin(ctx->job, &nvhost_get_host(ndev)->syncpt);
+	if (err) {
+		dev_warn(&ndev->dev, "nvhost_job_pin failed: %d\n", err);
+		return err;
+	}
+
+	if (nvhost_debug_null_kickoff_pid == current->tgid)
+		null_kickoff = 1;
+	ctx->job->null_kickoff = null_kickoff;
+
+	if ((nvhost_debug_force_timeout_pid == current->tgid) &&
+	    (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
+		ctx->timeout = nvhost_debug_force_timeout_val;
+	}
+
+	/* context switch if needed, and submit user's gathers to the channel */
+	err = nvhost_channel_submit(ctx->job);
+	args->value = ctx->job->syncpt_end;
+	if (err)
+		nvhost_job_unpin(ctx->job);
+
+	nvhost_job_put(ctx->job);
+	ctx->job = NULL;
+
+	return err;
+}
+
+static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
+	struct nvhost_read_3d_reg_args *args)
+{
+	BUG_ON(!channel_op().read3dreg);
+	return channel_op().read3dreg(ctx->ch, ctx->hwctx,
+			args->offset, &args->value);
+}
+
+static long nvhost_channelctl(struct file *filp,
+	unsigned int cmd, unsigned long arg)
+{
+	struct nvhost_channel_userctx *priv = filp->private_data;
+	u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
+	int err = 0;
+
+	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+		(_IOC_NR(cmd) == 0) ||
+		(_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
+		(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
+		return -EFAULT;
+
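+	/*
+	 * Stage the argument in an on-stack buffer; for ioctls with a read
+	 * direction it is copied back to user space after the handler has
+	 * run.
+	 */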
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
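+	/*
+	 * cmd was validated above, so each case below can safely interpret
+	 * buf as its _IOC_SIZE(cmd)-byte argument struct.
+	 */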
+	switch (cmd) {
+	case NVHOST_IOCTL_CHANNEL_FLUSH:
+		err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
+		break;
+	case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
+		err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
+		break;
+	case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
+	{
+		struct nvhost_submit_hdr_ext *hdr;
+
+		if (priv->hdr.num_relocs ||
+		    priv->num_relocshifts ||
+		    priv->hdr.num_cmdbufs ||
+		    priv->hdr.num_waitchks) {
+			reset_submit(priv);
+			dev_err(&priv->ch->dev->dev,
+				"channel submit out of sync\n");
+			err = -EIO;
+			break;
+		}
+
+		hdr = (struct nvhost_submit_hdr_ext *)buf;
+		if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
+			dev_err(&priv->ch->dev->dev,
+				"submit version %d > max supported %d\n",
+				hdr->submit_version,
+				NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
+			err = -EINVAL;
+			break;
+		}
+		memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
+		err = set_submit(priv);
+		trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
+			priv->hdr.submit_version,
+			priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
+			priv->hdr.num_waitchks,
+			priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
+		break;
+	}
+	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
+		/* host syncpt ID is used by the RM (and must never be given out) */
+		BUG_ON(priv->ch->dev->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
+		((struct nvhost_get_param_args *)buf)->value =
+			priv->ch->dev->syncpts;
+		break;
+	case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
+		((struct nvhost_get_param_args *)buf)->value =
+			priv->ch->dev->waitbases;
+		break;
+	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
+		((struct nvhost_get_param_args *)buf)->value =
+			priv->ch->dev->modulemutexes;
+		break;
+	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
+	{
+		int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
+		struct mem_mgr *new_client = mem_op().get_mgr_file(fd);
+
+		if (IS_ERR(new_client)) {
+			err = PTR_ERR(new_client);
+			break;
+		}
+
+		if (priv->memmgr)
+			mem_op().put_mgr(priv->memmgr);
+
+		priv->memmgr = new_client;
+		break;
+	}
+	case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
+		err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
+	{
+		unsigned long rate;
+		struct nvhost_clk_rate_args *arg =
+				(struct nvhost_clk_rate_args *)buf;
+
+		err = nvhost_module_get_rate(priv->ch->dev, &rate, 0);
+		if (err == 0)
+			arg->rate = rate;
+		break;
+	}
+	case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
+	{
+		struct nvhost_clk_rate_args *arg =
+				(struct nvhost_clk_rate_args *)buf;
+		unsigned long rate = (unsigned long)arg->rate;
+
+		err = nvhost_module_set_rate(priv->ch->dev, priv, rate, 0);
+		break;
+	}
+	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
+		priv->timeout =
+			(u32)((struct nvhost_set_timeout_args *)buf)->timeout;
+		dev_dbg(&priv->ch->dev->dev,
+			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
+			__func__, priv->timeout, priv);
+		break;
+	case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
+		((struct nvhost_get_param_args *)buf)->value =
+				priv->hwctx->has_timedout;
+		break;
+	case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
+		priv->priority =
+			(u32)((struct nvhost_set_priority_args *)buf)->priority;
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
+		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
+			err = -EFAULT;
+	}
+
+	return err;
+}
+
+static const struct file_operations nvhost_channelops = {
+	.owner = THIS_MODULE,
+	.release = nvhost_channelrelease,
+	.open = nvhost_channelopen,
+	.write = nvhost_channelwrite,
+	.unlocked_ioctl = nvhost_channelctl
+};
+
+int nvhost_client_user_init(struct nvhost_device *dev)
+{
+	int err;
+	dev_t devno;
+
+	struct nvhost_channel *ch = dev->channel;
+	err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
+	if (err < 0) {
+		dev_err(&dev->dev, "failed to allocate devno\n");
+		goto fail;
+	}
+
+	cdev_init(&ch->cdev, &nvhost_channelops);
+	ch->cdev.owner = THIS_MODULE;
+
+	err = cdev_add(&ch->cdev, devno, 1);
+	if (err < 0) {
+		dev_err(&dev->dev,
+			"failed to add chan %i cdev\n", dev->index);
+		goto fail;
+	}
+	ch->node = device_create(nvhost_get_host(dev)->nvhost_class,
+			NULL, devno, NULL,
+			IFACE_NAME "-%s", dev->name);
+	if (IS_ERR(ch->node)) {
+		err = PTR_ERR(ch->node);
+		dev_err(&dev->dev,
+			"failed to create %s channel device\n", dev->name);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return err;
+}
+
+int nvhost_client_device_init(struct nvhost_device *dev)
+{
+	int err;
+	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
+	struct nvhost_channel *ch;
+
+	dev_info(&dev->dev, "initializing...\n");
+
+	ch = nvhost_alloc_channel(dev);
+	if (ch == NULL)
+		return -ENODEV;
+
+	/* store the pointer to this device for channel */
+	ch->dev = dev;
+
+	err = nvhost_channel_init(ch, nvhost_master, dev->index);
+	if (err)
+		goto fail;
+
+	err = nvhost_client_user_init(dev);
+	if (err)
+		goto fail;
+
+	err = nvhost_module_init(dev);
+	if (err)
+		goto fail;
+
+	dev_info(&dev->dev, "initialized\n");
+
+	return 0;
+
+fail:
+	/* Add clean-up */
+	nvhost_free_channel(ch);
+	return err;
+}
+
+int nvhost_client_device_suspend(struct nvhost_device *dev)
+{
+	int ret = 0;
+
+	ret = nvhost_channel_suspend(dev->channel);
+	if (ret)
+		return ret;
+
+	dev_info(&dev->dev, "suspend status: %d\n", ret);
+
+	return ret;
+}
+
+int nvhost_client_device_get_resources(struct nvhost_device *dev)
+{
+	struct resource *r = NULL;
+	void __iomem *regs = NULL;
+	struct resource *reg_mem = NULL;
+
+	r = nvhost_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!r)
+		goto fail;
+
+	reg_mem = request_mem_region(r->start, resource_size(r), dev->name);
+	if (!reg_mem)
+		goto fail;
+
+	regs = ioremap(r->start, resource_size(r));
+	if (!regs)
+		goto fail;
+
+	dev->reg_mem = reg_mem;
+	dev->aperture = regs;
+
+	return 0;
+
+fail:
+	if (reg_mem)
+		release_mem_region(r->start, resource_size(r));
+	if (regs)
+		iounmap(regs);
+
+	dev_err(&dev->dev, "failed to get register memory\n");
+
+	return -ENXIO;
+}
+
+void nvhost_client_device_put_resources(struct nvhost_device *dev)
+{
+	struct resource *r;
+
+	r = nvhost_get_resource(dev, IORESOURCE_MEM, 0);
+	BUG_ON(!r);
+
+	iounmap(dev->aperture);
+
+	release_mem_region(r->start, resource_size(r));
+}
diff --git a/drivers/staging/tegra/video/host/bus_client.h b/drivers/staging/tegra/video/host/bus_client.h
new file mode 100644
index 000000000000..8c7bdc9faefe
--- /dev/null
+++ b/drivers/staging/tegra/video/host/bus_client.h
@@ -0,0 +1,42 @@
+/*
+ * drivers/video/tegra/host/bus_client.h
+ *
+ * Tegra Graphics Host client
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_BUS_CLIENT_H
+#define __NVHOST_BUS_CLIENT_H
+
+#include <linux/types.h>
+struct nvhost_device;
+
+int nvhost_read_module_regs(struct nvhost_device *ndev,
+			u32 offset, int count, u32 *values);
+
+int nvhost_write_module_regs(struct nvhost_device *ndev,
+			u32 offset, int count, const u32 *values);
+
+int nvhost_client_user_init(struct nvhost_device *dev);
+
+int nvhost_client_device_init(struct nvhost_device *dev);
+
+int nvhost_client_device_suspend(struct nvhost_device *dev);
+
+int nvhost_client_device_get_resources(struct nvhost_device *dev);
+void nvhost_client_device_put_resources(struct nvhost_device *dev);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/chip_support.c b/drivers/staging/tegra/video/host/chip_support.c
new file mode 100644
index 000000000000..375116d0a2de
--- /dev/null
+++ b/drivers/staging/tegra/video/host/chip_support.c
@@ -0,0 +1,56 @@
+/*
+ * drivers/video/tegra/host/chip_support.c
+ *
+ * Tegra Graphics Host Chip support module
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/errno.h>
+
+#include <mach/fuse.h>
+
+#include "bus.h"
+#include "chip_support.h"
+#include "t20/t20.h"
+#include "t30/t30.h"
+
+struct nvhost_chip_support *nvhost_get_chip_ops(void)
+{
+	return (nvhost_bus_get())->nvhost_chip_ops;
+}
+
+int nvhost_init_chip_support(struct nvhost_master *host)
+{
+	int err = 0;
+	struct nvhost_chip_support *chip_ops;
+
+	chip_ops = nvhost_get_chip_ops();
+
+	switch (tegra_chip_id) {
+	case TEGRA20:
+		err = nvhost_init_t20_support(host, chip_ops);
+		break;
+
+	case TEGRA30:
+		err = nvhost_init_t30_support(host, chip_ops);
+		break;
+
+	default:
+		err = -ENODEV;
+	}
+
+	return err;
+}
diff --git a/drivers/staging/tegra/video/host/chip_support.h b/drivers/staging/tegra/video/host/chip_support.h
new file mode 100644
index 000000000000..072e4f5691bc
--- /dev/null
+++ b/drivers/staging/tegra/video/host/chip_support.h
@@ -0,0 +1,181 @@
+/*
+ * drivers/video/tegra/host/chip_support.h
+ *
+ * Tegra Graphics Host Chip Support
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _NVHOST_CHIP_SUPPORT_H_
+#define _NVHOST_CHIP_SUPPORT_H_
+
+#include <linux/types.h>
+#include "bus.h"
+
+struct output;
+
+struct nvhost_master;
+struct nvhost_intr;
+struct nvhost_syncpt;
+struct nvhost_userctx_timeout;
+struct nvhost_channel;
+struct nvhost_hwctx;
+struct nvhost_cdma;
+struct nvhost_job;
+struct push_buffer;
+struct nvhost_syncpt;
+struct dentry;
+struct nvhost_job;
+struct nvhost_intr_syncpt;
+struct mem_handle;
+struct mem_mgr;
+struct nvhost_device;
+
+struct nvhost_channel_ops {
+	int (*init)(struct nvhost_channel *,
+		    struct nvhost_master *,
+		    int chid);
+	int (*submit)(struct nvhost_job *job);
+	int (*read3dreg)(struct nvhost_channel *channel,
+			struct nvhost_hwctx *hwctx,
+			u32 offset,
+			u32 *value);
+	int (*save_context)(struct nvhost_channel *channel);
+	int (*drain_read_fifo)(struct nvhost_channel *ch,
+		u32 *ptr, unsigned int count, unsigned int *pending);
+};
+
+struct nvhost_cdma_ops {
+	void (*start)(struct nvhost_cdma *);
+	void (*stop)(struct nvhost_cdma *);
+	void (*kick)(struct nvhost_cdma *);
+	int (*timeout_init)(struct nvhost_cdma *,
+			    u32 syncpt_id);
+	void (*timeout_destroy)(struct nvhost_cdma *);
+	void (*timeout_teardown_begin)(struct nvhost_cdma *);
+	void (*timeout_teardown_end)(struct nvhost_cdma *,
+				     u32 getptr);
+	void (*timeout_cpu_incr)(struct nvhost_cdma *,
+				 u32 getptr,
+				 u32 syncpt_incrs,
+				 u32 syncval,
+				 u32 nr_slots,
+				 u32 waitbases);
+};
+
+struct nvhost_pushbuffer_ops {
+	void (*reset)(struct push_buffer *);
+	int (*init)(struct push_buffer *);
+	void (*destroy)(struct push_buffer *);
+	void (*push_to)(struct push_buffer *,
+			struct mem_mgr *, struct mem_handle *,
+			u32 op1, u32 op2);
+	void (*pop_from)(struct push_buffer *,
+			 unsigned int slots);
+	u32 (*space)(struct push_buffer *);
+	u32 (*putptr)(struct push_buffer *);
+};
+
+struct nvhost_debug_ops {
+	void (*debug_init)(struct dentry *de);
+	void (*show_channel_cdma)(struct nvhost_master *,
+				  struct nvhost_channel *,
+				  struct output *,
+				  int chid);
+	void (*show_channel_fifo)(struct nvhost_master *,
+				  struct nvhost_channel *,
+				  struct output *,
+				  int chid);
+	void (*show_mlocks)(struct nvhost_master *m,
+			    struct output *o);
+};
+
+struct nvhost_syncpt_ops {
+	void (*reset)(struct nvhost_syncpt *, u32 id);
+	void (*reset_wait_base)(struct nvhost_syncpt *, u32 id);
+	void (*read_wait_base)(struct nvhost_syncpt *, u32 id);
+	u32 (*update_min)(struct nvhost_syncpt *, u32 id);
+	void (*cpu_incr)(struct nvhost_syncpt *, u32 id);
+	int (*patch_wait)(struct nvhost_syncpt *sp,
+			void *patch_addr);
+	void (*debug)(struct nvhost_syncpt *);
+	const char * (*name)(struct nvhost_syncpt *, u32 id);
+	int (*mutex_try_lock)(struct nvhost_syncpt *,
+			      unsigned int idx);
+	void (*mutex_unlock)(struct nvhost_syncpt *,
+			     unsigned int idx);
+};
+
+struct nvhost_intr_ops {
+	void (*request_syncpt_irq)(struct nvhost_intr *);
+	void (*free_syncpt_irq)(struct nvhost_intr *);
+	void (*set_host_clocks_per_usec)(
+		struct nvhost_intr *, u32 clocks);
+	void (*set_syncpt_threshold)(
+		struct nvhost_intr *, u32 id, u32 thresh);
+	void (*enable_syncpt_intr)(struct nvhost_intr *, u32 id);
+	void (*disable_syncpt_intr)(struct nvhost_intr *, u32 id);
+	void (*disable_all_syncpt_intrs)(struct nvhost_intr *);
+	int  (*request_host_general_irq)(struct nvhost_intr *);
+	void (*free_host_general_irq)(struct nvhost_intr *);
+};
+
+struct nvhost_dev_ops {
+	struct nvhost_channel *(*alloc_nvhost_channel)(
+			struct nvhost_device *dev);
+	void (*free_nvhost_channel)(struct nvhost_channel *ch);
+};
+
+struct nvhost_mem_ops {
+	struct mem_mgr *(*alloc_mgr)(void);
+	void (*put_mgr)(struct mem_mgr *);
+	struct mem_mgr *(*get_mgr)(struct mem_mgr *);
+	struct mem_mgr *(*get_mgr_file)(int fd);
+	struct mem_handle *(*alloc)(struct mem_mgr *,
+			size_t size, size_t align,
+			int flags);
+	struct mem_handle *(*get)(struct mem_mgr *, u32 id);
+	void (*put)(struct mem_mgr *, struct mem_handle *);
+	phys_addr_t (*pin)(struct mem_mgr *, struct mem_handle *);
+	void (*unpin)(struct mem_mgr *, struct mem_handle *);
+	void *(*mmap)(struct mem_handle *);
+	void (*munmap)(struct mem_handle *, void *);
+};
+
+struct nvhost_chip_support {
+	struct nvhost_channel_ops channel;
+	struct nvhost_cdma_ops cdma;
+	struct nvhost_pushbuffer_ops push_buffer;
+	struct nvhost_debug_ops debug;
+	struct nvhost_syncpt_ops syncpt;
+	struct nvhost_intr_ops intr;
+	struct nvhost_dev_ops nvhost_dev;
+	struct nvhost_mem_ops mem;
+};
+
+struct nvhost_chip_support *nvhost_get_chip_ops(void);
+
+#define host_device_op()	nvhost_get_chip_ops()->nvhost_dev
+#define channel_cdma_op()	nvhost_get_chip_ops()->cdma
+#define channel_op()		nvhost_get_chip_ops()->channel
+#define syncpt_op()		nvhost_get_chip_ops()->syncpt
+#define intr_op()		nvhost_get_chip_ops()->intr
+#define cdma_op()		nvhost_get_chip_ops()->cdma
+#define cdma_pb_op()		nvhost_get_chip_ops()->push_buffer
+#define mem_op()		(nvhost_get_chip_ops()->mem)
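+
+/*
+ * Chip-independent code reaches the per-SoC implementation through the
+ * accessors above, e.g. (sketch):
+ *
+ *	err = channel_op().submit(job);
+ *	syncpt_op().cpu_incr(&host->syncpt, id);
+ *
+ * The function pointers are filled in by nvhost_init_t20_support() or
+ * nvhost_init_t30_support() via nvhost_init_chip_support().
+ */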
+
+int nvhost_init_chip_support(struct nvhost_master *);
+
+#endif /* _NVHOST_CHIP_SUPPORT_H_ */
diff --git a/drivers/staging/tegra/video/host/debug.c b/drivers/staging/tegra/video/host/debug.c
new file mode 100644
index 000000000000..e416d9709e6b
--- /dev/null
+++ b/drivers/staging/tegra/video/host/debug.c
@@ -0,0 +1,234 @@
+/*
+ * drivers/video/tegra/host/debug.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/io.h>
+
+#include "bus.h"
+#include "dev.h"
+#include "debug.h"
+#include "nvhost_acm.h"
+#include "nvhost_channel.h"
+#include "chip_support.h"
+
+pid_t nvhost_debug_null_kickoff_pid;
+unsigned int nvhost_debug_trace_cmdbuf;
+
+pid_t nvhost_debug_force_timeout_pid;
+u32 nvhost_debug_force_timeout_val;
+u32 nvhost_debug_force_timeout_channel;
+
+void nvhost_debug_output(struct output *o, const char *fmt, ...)
+{
+	va_list args;
+	int len;
+
+	va_start(args, fmt);
+	len = vscnprintf(o->buf, sizeof(o->buf), fmt, args);
+	va_end(args);
+	o->fn(o->ctx, o->buf, len);
+}
+
+static int show_channels(struct device *dev, void *data)
+{
+	struct nvhost_channel *ch;
+	struct nvhost_device *nvdev = to_nvhost_device(dev);
+	struct output *o = data;
+	struct nvhost_master *m;
+
+	if (nvdev == NULL)
+		return 0;
+
+	m = nvhost_get_host(nvdev);
+	ch = nvdev->channel;
+	if (ch) {
+		mutex_lock(&ch->reflock);
+		if (ch->refcount) {
+			mutex_lock(&ch->cdma.lock);
+			nvhost_get_chip_ops()->debug.show_channel_fifo(m,
+					ch, o, nvdev->index);
+			nvhost_get_chip_ops()->debug.show_channel_cdma(m,
+					ch, o, nvdev->index);
+			mutex_unlock(&ch->cdma.lock);
+		}
+		mutex_unlock(&ch->reflock);
+	}
+
+	return 0;
+}
+
+static void show_syncpts(struct nvhost_master *m, struct output *o)
+{
+	int i;
+	BUG_ON(!nvhost_get_chip_ops()->syncpt.name);
+	nvhost_debug_output(o, "---- syncpts ----\n");
+	for (i = 0; i < nvhost_syncpt_nb_pts(&m->syncpt); i++) {
+		u32 max = nvhost_syncpt_read_max(&m->syncpt, i);
+		u32 min = nvhost_syncpt_update_min(&m->syncpt, i);
+		if (!min && !max)
+			continue;
+		nvhost_debug_output(o, "id %d (%s) min %d max %d\n",
+				i, nvhost_get_chip_ops()->syncpt.name(&m->syncpt, i),
+				min, max);
+	}
+
+	for (i = 0; i < nvhost_syncpt_nb_pts(&m->syncpt); i++) {
+		u32 base_val;
+		base_val = nvhost_syncpt_read_wait_base(&m->syncpt, i);
+		if (base_val)
+			nvhost_debug_output(o, "waitbase id %d val %d\n",
+					i, base_val);
+	}
+
+	nvhost_debug_output(o, "\n");
+}
+
+static void show_all(struct nvhost_master *m, struct output *o)
+{
+	nvhost_module_busy(m->dev);
+
+	nvhost_get_chip_ops()->debug.show_mlocks(m, o);
+	show_syncpts(m, o);
+	nvhost_debug_output(o, "---- channels ----\n");
+	bus_for_each_dev((nvhost_bus_get())->nvhost_bus_type, NULL, o,
+			show_channels);
+
+	nvhost_module_idle(m->dev);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int show_channels_no_fifo(struct device *dev, void *data)
+{
+	struct nvhost_channel *ch;
+	struct nvhost_device *nvdev = to_nvhost_device(dev);
+	struct output *o = data;
+	struct nvhost_master *m;
+
+	if (nvdev == NULL)
+		return 0;
+
+	m = nvhost_get_host(nvdev);
+	ch = nvdev->channel;
+	if (ch) {
+		mutex_lock(&ch->reflock);
+		if (ch->refcount) {
+			mutex_lock(&ch->cdma.lock);
+			nvhost_get_chip_ops()->debug.show_channel_cdma(m,
+					ch, o, nvdev->index);
+			mutex_unlock(&ch->cdma.lock);
+		}
+		mutex_unlock(&ch->reflock);
+	}
+
+	return 0;
+}
+
+static void show_all_no_fifo(struct nvhost_master *m, struct output *o)
+{
+	nvhost_module_busy(m->dev);
+
+	nvhost_get_chip_ops()->debug.show_mlocks(m, o);
+	show_syncpts(m, o);
+	nvhost_debug_output(o, "---- channels ----\n");
+	bus_for_each_dev((nvhost_bus_get())->nvhost_bus_type, NULL, o,
+			show_channels_no_fifo);
+
+	nvhost_module_idle(m->dev);
+}
+
+static int nvhost_debug_show_all(struct seq_file *s, void *unused)
+{
+	struct output o = {
+		.fn = write_to_seqfile,
+		.ctx = s
+	};
+	show_all(s->private, &o);
+	return 0;
+}
+static int nvhost_debug_show(struct seq_file *s, void *unused)
+{
+	struct output o = {
+		.fn = write_to_seqfile,
+		.ctx = s
+	};
+	show_all_no_fifo(s->private, &o);
+	return 0;
+}
+
+static int nvhost_debug_open_all(struct inode *inode, struct file *file)
+{
+	return single_open(file, nvhost_debug_show_all, inode->i_private);
+}
+
+static const struct file_operations nvhost_debug_all_fops = {
+	.open		= nvhost_debug_open_all,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int nvhost_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nvhost_debug_show, inode->i_private);
+}
+
+static const struct file_operations nvhost_debug_fops = {
+	.open		= nvhost_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
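+/*
+ * Everything below lands in /sys/kernel/debug/tegra_host/: "status"
+ * dumps syncpt and channel CDMA state, "status_all" additionally dumps
+ * the channel FIFOs, and the null_kickoff/force_timeout knobs inject
+ * faults into the submit path (see nvhost_ioctl_channel_flush()).
+ */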
+void nvhost_debug_init(struct nvhost_master *master)
+{
+	struct dentry *de = debugfs_create_dir("tegra_host", NULL);
+
+	debugfs_create_file("status", S_IRUGO, de,
+			master, &nvhost_debug_fops);
+	debugfs_create_file("status_all", S_IRUGO, de,
+			master, &nvhost_debug_all_fops);
+
+	debugfs_create_u32("null_kickoff_pid", S_IRUGO|S_IWUSR, de,
+			&nvhost_debug_null_kickoff_pid);
+	debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
+			&nvhost_debug_trace_cmdbuf);
+
+	if (nvhost_get_chip_ops()->debug.debug_init)
+		nvhost_get_chip_ops()->debug.debug_init(de);
+
+	debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
+			&nvhost_debug_force_timeout_pid);
+	debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
+			&nvhost_debug_force_timeout_val);
+	debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
+			&nvhost_debug_force_timeout_channel);
+}
+#else
+void nvhost_debug_init(struct nvhost_master *master)
+{
+}
+#endif
+
+void nvhost_debug_dump(struct nvhost_master *master)
+{
+	struct output o = {
+		.fn = write_to_printk
+	};
+	show_all(master, &o);
+}
diff --git a/drivers/staging/tegra/video/host/debug.h b/drivers/staging/tegra/video/host/debug.h
new file mode 100644
index 000000000000..3dc156ab4741
--- /dev/null
+++ b/drivers/staging/tegra/video/host/debug.h
@@ -0,0 +1,50 @@
+/*
+ * drivers/video/tegra/host/debug.h
+ *
+ * Tegra Graphics Host Debug
+ *
+ * Copyright (c) 2011-2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __NVHOST_DEBUG_H
+#define __NVHOST_DEBUG_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+struct output {
+	void (*fn)(void *ctx, const char *str, size_t len);
+	void *ctx;
+	char buf[256];
+};
+
+static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
+{
+	seq_write((struct seq_file *)ctx, str, len);
+}
+
+static inline void write_to_printk(void *ctx, const char *str, size_t len)
+{
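+	/* str is NUL-terminated by vscnprintf() in nvhost_debug_output() */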
+	printk(KERN_INFO "%s", str);
+}
+
+void nvhost_debug_output(struct output *o, const char *fmt, ...);
+
+extern pid_t nvhost_debug_null_kickoff_pid;
+extern pid_t nvhost_debug_force_timeout_pid;
+extern u32 nvhost_debug_force_timeout_val;
+extern u32 nvhost_debug_force_timeout_channel;
+extern unsigned int nvhost_debug_trace_cmdbuf;
+
+#endif /* __NVHOST_DEBUG_H */
diff --git a/drivers/staging/tegra/video/host/dev.c b/drivers/staging/tegra/video/host/dev.c
new file mode 100644
index 000000000000..4bc9562afd80
--- /dev/null
+++ b/drivers/staging/tegra/video/host/dev.c
@@ -0,0 +1,31 @@
+/*
+ * drivers/video/tegra/host/dev.c
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/nvhost.h>
+#include <linux/module.h>
+
+#include <linux/nvhost.h>
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("Graphics host driver for Tegra products");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform-nvhost");
diff --git a/drivers/staging/tegra/video/host/dev.h b/drivers/staging/tegra/video/host/dev.h
new file mode 100644
index 000000000000..8700fdf9dea5
--- /dev/null
+++ b/drivers/staging/tegra/video/host/dev.h
@@ -0,0 +1,25 @@
+/*
+ * drivers/video/tegra/host/dev.h
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef NVHOST_DEV_H
+#define NVHOST_DEV_H
+
+#include "host1x/host1x.h"
+#include <linux/module.h>
+
+#endif
diff --git a/drivers/staging/tegra/video/host/gr2d/Makefile b/drivers/staging/tegra/video/host/gr2d/Makefile
new file mode 100644
index 000000000000..594720f6afc9
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr2d/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+nvhost-gr2d-objs  = \
+		gr2d.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-gr2d.o
diff --git a/drivers/staging/tegra/video/host/gr2d/gr2d.c b/drivers/staging/tegra/video/host/gr2d/gr2d.c
new file mode 100644
index 000000000000..f298cd9522f3
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr2d/gr2d.c
@@ -0,0 +1,80 @@
+/*
+ * drivers/video/tegra/host/gr2d/gr2d.c
+ *
+ * Tegra Graphics 2D
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "dev.h"
+#include "bus_client.h"
+
+static int gr2d_probe(struct nvhost_device *dev,
+	struct nvhost_device_id *id_table)
+{
+	return nvhost_client_device_init(dev);
+}
+
+static int __exit gr2d_remove(struct nvhost_device *dev)
+{
+	/* Add clean-up */
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int gr2d_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+	return nvhost_client_device_suspend(dev);
+}
+
+static int gr2d_resume(struct nvhost_device *dev)
+{
+	dev_info(&dev->dev, "resuming\n");
+	return 0;
+}
+#endif
+
+static const struct of_device_id gr2d_of_match[] = {
+	{ .compatible = "nvidia,tegra20-gr2d", },
+	{ .compatible = "nvidia,tegra30-gr2d", },
+	{ },
+};
+
+static struct nvhost_driver gr2d_driver = {
+	.probe = gr2d_probe,
+	.remove = __exit_p(gr2d_remove),
+#ifdef CONFIG_PM
+	.suspend = gr2d_suspend,
+	.resume = gr2d_resume,
+#endif
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "gr2d",
+		.of_match_table = of_match_ptr(gr2d_of_match),
+	}
+};
+
+static int __init gr2d_init(void)
+{
+	return nvhost_driver_register(&gr2d_driver);
+}
+
+static void __exit gr2d_exit(void)
+{
+	nvhost_driver_unregister(&gr2d_driver);
+}
+
+module_init(gr2d_init);
+module_exit(gr2d_exit);
diff --git a/drivers/staging/tegra/video/host/gr3d/Makefile b/drivers/staging/tegra/video/host/gr3d/Makefile
new file mode 100644
index 000000000000..e9128a934ea4
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr3d/Makefile
@@ -0,0 +1,11 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+nvhost-gr3d-objs  = \
+		gr3d.o \
+		gr3d_t20.o \
+		gr3d_t30.o \
+		scale3d.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-gr3d.o
diff --git a/drivers/staging/tegra/video/host/gr3d/gr3d.c b/drivers/staging/tegra/video/host/gr3d/gr3d.c
new file mode 100644
index 000000000000..f9697c57b06e
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr3d/gr3d.c
@@ -0,0 +1,278 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d.c
+ *
+ * Tegra Graphics Host 3D
+ *
+ * Copyright (c) 2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/reset.h>
+
+#include "t20/t20.h"
+#include "host1x/host1x01_hardware.h"
+#include "nvhost_hwctx.h"
+#include "dev.h"
+#include "gr3d.h"
+#include "gr3d_t20.h"
+#include "gr3d_t30.h"
+#include "scale3d.h"
+#include "bus_client.h"
+#include "nvhost_channel.h"
+#include "nvhost_memmgr.h"
+#include "chip_support.h"
+
+void nvhost_3dctx_restore_begin(struct host1x_hwctx_handler *p, u32 *ptr)
+{
+	/* set class to host */
+	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_incr_syncpt_base_r(), 1);
+	/* increment sync point base */
+	ptr[1] = nvhost_class_host_incr_syncpt_base(p->waitbase,
+			p->restore_incrs);
+	/* set class to 3D */
+	ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+	/* program PSEQ_QUAD_ID */
+	ptr[3] = nvhost_opcode_imm(AR3D_PSEQ_QUAD_ID, 0);
+}
+
+void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_incr(start_reg, count);
+}
+
+void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg, u32 offset,
+			u32 data_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_imm(offset_reg, offset);
+	ptr[1] = nvhost_opcode_nonincr(data_reg, count);
+}
+
+void nvhost_3dctx_restore_end(struct host1x_hwctx_handler *p, u32 *ptr)
+{
+	/* syncpt increment to track restore gather. */
+	ptr[0] = nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_op_done_v(), p->syncpt);
+}
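+
+/*
+ * A restore buffer assembled from the helpers above looks roughly like:
+ *
+ *	restore_begin:		setclass(host1x), incr_syncpt_base,
+ *				setclass(3D), imm(PSEQ_QUAD_ID)
+ *	per DIRECT range:	incr(reg, n) followed by n data words
+ *	per INDIRECT range:	imm(offset_reg), nonincr(data_reg, n)
+ *				followed by n data words
+ *	restore_end:		imm_incr_syncpt(op_done)
+ *
+ * The chip-specific gr3d_t20/gr3d_t30 code picks the register ranges and
+ * fills in the data words at context-save time.
+ */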
+
+/*** ctx3d ***/
+struct host1x_hwctx *nvhost_3dctx_alloc_common(struct host1x_hwctx_handler *p,
+		struct nvhost_channel *ch, bool map_restore)
+{
+	struct mem_mgr *memmgr = nvhost_get_host(ch->dev)->memmgr;
+	struct host1x_hwctx *ctx;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
+	ctx->restore = mem_op().alloc(memmgr, p->restore_size * 4, 32,
+		map_restore ? mem_mgr_flag_write_combine
+			    : mem_mgr_flag_uncacheable);
+	if (IS_ERR_OR_NULL(ctx->restore)) {
+		ctx->restore = NULL;
+		goto fail;
+	}
+
+	if (map_restore) {
+		ctx->restore_virt = mem_op().mmap(ctx->restore);
+		if (!ctx->restore_virt)
+			goto fail;
+	} else
+		ctx->restore_virt = NULL;
+
+	kref_init(&ctx->hwctx.ref);
+	ctx->hwctx.h = &p->h;
+	ctx->hwctx.channel = ch;
+	ctx->hwctx.valid = false;
+	ctx->save_incrs = p->save_incrs;
+	ctx->save_thresh = p->save_thresh;
+	ctx->save_slots = p->save_slots;
+	ctx->restore_phys = mem_op().pin(memmgr, ctx->restore);
+	if (IS_ERR_VALUE(ctx->restore_phys))
+		goto fail;
+
+	ctx->restore_size = p->restore_size;
+	ctx->restore_incrs = p->restore_incrs;
+	return ctx;
+
+fail:
+	if (map_restore && ctx->restore_virt) {
+		mem_op().munmap(ctx->restore, ctx->restore_virt);
+		ctx->restore_virt = NULL;
+	}
+	mem_op().put(memmgr, ctx->restore);
+	ctx->restore = NULL;
+	kfree(ctx);
+	return NULL;
+}
+
+void nvhost_3dctx_get(struct nvhost_hwctx *ctx)
+{
+	kref_get(&ctx->ref);
+}
+
+void nvhost_3dctx_free(struct kref *ref)
+{
+	struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
+	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+	struct mem_mgr *memmgr = nvhost_get_host(nctx->channel->dev)->memmgr;
+
+	if (ctx->restore_virt) {
+		mem_op().munmap(ctx->restore, ctx->restore_virt);
+		ctx->restore_virt = NULL;
+	}
+	mem_op().unpin(memmgr, ctx->restore);
+	ctx->restore_phys = 0;
+	mem_op().put(memmgr, ctx->restore);
+	ctx->restore = NULL;
+	kfree(ctx);
+}
+
+void nvhost_3dctx_put(struct nvhost_hwctx *ctx)
+{
+	kref_put(&ctx->ref, nvhost_3dctx_free);
+}
+
+int nvhost_gr3d_prepare_power_off(struct nvhost_device *dev)
+{
+	return nvhost_channel_save_context(dev->channel);
+}
+
+enum gr3d_ip_ver {
+	gr3d_01 = 1,
+	gr3d_02,
+};
+
+struct gr3d_desc {
+	void (*finalize_poweron)(struct nvhost_device *dev);
+	void (*busy)(struct nvhost_device *);
+	void (*idle)(struct nvhost_device *);
+	void (*suspend_ndev)(struct nvhost_device *);
+	void (*init)(struct nvhost_device *dev);
+	void (*deinit)(struct nvhost_device *dev);
+	int (*prepare_poweroff)(struct nvhost_device *dev);
+	struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt,
+			u32 waitbase, struct nvhost_channel *ch);
+};
+
+static const struct gr3d_desc gr3d[] = {
+	[gr3d_01] = {
+		.finalize_poweron = NULL,
+		.busy = NULL,
+		.idle = NULL,
+		.suspend_ndev = NULL,
+		.init = NULL,
+		.deinit = NULL,
+		.prepare_poweroff = nvhost_gr3d_prepare_power_off,
+		.alloc_hwctx_handler = nvhost_gr3d_t20_ctxhandler_init,
+	},
+	[gr3d_02] = {
+		.finalize_poweron = NULL,
+		.busy = nvhost_scale3d_notify_busy,
+		.idle = nvhost_scale3d_notify_idle,
+		.suspend_ndev = nvhost_scale3d_suspend,
+		.init = nvhost_scale3d_init,
+		.deinit = nvhost_scale3d_deinit,
+		.prepare_poweroff = nvhost_gr3d_prepare_power_off,
+		.alloc_hwctx_handler = nvhost_gr3d_t30_ctxhandler_init,
+	},
+};
+
+static struct nvhost_device_id gr3d_id[] = {
+	{ "gr3d", gr3d_01 },
+	{ "gr3d", gr3d_02 },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(nvhost, gr3d_id);
+
+static int gr3d_probe(struct nvhost_device *dev,
+	struct nvhost_device_id *id_table)
+{
+	int index = 0;
+	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+	index = id_table->version;
+
+	drv->finalize_poweron		= gr3d[index].finalize_poweron;
+	drv->busy			= gr3d[index].busy;
+	drv->idle			= gr3d[index].idle;
+	drv->suspend_ndev		= gr3d[index].suspend_ndev;
+	drv->init			= gr3d[index].init;
+	drv->deinit			= gr3d[index].deinit;
+	drv->prepare_poweroff		= gr3d[index].prepare_poweroff;
+	drv->alloc_hwctx_handler	= gr3d[index].alloc_hwctx_handler;
+
+	dev->rst = devm_reset_control_get(&dev->dev, "3d");
+	if (IS_ERR(dev->rst)) {
+		dev_err(&dev->dev, "failed to get reset\n");
+		return PTR_ERR(dev->rst);
+	}
+
+	return nvhost_client_device_init(dev);
+}
+
+static int __exit gr3d_remove(struct nvhost_device *dev)
+{
+	/* Add clean-up */
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int gr3d_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+	return nvhost_client_device_suspend(dev);
+}
+
+static int gr3d_resume(struct nvhost_device *dev)
+{
+	dev_info(&dev->dev, "resuming\n");
+	return 0;
+}
+#endif
+
+static const struct of_device_id gr3d_of_match[] = {
+	{ .compatible = "nvidia,tegra20-gr3d", },
+	{ .compatible = "nvidia,tegra30-gr3d", },
+	{ },
+};
+
+static struct nvhost_driver gr3d_driver = {
+	.probe = gr3d_probe,
+	.remove = __exit_p(gr3d_remove),
+#ifdef CONFIG_PM
+	.suspend = gr3d_suspend,
+	.resume = gr3d_resume,
+#endif
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "gr3d",
+		.of_match_table = of_match_ptr(gr3d_of_match),
+	},
+	.id_table = gr3d_id,
+};
+
+static int __init gr3d_init(void)
+{
+	return nvhost_driver_register(&gr3d_driver);
+}
+
+static void __exit gr3d_exit(void)
+{
+	nvhost_driver_unregister(&gr3d_driver);
+}
+
+module_init(gr3d_init);
+module_exit(gr3d_exit);
diff --git a/drivers/staging/tegra/video/host/gr3d/gr3d.h b/drivers/staging/tegra/video/host/gr3d/gr3d.h
new file mode 100644
index 000000000000..61f708cea95c
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr3d/gr3d.h
@@ -0,0 +1,57 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d.h
+ *
+ * Tegra Graphics Host 3D
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_GR3D_GR3D_H
+#define __NVHOST_GR3D_GR3D_H
+
+#include "host1x/host1x_hwctx.h"
+#include <linux/types.h>
+
+/* Registers of 3D unit */
+
+#define AR3D_PSEQ_QUAD_ID 0x545
+#define AR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904
+#define AR3D_DW_MEMORY_OUTPUT_DATA 0x905
+#define AR3D_FDC_CONTROL_0 0xa00
+#define AR3D_FDC_CONTROL_0_RESET_VAL 0xe00
+#define AR3D_FDC_CONTROL_0_INVALIDATE 1
+#define AR3D_GSHIM_WRITE_MASK 0xb00
+#define AR3D_GSHIM_READ_SELECT 0xb01
+#define AR3D_GLOBAL_MEMORY_OUTPUT_READS 0xe40
+
+struct nvhost_hwctx;
+struct nvhost_channel;
+struct kref;
+
+/* Functions used commonly by all 3D context switch modules */
+void nvhost_3dctx_restore_begin(struct host1x_hwctx_handler *h, u32 *ptr);
+void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count);
+void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg,
+		u32 offset,	u32 data_reg, u32 count);
+void nvhost_3dctx_restore_end(struct host1x_hwctx_handler *h, u32 *ptr);
+struct host1x_hwctx *nvhost_3dctx_alloc_common(
+		struct host1x_hwctx_handler *p,
+		struct nvhost_channel *ch, bool map_restore);
+void nvhost_3dctx_get(struct nvhost_hwctx *ctx);
+void nvhost_3dctx_free(struct kref *ref);
+void nvhost_3dctx_put(struct nvhost_hwctx *ctx);
+int nvhost_gr3d_prepare_power_off(struct nvhost_device *dev);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/gr3d/gr3d_t20.c b/drivers/staging/tegra/video/host/gr3d/gr3d_t20.c
new file mode 100644
index 000000000000..694b00527790
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr3d/gr3d_t20.c
@@ -0,0 +1,399 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t20.c
+ *
+ * Tegra Graphics Host 3D for Tegra2
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_hwctx.h"
+#include "nvhost_channel.h"
+#include "host1x/host1x.h"
+#include "host1x/host1x01_hardware.h"
+#include "gr3d.h"
+#include "chip_support.h"
+#include "nvhost_memmgr.h"
+
+#include <linux/slab.h>
+
+static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
+	HWCTX_REGINFO(0xe00,    4, DIRECT),
+	HWCTX_REGINFO(0xe05,   30, DIRECT),
+	HWCTX_REGINFO(0xe25,    2, DIRECT),
+	HWCTX_REGINFO(0xe28,    2, DIRECT),
+	HWCTX_REGINFO(0x001,    2, DIRECT),
+	HWCTX_REGINFO(0x00c,   10, DIRECT),
+	HWCTX_REGINFO(0x100,   34, DIRECT),
+	HWCTX_REGINFO(0x124,    2, DIRECT),
+	HWCTX_REGINFO(0x200,    5, DIRECT),
+	HWCTX_REGINFO(0x205, 1024, INDIRECT),
+	HWCTX_REGINFO(0x207, 1024, INDIRECT),
+	HWCTX_REGINFO(0x209,    1, DIRECT),
+	HWCTX_REGINFO(0x300,   64, DIRECT),
+	HWCTX_REGINFO(0x343,   25, DIRECT),
+	HWCTX_REGINFO(0x363,    2, DIRECT),
+	HWCTX_REGINFO(0x400,   16, DIRECT),
+	HWCTX_REGINFO(0x411,    1, DIRECT),
+	HWCTX_REGINFO(0x500,    4, DIRECT),
+	HWCTX_REGINFO(0x520,   32, DIRECT),
+	HWCTX_REGINFO(0x540,   64, INDIRECT),
+	HWCTX_REGINFO(0x600,   16, INDIRECT_4X),
+	HWCTX_REGINFO(0x603,  128, INDIRECT),
+	HWCTX_REGINFO(0x608,    4, DIRECT),
+	HWCTX_REGINFO(0x60e,    1, DIRECT),
+	HWCTX_REGINFO(0x700,   64, INDIRECT),
+	HWCTX_REGINFO(0x710,   50, DIRECT),
+	HWCTX_REGINFO(0x800,   16, INDIRECT_4X),
+	HWCTX_REGINFO(0x803,  512, INDIRECT),
+	HWCTX_REGINFO(0x805,   64, INDIRECT),
+	HWCTX_REGINFO(0x820,   32, DIRECT),
+	HWCTX_REGINFO(0x900,   64, INDIRECT),
+	HWCTX_REGINFO(0x902,    2, DIRECT),
+	HWCTX_REGINFO(0xa02,   10, DIRECT),
+	HWCTX_REGINFO(0xe04,    1, DIRECT),
+	HWCTX_REGINFO(0xe2a,    1, DIRECT),
+};
+
+/* the same context save command sequence is used for all contexts. */
+#define SAVE_BEGIN_V0_SIZE 5
+#define SAVE_DIRECT_V0_SIZE 3
+#define SAVE_INDIRECT_V0_SIZE 5
+#define SAVE_END_V0_SIZE 5
+#define SAVE_INCRS 3
+#define SAVE_THRESH_OFFSET 1
+#define RESTORE_BEGIN_SIZE 4
+#define RESTORE_DIRECT_SIZE 1
+#define RESTORE_INDIRECT_SIZE 2
+#define RESTORE_END_SIZE 1
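+/* all sizes above are in units of 32-bit command words */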
+
+struct save_info {
+	u32 *ptr;
+	unsigned int save_count;
+	unsigned int restore_count;
+	unsigned int save_incrs;
+	unsigned int restore_incrs;
+};
+
+static u32 *setup_restore_regs_v0(u32 *ptr,
+			const struct hwctx_reginfo *regs,
+			unsigned int nr_regs)
+{
+	const struct hwctx_reginfo *rend = regs + nr_regs;
+
+	for ( ; regs != rend; ++regs) {
+		u32 offset = regs->offset;
+		u32 count = regs->count;
+		u32 indoff = offset + 1;
+		switch (regs->type) {
+		case HWCTX_REGINFO_DIRECT:
+			nvhost_3dctx_restore_direct(ptr, offset, count);
+			ptr += RESTORE_DIRECT_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT_4X:
+			++indoff;
+			/* fall through */
+		case HWCTX_REGINFO_INDIRECT:
+			nvhost_3dctx_restore_indirect(ptr,
+					offset, 0, indoff, count);
+			ptr += RESTORE_INDIRECT_SIZE;
+			break;
+		}
+		ptr += count;
+	}
+	return ptr;
+}
+
+static void setup_restore_v0(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+	nvhost_3dctx_restore_begin(h, ptr);
+	ptr += RESTORE_BEGIN_SIZE;
+
+	ptr = setup_restore_regs_v0(ptr,
+			ctxsave_regs_3d_global,
+			ARRAY_SIZE(ctxsave_regs_3d_global));
+
+	nvhost_3dctx_restore_end(h, ptr);
+
+	wmb();
+}
+
+/*** v0 saver ***/
+
+static void save_push_v0(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
+{
+	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);
+
+	nvhost_cdma_push_gather(cdma,
+			nvhost_get_host(nctx->channel->dev)->memmgr,
+			p->save_buf,
+			0,
+			nvhost_opcode_gather(p->save_size),
+			p->save_phys);
+}
+
+static void save_begin_v0(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+	/* 3d: when done, increment syncpt to base+1 */
+	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+	ptr[1] = nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_op_done_v(),
+			h->syncpt); /*  incr 1 */
+	/* host: wait for syncpt base+1 */
+	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_wait_syncpt_base_r(), 1);
+	ptr[3] = nvhost_class_host_wait_syncpt_base(h->syncpt,
+						h->waitbase, 1);
+	/* host: signal context read thread to start reading */
+	ptr[4] = nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_immediate_v(),
+			h->syncpt); /* incr 2 */
+}
+
+static void save_direct_v0(u32 *ptr, u32 start_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_nonincr(host1x_uclass_indoff_r(), 1);
+	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+						start_reg, true);
+	ptr[2] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
+}
+
+static void save_indirect_v0(u32 *ptr, u32 offset_reg, u32 offset,
+			u32 data_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
+					offset_reg, 1);
+	ptr[1] = offset;
+	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_indoff_r(), 1);
+	ptr[3] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+						data_reg, false);
+	ptr[4] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
+}
+
+static void save_end_v0(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+	/* Wait for context read service to finish (cpu incr 3) */
+	ptr[0] = nvhost_opcode_nonincr(host1x_uclass_wait_syncpt_base_r(), 1);
+	ptr[1] = nvhost_class_host_wait_syncpt_base(h->syncpt,
+			h->waitbase, h->save_incrs);
+	/* Advance syncpoint base */
+	ptr[2] = nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1);
+	ptr[3] = nvhost_class_host_incr_syncpt_base(h->waitbase,
+			h->save_incrs);
+	/* set class back to the unit */
+	ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+}
+
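+/*
+ * Runs from the context-save service: drains the register values that
+ * the save sequence dumped into the read FIFO and patches them into the
+ * data slots that setup_restore_regs_v0() left between the restore
+ * opcodes.
+ */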
+static u32 *save_regs_v0(u32 *ptr, unsigned int *pending,
+			struct nvhost_channel *ch,
+			const struct hwctx_reginfo *regs,
+			unsigned int nr_regs)
+{
+	const struct hwctx_reginfo *rend = regs + nr_regs;
+	int drain_result = 0;
+
+	for ( ; regs != rend; ++regs) {
+		u32 count = regs->count;
+		switch (regs->type) {
+		case HWCTX_REGINFO_DIRECT:
+			ptr += RESTORE_DIRECT_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT:
+		case HWCTX_REGINFO_INDIRECT_4X:
+			ptr += RESTORE_INDIRECT_SIZE;
+			break;
+		}
+		drain_result = nvhost_channel_drain_read_fifo(ch,
+			ptr, count, pending);
+		BUG_ON(drain_result < 0);
+		ptr += count;
+	}
+	return ptr;
+}
+
+/*** save ***/
+
+static void setup_save_regs(struct save_info *info,
+			const struct hwctx_reginfo *regs,
+			unsigned int nr_regs)
+{
+	const struct hwctx_reginfo *rend = regs + nr_regs;
+	u32 *ptr = info->ptr;
+	unsigned int save_count = info->save_count;
+	unsigned int restore_count = info->restore_count;
+
+	for ( ; regs != rend; ++regs) {
+		u32 offset = regs->offset;
+		u32 count = regs->count;
+		u32 indoff = offset + 1;
+		switch (regs->type) {
+		case HWCTX_REGINFO_DIRECT:
+			if (ptr) {
+				save_direct_v0(ptr, offset, count);
+				ptr += SAVE_DIRECT_V0_SIZE;
+			}
+			save_count += SAVE_DIRECT_V0_SIZE;
+			restore_count += RESTORE_DIRECT_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT_4X:
+			++indoff;
+			/* fall through */
+		case HWCTX_REGINFO_INDIRECT:
+			if (ptr) {
+				save_indirect_v0(ptr, offset, 0,
+						indoff, count);
+				ptr += SAVE_INDIRECT_V0_SIZE;
+			}
+			save_count += SAVE_INDIRECT_V0_SIZE;
+			restore_count += RESTORE_INDIRECT_SIZE;
+			break;
+		}
+		if (ptr) {
+			/* SAVE cases only: reserve room for incoming data */
+			u32 k = 0;
+			/*
+			 * Create a signature pattern for indirect data
+			 * (which will be overwritten by true incoming data)
+			 * to make it easier to deduce where we are in a long
+			 * command sequence when given only a FIFO snapshot
+			 * for debugging purposes.
+			 */
+			for (k = 0; k < count; k++)
+				*(ptr + k) = 0xd000d000 | (offset << 16) | k;
+			ptr += count;
+		}
+		save_count += count;
+		restore_count += count;
+	}
+
+	info->ptr = ptr;
+	info->save_count = save_count;
+	info->restore_count = restore_count;
+}
+
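+/*
+ * Called twice: first with ptr == NULL to size the save and restore
+ * sequences, then with the allocated save buffer to actually emit them.
+ */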
+static void setup_save(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+	struct save_info info = {
+		ptr,
+		SAVE_BEGIN_V0_SIZE,
+		RESTORE_BEGIN_SIZE,
+		SAVE_INCRS,
+		1
+	};
+
+	if (info.ptr) {
+		save_begin_v0(h, info.ptr);
+		info.ptr += SAVE_BEGIN_V0_SIZE;
+	}
+
+	/* save regs */
+	setup_save_regs(&info,
+			ctxsave_regs_3d_global,
+			ARRAY_SIZE(ctxsave_regs_3d_global));
+
+	if (info.ptr) {
+		save_end_v0(h, info.ptr);
+		info.ptr += SAVE_END_V0_SIZE;
+	}
+
+	wmb();
+
+	h->save_size = info.save_count + SAVE_END_V0_SIZE;
+	h->restore_size = info.restore_count + RESTORE_END_SIZE;
+	h->save_incrs = info.save_incrs;
+	h->save_thresh = h->save_incrs - SAVE_THRESH_OFFSET;
+	h->restore_incrs = info.restore_incrs;
+}
+
+
+
+/*** ctx3d ***/
+
+static struct nvhost_hwctx *ctx3d_alloc_v0(struct nvhost_hwctx_handler *h,
+		struct nvhost_channel *ch)
+{
+	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
+	struct host1x_hwctx *ctx =
+		nvhost_3dctx_alloc_common(p, ch, true);
+	if (ctx) {
+		setup_restore_v0(p, ctx->restore_virt);
+		return &ctx->hwctx;
+	} else
+		return NULL;
+}
+
+static void ctx3d_save_service(struct nvhost_hwctx *nctx)
+{
+	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+
+	u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
+	unsigned int pending = 0;
+
+	ptr = save_regs_v0(ptr, &pending, nctx->channel,
+			ctxsave_regs_3d_global,
+			ARRAY_SIZE(ctxsave_regs_3d_global));
+
+	wmb();
+	nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
+			host1x_hwctx_handler(ctx)->syncpt);
+}
+
+struct nvhost_hwctx_handler *nvhost_gr3d_t20_ctxhandler_init(
+		u32 syncpt, u32 waitbase,
+		struct nvhost_channel *ch)
+{
+	struct mem_mgr *memmgr;
+	u32 *save_ptr;
+	struct host1x_hwctx_handler *p;
+
+	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return NULL;
+	memmgr = nvhost_get_host(ch->dev)->memmgr;
+
+	p->syncpt = syncpt;
+	p->waitbase = waitbase;
+
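+	/* first pass: ptr == NULL just computes the buffer sizes */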
+	setup_save(p, NULL);
+
+	p->save_buf = mem_op().alloc(memmgr, p->save_size * sizeof(u32), 32,
+				mem_mgr_flag_write_combine);
+	if (IS_ERR_OR_NULL(p->save_buf)) {
+		kfree(p);
+		return NULL;
+	}
+
+	p->save_slots = 1;
+
+	save_ptr = mem_op().mmap(p->save_buf);
+	if (!save_ptr) {
+		mem_op().put(memmgr, p->save_buf);
+		kfree(p);
+		return NULL;
+	}
+
+	p->save_phys = mem_op().pin(memmgr, p->save_buf);
+
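+	/* second pass emits the save sequence into the mapped buffer */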
+	setup_save(p, save_ptr);
+
+	p->h.alloc = ctx3d_alloc_v0;
+	p->h.save_push = save_push_v0;
+	p->h.save_service = ctx3d_save_service;
+	p->h.get = nvhost_3dctx_get;
+	p->h.put = nvhost_3dctx_put;
+
+	return &p->h;
+}
diff --git a/drivers/staging/tegra/video/host/gr3d/gr3d_t20.h b/drivers/staging/tegra/video/host/gr3d/gr3d_t20.h
new file mode 100644
index 000000000000..e6fb8fdf8aba
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr3d/gr3d_t20.h
@@ -0,0 +1,33 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t20.h
+ *
+ * Tegra Graphics Host 3D for Tegra2
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_GR3D_GR3D_T20_H
+#define __NVHOST_GR3D_GR3D_T20_H
+
+#include <linux/types.h>
+
+struct nvhost_hwctx_handler;
+struct nvhost_channel;
+
+struct nvhost_hwctx_handler *nvhost_gr3d_t20_ctxhandler_init(
+		u32 syncpt, u32 waitbase,
+		struct nvhost_channel *ch);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/gr3d/gr3d_t30.c b/drivers/staging/tegra/video/host/gr3d/gr3d_t30.c
new file mode 100644
index 000000000000..f4bf7f7abc14
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr3d/gr3d_t30.c
@@ -0,0 +1,437 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t30.c
+ *
+ * Tegra Graphics Host 3D for Tegra3
+ *
+ * Copyright (c) 2011-2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_hwctx.h"
+#include "nvhost_channel.h"
+#include "nvhost_cdma.h"
+#include "dev.h"
+#include "host1x/host1x01_hardware.h"
+#include "gr3d.h"
+#include "chip_support.h"
+#include "nvhost_memmgr.h"
+
+#include <linux/slab.h>
+
+static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
+	HWCTX_REGINFO(0xe00,    4, DIRECT),
+	HWCTX_REGINFO(0xe05,   30, DIRECT),
+	HWCTX_REGINFO(0xe25,    2, DIRECT),
+	HWCTX_REGINFO(0xe28,    2, DIRECT),
+	HWCTX_REGINFO(0xe30,   16, DIRECT),
+	HWCTX_REGINFO(0x001,    2, DIRECT),
+	HWCTX_REGINFO(0x00c,   10, DIRECT),
+	HWCTX_REGINFO(0x100,   34, DIRECT),
+	HWCTX_REGINFO(0x124,    2, DIRECT),
+	HWCTX_REGINFO(0x200,    5, DIRECT),
+	HWCTX_REGINFO(0x205, 1024, INDIRECT),
+	HWCTX_REGINFO(0x207, 1024, INDIRECT),
+	HWCTX_REGINFO(0x209,    1, DIRECT),
+	HWCTX_REGINFO(0x300,   64, DIRECT),
+	HWCTX_REGINFO(0x343,   25, DIRECT),
+	HWCTX_REGINFO(0x363,    2, DIRECT),
+	HWCTX_REGINFO(0x400,   16, DIRECT),
+	HWCTX_REGINFO(0x411,    1, DIRECT),
+	HWCTX_REGINFO(0x412,    1, DIRECT),
+	HWCTX_REGINFO(0x500,    4, DIRECT),
+	HWCTX_REGINFO(0x520,   32, DIRECT),
+	HWCTX_REGINFO(0x540,   64, INDIRECT),
+	HWCTX_REGINFO(0x600,   16, INDIRECT_4X),
+	HWCTX_REGINFO(0x603,  128, INDIRECT),
+	HWCTX_REGINFO(0x608,    4, DIRECT),
+	HWCTX_REGINFO(0x60e,    1, DIRECT),
+	HWCTX_REGINFO(0x700,   64, INDIRECT),
+	HWCTX_REGINFO(0x710,   50, DIRECT),
+	HWCTX_REGINFO(0x750,   16, DIRECT),
+	HWCTX_REGINFO(0x800,   16, INDIRECT_4X),
+	HWCTX_REGINFO(0x803,  512, INDIRECT),
+	HWCTX_REGINFO(0x805,   64, INDIRECT),
+	HWCTX_REGINFO(0x820,   32, DIRECT),
+	HWCTX_REGINFO(0x900,   64, INDIRECT),
+	HWCTX_REGINFO(0x902,    2, DIRECT),
+	HWCTX_REGINFO(0x90a,    1, DIRECT),
+	HWCTX_REGINFO(0xa02,   10, DIRECT),
+	HWCTX_REGINFO(0xb04,    1, DIRECT),
+	HWCTX_REGINFO(0xb06,   13, DIRECT),
+};
+
+static const struct hwctx_reginfo ctxsave_regs_3d_perset[] = {
+	HWCTX_REGINFO(0xe04,    1, DIRECT),
+	HWCTX_REGINFO(0xe2a,    1, DIRECT),
+	HWCTX_REGINFO(0x413,    1, DIRECT),
+	HWCTX_REGINFO(0x90b,    1, DIRECT),
+	HWCTX_REGINFO(0xe41,    1, DIRECT),
+};
+
+static unsigned int restore_set1_offset;
+
+#define SAVE_BEGIN_V1_SIZE (1 + RESTORE_BEGIN_SIZE)
+#define SAVE_DIRECT_V1_SIZE (4 + RESTORE_DIRECT_SIZE)
+#define SAVE_INDIRECT_V1_SIZE (6 + RESTORE_INDIRECT_SIZE)
+#define SAVE_END_V1_SIZE (9 + RESTORE_END_SIZE)
+#define SAVE_INCRS 3
+#define SAVE_THRESH_OFFSET 0
+#define RESTORE_BEGIN_SIZE 4
+#define RESTORE_DIRECT_SIZE 1
+#define RESTORE_INDIRECT_SIZE 2
+#define RESTORE_END_SIZE 1
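+/* as on Tegra2, the sizes above are in 32-bit command words */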
+
+struct save_info {
+	u32 *ptr;
+	unsigned int save_count;
+	unsigned int restore_count;
+	unsigned int save_incrs;
+	unsigned int restore_incrs;
+};
+
+/*** v1 saver ***/
+
+static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
+{
+	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);
+
+	/* wait for 3d idle */
+	nvhost_cdma_push(cdma,
+			nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
+			nvhost_opcode_imm_incr_syncpt(
+				host1x_uclass_incr_syncpt_cond_op_done_v(),
+				p->syncpt));
+	nvhost_cdma_push(cdma,
+			nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_wait_syncpt_base_r(), 1),
+			nvhost_class_host_wait_syncpt_base(p->syncpt,
+							p->waitbase, 1));
+	/* back to 3d */
+	nvhost_cdma_push(cdma,
+			nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
+			NVHOST_OPCODE_NOOP);
+
+	/*
+	 * Invalidate the FDC to prevent cache-coherency issues across GPUs.
+	 * Note that we assume FDC_CONTROL_0 is left in the reset state by
+	 * all contexts; the invalidate bit will clear itself, so the
+	 * register should be unchanged after this.
+	 */
+	nvhost_cdma_push(cdma,
+		nvhost_opcode_imm(AR3D_FDC_CONTROL_0,
+			AR3D_FDC_CONTROL_0_RESET_VAL
+				| AR3D_FDC_CONTROL_0_INVALIDATE),
+		NVHOST_OPCODE_NOOP);
+
+	/* set register set 0 and 1 register read memory output addresses,
+	   and send their reads to memory */
+
+	nvhost_cdma_push(cdma,
+		nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 2),
+		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
+	nvhost_cdma_push(cdma,
+		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
+		ctx->restore_phys + restore_set1_offset * 4);
+
+	nvhost_cdma_push(cdma,
+		nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 1),
+		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
+	nvhost_cdma_push(cdma,
+		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
+		ctx->restore_phys);
+	/* gather the save buffer */
+	nvhost_cdma_push_gather(cdma,
+			nvhost_get_host(nctx->channel->dev)->memmgr,
+			p->save_buf,
+			0,
+			nvhost_opcode_gather(p->save_size),
+			p->save_phys);
+}
+
+static void save_begin_v1(struct host1x_hwctx_handler *p, u32 *ptr)
+{
+	ptr[0] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
+			RESTORE_BEGIN_SIZE);
+	nvhost_3dctx_restore_begin(p, ptr + 1);
+	ptr += RESTORE_BEGIN_SIZE;
+}
+
+static void save_direct_v1(u32 *ptr, u32 start_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
+			AR3D_DW_MEMORY_OUTPUT_DATA, 1);
+	nvhost_3dctx_restore_direct(ptr + 1, start_reg, count);
+	ptr += RESTORE_DIRECT_SIZE;
+	ptr[1] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_indoff_r(), 1);
+	ptr[2] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+						start_reg, true);
+	/* TODO could do this in the setclass if count < 6 */
+	ptr[3] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
+}
+
+static void save_indirect_v1(u32 *ptr, u32 offset_reg, u32 offset,
+			u32 data_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+	ptr[1] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
+			RESTORE_INDIRECT_SIZE);
+	nvhost_3dctx_restore_indirect(ptr + 2, offset_reg, offset, data_reg,
+			count);
+	ptr += RESTORE_INDIRECT_SIZE;
+	ptr[2] = nvhost_opcode_imm(offset_reg, offset);
+	ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_indoff_r(), 1);
+	ptr[4] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+						data_reg, false);
+	ptr[5] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
+}
+
+static void save_end_v1(struct host1x_hwctx_handler *p, u32 *ptr)
+{
+	/* write end of restore buffer */
+	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
+			AR3D_DW_MEMORY_OUTPUT_DATA, 1);
+	nvhost_3dctx_restore_end(p, ptr + 1);
+	ptr += RESTORE_END_SIZE;
+	/* reset to dual reg if necessary */
+	ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
+			(1 << 2) - 1);
+	/* op_done syncpt incr to flush FDC */
+	ptr[2] = nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_op_done_v(), p->syncpt);
+	/* host wait for that syncpt incr, and advance the wait base */
+	ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+			host1x_uclass_wait_syncpt_base_r(),
+			nvhost_mask2(
+				host1x_uclass_wait_syncpt_base_r(),
+				host1x_uclass_incr_syncpt_base_r()));
+	ptr[4] = nvhost_class_host_wait_syncpt_base(p->syncpt,
+				p->waitbase, p->save_incrs - 1);
+	ptr[5] = nvhost_class_host_incr_syncpt_base(p->waitbase,
+			p->save_incrs);
+	/* set class back to 3d */
+	ptr[6] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+	/* send reg reads back to host */
+	ptr[7] = nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 0);
+	/* final syncpt increment to release waiters */
+	ptr[8] = nvhost_opcode_imm(0, p->syncpt);
+}
+
+/*** save ***/
+
+static void setup_save_regs(struct save_info *info,
+			const struct hwctx_reginfo *regs,
+			unsigned int nr_regs)
+{
+	const struct hwctx_reginfo *rend = regs + nr_regs;
+	u32 *ptr = info->ptr;
+	unsigned int save_count = info->save_count;
+	unsigned int restore_count = info->restore_count;
+
+	for ( ; regs != rend; ++regs) {
+		u32 offset = regs->offset;
+		u32 count = regs->count;
+		u32 indoff = offset + 1;
+		switch (regs->type) {
+		case HWCTX_REGINFO_DIRECT:
+			if (ptr) {
+				save_direct_v1(ptr, offset, count);
+				ptr += SAVE_DIRECT_V1_SIZE;
+			}
+			save_count += SAVE_DIRECT_V1_SIZE;
+			restore_count += RESTORE_DIRECT_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT_4X:
+			++indoff;
+			/* fall through */
+		case HWCTX_REGINFO_INDIRECT:
+			if (ptr) {
+				save_indirect_v1(ptr, offset, 0,
+						indoff, count);
+				ptr += SAVE_INDIRECT_V1_SIZE;
+			}
+			save_count += SAVE_INDIRECT_V1_SIZE;
+			restore_count += RESTORE_INDIRECT_SIZE;
+			break;
+		}
+		if (ptr) {
+			/* SAVE cases only: reserve room for incoming data */
+			u32 k = 0;
+			/*
+			 * Create a signature pattern for indirect data (which
+			 * will be overwritten by true incoming data) to make
+			 * it easier to deduce where we are in a long command
+			 * sequence, when given only a FIFO snapshot for debug
+			 * purposes.
+			 */
+			for (k = 0; k < count; k++)
+				*(ptr + k) = 0xd000d000 | (offset << 16) | k;
+			ptr += count;
+		}
+		save_count += count;
+		restore_count += count;
+	}
+
+	info->ptr = ptr;
+	info->save_count = save_count;
+	info->restore_count = restore_count;
+}
+
+static void switch_gpu(struct save_info *info,
+			unsigned int save_src_set,
+			u32 save_dest_sets,
+			u32 restore_dest_sets)
+{
+	if (info->ptr) {
+		info->ptr[0] = nvhost_opcode_setclass(
+				NV_GRAPHICS_3D_CLASS_ID,
+				AR3D_DW_MEMORY_OUTPUT_DATA, 1);
+		info->ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
+				restore_dest_sets);
+		info->ptr[2] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
+				save_dest_sets);
+		info->ptr[3] = nvhost_opcode_imm(AR3D_GSHIM_READ_SELECT,
+				save_src_set);
+		info->ptr += 4;
+	}
+	info->save_count += 4;
+	info->restore_count += 1;
+}
+
+static void setup_save(struct host1x_hwctx_handler *p, u32 *ptr)
+{
+	struct save_info info = {
+		ptr,
+		SAVE_BEGIN_V1_SIZE,
+		RESTORE_BEGIN_SIZE,
+		SAVE_INCRS,
+		1
+	};
+	int save_end_size = SAVE_END_V1_SIZE;
+
+	if (info.ptr) {
+		save_begin_v1(p, info.ptr);
+		info.ptr += SAVE_BEGIN_V1_SIZE;
+	}
+
+	/* read from set0, write cmds through set0, restore to set0 and 1 */
+	switch_gpu(&info, 0, 1, 3);
+
+	/* save regs that are common to both sets */
+	setup_save_regs(&info,
+			ctxsave_regs_3d_global,
+			ARRAY_SIZE(ctxsave_regs_3d_global));
+
+	/* read from set 0, write cmds through set0, restore to set0 */
+	switch_gpu(&info, 0, 1, 1);
+
+	/* save set 0 specific regs */
+	setup_save_regs(&info,
+			ctxsave_regs_3d_perset,
+			ARRAY_SIZE(ctxsave_regs_3d_perset));
+
+	/* read from set1, write cmds through set1, restore to set1 */
+	switch_gpu(&info, 1, 2, 2);
+	/* note offset at which set 1 restore starts */
+	restore_set1_offset = info.restore_count;
+	/* save set 1 specific regs */
+	setup_save_regs(&info,
+			ctxsave_regs_3d_perset,
+			ARRAY_SIZE(ctxsave_regs_3d_perset));
+
+	/* read from set0, write cmds through set1, restore to set0 and 1 */
+	switch_gpu(&info, 0, 2, 3);
+
+	if (info.ptr) {
+		save_end_v1(p, info.ptr);
+		info.ptr += SAVE_END_V1_SIZE;
+	}
+
+	wmb();
+
+	p->save_size = info.save_count + save_end_size;
+	p->restore_size = info.restore_count + RESTORE_END_SIZE;
+	p->save_incrs = info.save_incrs;
+	p->save_thresh = p->save_incrs - SAVE_THRESH_OFFSET;
+	p->restore_incrs = info.restore_incrs;
+}
+
+/*** ctx3d ***/
+
+static struct nvhost_hwctx *ctx3d_alloc_v1(struct nvhost_hwctx_handler *h,
+		struct nvhost_channel *ch)
+{
+	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
+	struct host1x_hwctx *ctx = nvhost_3dctx_alloc_common(p, ch, false);
+
+	if (ctx)
+		return &ctx->hwctx;
+	else
+		return NULL;
+}
+
+struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
+		u32 syncpt, u32 waitbase,
+		struct nvhost_channel *ch)
+{
+	struct mem_mgr *memmgr;
+	u32 *save_ptr;
+	struct host1x_hwctx_handler *p;
+
+	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return NULL;
+
+	memmgr = nvhost_get_host(ch->dev)->memmgr;
+
+	p->syncpt = syncpt;
+	p->waitbase = waitbase;
+
+	/* first pass: a NULL pointer makes setup_save() only count sizes */
+	setup_save(p, NULL);
+
+	p->save_buf = mem_op().alloc(memmgr, p->save_size * 4, 32,
+				mem_mgr_flag_write_combine);
+	if (IS_ERR_OR_NULL(p->save_buf)) {
+		kfree(p);
+		return NULL;
+	}
+
+	p->save_slots = 8;
+
+	save_ptr = mem_op().mmap(p->save_buf);
+	if (!save_ptr) {
+		mem_op().put(memmgr, p->save_buf);
+		kfree(p);
+		return NULL;
+	}
+
+	p->save_phys = mem_op().pin(memmgr, p->save_buf);
+
+	setup_save(p, save_ptr);
+
+	mem_op().munmap(p->save_buf, save_ptr);
+
+	p->h.alloc = ctx3d_alloc_v1;
+	p->h.save_push = save_push_v1;
+	p->h.save_service = NULL;
+	p->h.get = nvhost_3dctx_get;
+	p->h.put = nvhost_3dctx_put;
+
+	return &p->h;
+}
diff --git a/drivers/staging/tegra/video/host/gr3d/gr3d_t30.h b/drivers/staging/tegra/video/host/gr3d/gr3d_t30.h
new file mode 100644
index 000000000000..94d5dc0f353b
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr3d/gr3d_t30.h
@@ -0,0 +1,33 @@
+/*
+ * drivers/video/tegra/host/gr3d/gr3d_t30.h
+ *
+ * Tegra Graphics Host 3D for Tegra3
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_GR3D_GR3D_T30_H
+#define __NVHOST_GR3D_GR3D_T30_H
+
+#include <linux/types.h>
+
+struct nvhost_hwctx_handler;
+struct nvhost_channel;
+
+struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
+		u32 syncpt, u32 waitbase,
+		struct nvhost_channel *ch);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/gr3d/scale3d.c b/drivers/staging/tegra/video/host/gr3d/scale3d.c
new file mode 100644
index 000000000000..4991be8ea0cb
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr3d/scale3d.c
@@ -0,0 +1,941 @@
+/*
+ * drivers/video/tegra/host/gr3d/scale3d.c
+ *
+ * Tegra Graphics Host 3D clock scaling
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * 3d clock scaling
+ *
+ * module3d_notify_busy() is called upon submit, module3d_notify_idle() is
+ * called when all outstanding submits are completed. Idle times are measured
+ * over a fixed time period (scale3d.p_estimation_window). If the 3d module
+ * idle time percentage goes over the limit (set in scale3d.p_idle_max), 3d
+ * clocks are scaled down. If the percentage goes under the minimum limit (set
+ * in scale3d.p_idle_min), 3d clocks are scaled up. An additional test is made
+ * for clocking up quickly in response to load peaks.
+ *
+ * 3d.emc clock is scaled proportionately to 3d clock, with a quadratic-
+ * bezier-like factor added to pull 3d.emc rate a bit lower.
+ */
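+
+/*
+ * Worked example (numbers illustrative; the defaults are set in
+ * nvhost_scale3d_init below): idle estimates are kept in tenths of a
+ * percent, so with p_idle_min = 100 and p_idle_max = 150 an idle_estimate
+ * of 400 (40% idle) exceeds idle_max and the 3d clock is scaled down to
+ * 100 - (400 - 100) / 10 = 70% of its current rate, while an estimate of
+ * 50 (5% idle) falls below idle_min and the clocks are reset to maximum.
+ */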
+
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <mach/clk.h>
+#include <mach/fuse.h>
+#include "scale3d.h"
+#include "dev.h"
+#include <media/tegra_camera.h>
+
+#define GR3D_PRINT_STATS   BIT(1)
+#define GR3D_PRINT_BUSY    BIT(2)
+#define GR3D_PRINT_IDLE    BIT(3)
+#define GR3D_PRINT_HINT    BIT(4)
+#define GR3D_PRINT_TARGET  BIT(5)
+
+/* time frame for load and hint tracking - when events come in at a larger
+ * interval, this probably indicates the current estimates are stale
+ */
+#define GR3D_TIMEFRAME 1000000 /* 1 sec */
+
+/* the number of frames to use in the running average of load estimates and
+ * throughput hints. Choosing 6 frames targets a window of about 100 msec.
+ * Large fluctuations in frame times require a window that's large enough to
+ * prevent spiky scaling behavior, which in turn exacerbates frame rate
+ * instability.
+ */
+#define GR3D_FRAME_SPAN 6
+
+static int scale3d_is_enabled(void);
+static void scale3d_enable(int enable);
+
+#define POW2(x) ((x) * (x))
+
+/*
+ * 3D clock scaling should be treated differently when the camera is on in
+ * AP37: 3D at full rate requires 1.3V, and combining it with MPE hits the
+ * EDP limit, so the 3D clock needs to be set to a lower frequency that can
+ * run at 1.0V. The same applies to the 3D EMC clock.
+ */
+#define CAMERA_3D_CLK 300000000
+#define CAMERA_3D_EMC_CLK 437000000
+
+/*
+ * debugfs parameters to control 3d clock scaling test
+ *
+ * estimation_window  - time period for clock rate evaluation
+ * idle_min           - if less than [idle_min / 10] percent idle over
+ *                      [estimation_window] microseconds, clock up.
+ * idle_max           - if over [idle_max / 10] percent idle over
+ *                      [estimation_window] microseconds, clock down.
+ * verbosity     - bit flag to control debug printouts:
+ *                 1 - stats
+ *                 2 - busy
+ *                 3 - idle
+ *                 4 - hints
+ *                 5 - target frequencies
+ */
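+
+/*
+ * Example usage (a sketch; the actual path depends on the debugfs mount
+ * point and on the parent directory handed to nvhost_scale3d_debug_init):
+ *
+ *   echo 34 > /sys/kernel/debug/<parent>/scaling/verbosity
+ *
+ * sets bits 1 and 5, enabling the stats and target frequency printouts.
+ */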
+
+struct scale3d_info_rec {
+	struct mutex lock; /* lock for timestamps etc */
+	int enable;
+	int init;
+	ktime_t last_scale;
+	int is_idle;
+	ktime_t last_adjust;
+	int fast_up_count;
+	int slow_down_count;
+	int is_scaled;
+	long emc_slope;
+	long emc_offset;
+	long emc_dip_slope;
+	long emc_dip_offset;
+	long emc_xmid;
+	unsigned long max_rate_3d;
+	unsigned long min_rate_3d;
+	ktime_t last_throughput_hint;
+
+	struct work_struct work;
+	struct delayed_work idle_timer;
+
+	ktime_t last_estimation_window;
+	long last_total_idle;
+	long total_idle;
+	ktime_t estimation_window;
+	ktime_t last_notification;
+	long idle_estimate;
+
+	unsigned int scale;
+	unsigned int p_busy_cutoff;
+	unsigned int p_estimation_window;
+	unsigned int p_use_throughput_hint;
+	unsigned int p_throughput_lo_limit;
+	unsigned int p_throughput_lower_limit;
+	unsigned int p_throughput_hi_limit;
+	unsigned int p_scale_step;
+	unsigned int p_idle_min;
+	unsigned int idle_min;
+	unsigned int p_idle_max;
+	unsigned int idle_max;
+	unsigned int p_adjust;
+	unsigned int p_scale_emc;
+	unsigned int p_emc_dip;
+	unsigned int p_verbosity;
+	struct clk *clk_3d;
+	struct clk *clk_3d2;
+	struct clk *clk_3d_emc;
+	int *freqlist;
+	int freq_count;
+};
+
+static struct scale3d_info_rec scale3d;
+
+static void scale_to_freq(unsigned long hz)
+{
+	unsigned long curr;
+
+	if (!tegra_is_clk_enabled(scale3d.clk_3d))
+		return;
+
+	if (tegra_chip_id == TEGRA30)
+		if (!tegra_is_clk_enabled(scale3d.clk_3d2))
+			return;
+
+	curr = clk_get_rate(scale3d.clk_3d);
+	if (hz == curr)
+		return;
+
+	if (!(hz >= scale3d.max_rate_3d && curr == scale3d.max_rate_3d)) {
+		if (tegra_chip_id == TEGRA30)
+			clk_set_rate(scale3d.clk_3d2, 0);
+		clk_set_rate(scale3d.clk_3d, hz);
+
+		if (scale3d.p_scale_emc) {
+			long after = (long) clk_get_rate(scale3d.clk_3d);
+			hz = after * scale3d.emc_slope + scale3d.emc_offset;
+			if (scale3d.p_emc_dip)
+				hz -=
+					(scale3d.emc_dip_slope *
+					POW2(after / 1000 - scale3d.emc_xmid) +
+					scale3d.emc_dip_offset);
+			clk_set_rate(scale3d.clk_3d_emc, hz);
+		}
+	}
+}
+
+static void scale3d_clocks(unsigned long percent)
+{
+	unsigned long hz, curr;
+
+	curr = clk_get_rate(scale3d.clk_3d);
+	hz = percent * (curr / 100);
+
+	scale_to_freq(hz);
+}
+
+static void scale3d_clocks_handler(struct work_struct *work)
+{
+	unsigned int scale;
+
+	mutex_lock(&scale3d.lock);
+	scale = scale3d.scale;
+	mutex_unlock(&scale3d.lock);
+
+	if (scale != 0)
+		scale3d_clocks(scale);
+}
+
+void nvhost_scale3d_suspend(struct nvhost_device *dev)
+{
+	if (!scale3d.enable)
+		return;
+
+	cancel_work_sync(&scale3d.work);
+	cancel_delayed_work(&scale3d.idle_timer);
+}
+
+/* set 3d clocks to max */
+static void reset_3d_clocks(void)
+{
+	if (clk_get_rate(scale3d.clk_3d) != scale3d.max_rate_3d) {
+// 		if (is_tegra_camera_on())
+// 			clk_set_rate(scale3d.clk_3d, CAMERA_3D_CLK);
+// 		else
+			clk_set_rate(scale3d.clk_3d, scale3d.max_rate_3d);
+		if (tegra_chip_id == TEGRA30) {
+// 			if (is_tegra_camera_on())
+// 				clk_set_rate(scale3d.clk_3d2, CAMERA_3D_CLK);
+// 			else
+				clk_set_rate(scale3d.clk_3d2,
+							scale3d.max_rate_3d);
+		}
+		if (scale3d.p_scale_emc) {
+// 			if (is_tegra_camera_on())
+// 				clk_set_rate(scale3d.clk_3d_emc,
+// 					CAMERA_3D_EMC_CLK);
+// 			else
+				clk_set_rate(scale3d.clk_3d_emc,
+					clk_round_rate(scale3d.clk_3d_emc,
+							400000000));
+		}
+	}
+}
+
+static int scale3d_is_enabled(void)
+{
+	int enable;
+
+	if (!scale3d.enable)
+		return 0;
+
+	mutex_lock(&scale3d.lock);
+	enable = scale3d.enable;
+	mutex_unlock(&scale3d.lock);
+
+	return enable;
+}
+
+static void scale3d_enable(int enable)
+{
+	int disable = 0;
+
+	mutex_lock(&scale3d.lock);
+
+	if (enable) {
+		if (scale3d.max_rate_3d != scale3d.min_rate_3d)
+			scale3d.enable = 1;
+	} else {
+		scale3d.enable = 0;
+		disable = 1;
+	}
+
+	mutex_unlock(&scale3d.lock);
+
+	if (disable)
+		reset_3d_clocks();
+}
+
+/* scaling_adjust - use scale up / scale down hint counts to adjust scaling
+ * parameters.
+ *
+ * hint_ratio is 100 x the ratio of scale up to scale down hints. Three cases
+ * are distinguished:
+ *
+ * hint_ratio < HINT_RATIO_MIN - set parameters to maximize scaling effect
+ * hint_ratio > HINT_RATIO_MAX - set parameters to minimize scaling effect
+ * hint_ratio between limits - scale parameters linearly
+ *
+ * the parameters adjusted are
+ *
+ * * idle_min percentage
+ * * idle_max percentage
+ */
+#define SCALING_ADJUST_PERIOD 1000000
+#define HINT_RATIO_MAX 400
+#define HINT_RATIO_MIN 100
+#define HINT_RATIO_MID ((HINT_RATIO_MAX + HINT_RATIO_MIN) / 2)
+#define HINT_RATIO_DIFF (HINT_RATIO_MAX - HINT_RATIO_MIN)
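+
+/*
+ * Worked example with the default p_idle_min = 100 and p_idle_max = 150:
+ * a hint_ratio of 325 gives diff = 250 - 325 = -75, hence factor = 150,
+ * idle_min_adjustment = 150 * 100 / 300 = 50 and idle_max_adjustment =
+ * 150 * 150 / 300 = 75, so idle_min becomes 150 and idle_max becomes 225.
+ * A surplus of scale-up hints thus raises both thresholds, making
+ * scale-up trigger sooner and scale-down later.
+ */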
+
+static void scaling_adjust(ktime_t time)
+{
+	long hint_ratio;
+	int idle_min_adjustment;
+	int idle_max_adjustment;
+	unsigned long dt;
+
+	dt = (unsigned long) ktime_us_delta(time, scale3d.last_adjust);
+	if (dt < SCALING_ADJUST_PERIOD)
+		return;
+
+	hint_ratio = (100 * (scale3d.fast_up_count + 1)) /
+				 (scale3d.slow_down_count + 1);
+
+	if (hint_ratio > HINT_RATIO_MAX) {
+		idle_min_adjustment = scale3d.p_idle_min;
+		idle_max_adjustment = scale3d.p_idle_max;
+	} else if (hint_ratio < HINT_RATIO_MIN) {
+		idle_min_adjustment = -((int) scale3d.p_idle_min) / 2;
+		idle_max_adjustment = -((int) scale3d.p_idle_max) / 2;
+	} else {
+		int diff;
+		int factor;
+
+		diff = HINT_RATIO_MID - hint_ratio;
+		if (diff < 0)
+			factor = -diff * 2;
+		else {
+			factor = -diff;
+			diff *= 2;
+		}
+
+		idle_min_adjustment =
+			(factor * (int) scale3d.p_idle_min) / HINT_RATIO_DIFF;
+		idle_max_adjustment =
+			(factor * (int) scale3d.p_idle_max) / HINT_RATIO_DIFF;
+	}
+
+	scale3d.idle_min = scale3d.p_idle_min + idle_min_adjustment;
+	scale3d.idle_max = scale3d.p_idle_max + idle_max_adjustment;
+
+	if (scale3d.p_verbosity & GR3D_PRINT_STATS)
+		pr_info("scale3d stats: + %d - %d min %u max %u\n",
+			scale3d.fast_up_count, scale3d.slow_down_count,
+			scale3d.idle_min, scale3d.idle_max);
+
+	scale3d.fast_up_count = 0;
+	scale3d.slow_down_count = 0;
+	scale3d.last_adjust = time;
+}
+
+#undef SCALING_ADJUST_PERIOD
+#undef HINT_RATIO_MAX
+#undef HINT_RATIO_MIN
+#undef HINT_RATIO_MID
+#undef HINT_RATIO_DIFF
+
+static void scaling_state_check(ktime_t time)
+{
+	unsigned long dt;
+
+	/* adjustment: set scale parameters (idle_min, idle_max) +/- 25%
+	 * based on ratio of scale up to scale down hints
+	 */
+	if (scale3d.p_adjust)
+		scaling_adjust(time);
+	else {
+		scale3d.idle_min = scale3d.p_idle_min;
+		scale3d.idle_max = scale3d.p_idle_max;
+	}
+
+	dt = (unsigned long) ktime_us_delta(time, scale3d.last_scale);
+	if (dt < scale3d.p_estimation_window)
+		return;
+
+	scale3d.last_scale = time;
+
+	/* if too busy, scale up */
+	if (scale3d.idle_estimate < scale3d.idle_min) {
+		scale3d.is_scaled = 0;
+		scale3d.fast_up_count++;
+		if (scale3d.p_verbosity & GR3D_PRINT_BUSY)
+			pr_info("scale3d: %ld/1000 busy\n",
+				1000 - scale3d.idle_estimate);
+
+		reset_3d_clocks();
+		return;
+	}
+
+	if (scale3d.p_verbosity & GR3D_PRINT_IDLE)
+		pr_info("scale3d: idle %lu/1000\n",
+			scale3d.idle_estimate);
+
+	if (scale3d.idle_estimate > scale3d.idle_max) {
+		if (!scale3d.is_scaled)
+			scale3d.is_scaled = 1;
+
+		scale3d.slow_down_count++;
+		/* if idle time is high, clock down */
+		scale3d.scale =
+			100 - (scale3d.idle_estimate - scale3d.idle_min) / 10;
+		schedule_work(&scale3d.work);
+	}
+}
+
+/* the idle estimate is done by keeping 2 time stamps, initially set to the
+ * same time. Once the estimation_window time has been exceeded, one time
+ * stamp is moved up to the current time. The idle estimate is calculated
+ * based on the idle time percentage from the earlier estimate. The next time
+ * an estimation_window time is exceeded, the previous idle time and estimates
+ * are moved up - this is intended to prevent abrupt changes to the idle
+ * estimate.
+ */
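+/* Illustrative timeline, assuming p_estimation_window = 8000 usec: events
+ * arriving while the module is idle add their deltas to both total_idle
+ * and last_total_idle; once 8000 usec have passed since estimation_window
+ * was stamped, the window advances and last_total_idle inherits the old
+ * window's sum, so idle_estimate = 1000 * last_total_idle / window evolves
+ * smoothly instead of resetting to zero at every window boundary.
+ */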
+static void update_load_estimate(int idle)
+{
+	unsigned long window;
+	unsigned long t;
+
+	ktime_t now = ktime_get();
+	t = ktime_us_delta(now, scale3d.last_notification);
+
+	/* if the last event was over GR3D_TIMEFRAME usec ago (1 sec), the
+	 * current load tracking data is probably stale
+	 */
+	if (t > GR3D_TIMEFRAME) {
+		scale3d.is_idle = idle;
+		scale3d.last_notification = now;
+		scale3d.estimation_window = now;
+		scale3d.last_estimation_window = now;
+		scale3d.total_idle = 0;
+		scale3d.last_total_idle = 0;
+		scale3d.idle_estimate = idle ? 1000 : 0;
+		return;
+	}
+
+	if (scale3d.is_idle) {
+		scale3d.total_idle += t;
+		scale3d.last_total_idle += t;
+	}
+
+	scale3d.is_idle = idle;
+	scale3d.last_notification = now;
+
+	window = ktime_us_delta(now, scale3d.last_estimation_window);
+	/* prevent division by 0 if events come in less than 1 usec apart */
+	if (window > 0)
+		scale3d.idle_estimate =
+			(1000 * scale3d.last_total_idle) / window;
+
+	/* move up to the last estimation window */
+	if (ktime_us_delta(now, scale3d.estimation_window) >
+		scale3d.p_estimation_window) {
+		scale3d.last_estimation_window = scale3d.estimation_window;
+		scale3d.last_total_idle = scale3d.total_idle;
+		scale3d.total_idle = 0;
+		scale3d.estimation_window = now;
+	}
+}
+
+void nvhost_scale3d_notify_idle(struct nvhost_device *dev)
+{
+	ktime_t t;
+	unsigned long dt;
+	int delay;
+
+	if (!scale3d.enable)
+		return;
+
+	update_load_estimate(1);
+
+	t = ktime_get();
+
+	/* if throughput hint enabled, and last hint is recent enough, return */
+	if (scale3d.p_use_throughput_hint) {
+		dt = ktime_us_delta(t, scale3d.last_throughput_hint);
+		if (dt < GR3D_TIMEFRAME)
+			return;
+	}
+
+	mutex_lock(&scale3d.lock);
+
+	scaling_state_check(t);
+
+	/* delay idle_max % of 2 * estimation_window (given in microseconds) */
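+	/* e.g. idle_max = 150 and an 8000 usec window give a delay of
+	 * (150 * 8000) / 500000 = 2 msec */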
+	delay = (scale3d.idle_max * scale3d.p_estimation_window) / 500000;
+	schedule_delayed_work(&scale3d.idle_timer, msecs_to_jiffies(delay));
+
+	mutex_unlock(&scale3d.lock);
+}
+
+void nvhost_scale3d_notify_busy(struct nvhost_device *dev)
+{
+	ktime_t t;
+
+	if (!scale3d.enable)
+		return;
+
+	update_load_estimate(0);
+
+	t = ktime_get();
+
+	/* if throughput hint enabled, and last hint is recent enough, return */
+	if (scale3d.p_use_throughput_hint) {
+		unsigned long dt;
+		dt = ktime_us_delta(t, scale3d.last_throughput_hint);
+		if (dt < GR3D_TIMEFRAME)
+			return;
+	}
+
+	mutex_lock(&scale3d.lock);
+
+	cancel_delayed_work(&scale3d.idle_timer);
+	scaling_state_check(t);
+
+	mutex_unlock(&scale3d.lock);
+}
+
+struct score {
+	int size;		/* number of elements */
+	int pos;		/* position in ring buffer */
+	int count;		/* actual item count */
+	unsigned int sum;	/* running sum */
+	unsigned int prev;	/* previous score after 'reset' operation */
+	unsigned int list[];	/* ring buffer */
+};
+
+static struct score *score_init(int capacity)
+{
+	struct score *s;
+
+	s = kzalloc(sizeof(struct score) + capacity * sizeof(int), GFP_KERNEL);
+	if (s == NULL)
+		return NULL;
+
+	s->size = capacity;
+
+	return s;
+}
+
+static void score_delete(struct score *s)
+{
+	kfree(s);
+}
+
+#define score_get_average(s) ((s)->count ? (s)->sum / (s)->count : 0)
+
+static void score_add(struct score *s, unsigned int reading)
+{
+	if (s->count < s->size) {
+		s->sum += reading;
+		s->count++;
+	} else
+		s->sum = s->sum - s->list[s->pos] + reading;
+
+	s->list[s->pos] = reading;
+	s->pos = (s->pos + 1) % s->size;
+}
+
+static unsigned int score_reset(struct score *s)
+{
+	s->prev = s->sum;
+
+	s->count = 0;
+	s->pos = 0;
+	s->sum = 0;
+
+	return s->prev;
+}
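+
+/* Usage sketch: with size = 3, adding the readings 10, 20, 30, 40 leaves
+ * sum = 20 + 30 + 40 = 90 (the wrapped ring slot drops the oldest value),
+ * so score_get_average() returns 90 / 3 = 30.
+ */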
+
+int freqlist_up(long target, int steps)
+{
+	int i, pos;
+
+	for (i = 0; i < scale3d.freq_count; i++)
+		if (scale3d.freqlist[i] >= target)
+			break;
+
+	pos = min(scale3d.freq_count - 1, i + steps);
+	return scale3d.freqlist[pos];
+}
+
+int freqlist_down(long target, int steps)
+{
+	int i, pos;
+
+	for (i = scale3d.freq_count - 1; i >= 0; i--)
+		if (scale3d.freqlist[i] <= target)
+			break;
+
+	pos = max(0, i - steps);
+	return scale3d.freqlist[pos];
+}
+
+static struct score *busy_history;
+static struct score *hint_history;
+
+/* When a throughput hint is given, perform scaling based on the hint and on
+ * the current idle estimation. This is done as follows:
+ *
+ * 1. On moderate loads force min frequency if the throughput hint is not too
+ *    low.
+ * 2. Otherwise, calculate target-rate = max-rate * load-percentage
+ * 3. Unless the current or average throughput hint is below the minimum
+ *    limit, in which case, choose a higher rate
+ * 4. Or the average throughput hint is above the maximum limit, in which case,
+ *    choose a lower rate.
+ */
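+/* Worked example (rates illustrative): with avg_busy = 800 (80% load,
+ * above the default p_busy_cutoff of 750) and max_rate_3d = 400 MHz, the
+ * target becomes (400000000 / 1000) * 800 = 320 MHz; if the incoming or
+ * average hint is at or below p_throughput_lo_limit (990), the target is
+ * then bumped one step up the frequency list via freqlist_up().
+ */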
+void nvhost_scale3d_set_throughput_hint(int hint)
+{
+	ktime_t now;
+	long busy;
+	long curr;
+	long target;
+	long dt;
+	int avg_busy, avg_hint;
+
+	if (!scale3d.enable)
+		return;
+
+	if (!scale3d.p_use_throughput_hint)
+		return;
+
+	if (scale3d.p_verbosity & GR3D_PRINT_HINT)
+		pr_info("3dfs: idle %ld, hint %d\n",
+			scale3d.idle_estimate, hint);
+
+	now = ktime_get();
+	dt = ktime_us_delta(now, scale3d.last_throughput_hint);
+	if (dt > GR3D_TIMEFRAME) {
+		score_reset(busy_history);
+		score_reset(hint_history);
+	}
+
+	scale3d.last_throughput_hint = now;
+
+	busy = 1000 - scale3d.idle_estimate;
+	curr = clk_get_rate(scale3d.clk_3d);
+	target = scale3d.min_rate_3d;
+
+	score_add(busy_history, busy);
+	score_add(hint_history, hint);
+
+	avg_busy = score_get_average(busy_history);
+	avg_hint = score_get_average(hint_history);
+
+	if (busy > 0)
+		target = (curr / 1000) * busy;
+
+	/* In practice, running the gpu at the minimum frequency is typically
+	 * sufficient to keep up performance at loads of up to 70% in most
+	 * cases, but the average hint value is also tested to keep
+	 * performance up when needed.
+	 */
+	if (avg_busy <= scale3d.p_busy_cutoff &&
+	    avg_hint >= scale3d.p_throughput_lower_limit)
+		target = scale3d.min_rate_3d;
+	else {
+		target = (scale3d.max_rate_3d / 1000) * avg_busy;
+
+		/* Scale up if either the current hint or the running average
+		 * are below the target to prevent performance drop.
+		 */
+		if (hint <= scale3d.p_throughput_lo_limit ||
+		    avg_hint <= scale3d.p_throughput_lo_limit) {
+			if (target < curr)
+				target = curr;
+			target = freqlist_up(target, scale3d.p_scale_step);
+		} else if (avg_hint >= scale3d.p_throughput_hi_limit) {
+			if (target > curr)
+				target = curr;
+			target = freqlist_down(target, scale3d.p_scale_step);
+		}
+	}
+
+	scale_to_freq(target);
+
+	if (scale3d.p_verbosity & GR3D_PRINT_TARGET)
+		pr_info("3dfs: busy %ld <%d>, curr %ld, t %ld, hint %d <%d>\n",
+			busy, avg_busy, curr / 1000000, target, hint, avg_hint);
+}
+EXPORT_SYMBOL(nvhost_scale3d_set_throughput_hint);
+
+static void scale3d_idle_handler(struct work_struct *work)
+{
+	int notify_idle = 0;
+
+	if (!scale3d.enable)
+		return;
+
+	mutex_lock(&scale3d.lock);
+
+	if (scale3d.is_idle && tegra_is_clk_enabled(scale3d.clk_3d)) {
+		unsigned long curr = clk_get_rate(scale3d.clk_3d);
+		if (curr > scale3d.min_rate_3d)
+			notify_idle = 1;
+	}
+
+	mutex_unlock(&scale3d.lock);
+
+	if (notify_idle)
+		nvhost_scale3d_notify_idle(NULL);
+}
+
+/*
+ * debugfs parameters to control 3d clock scaling
+ */
+
+void nvhost_scale3d_debug_init(struct dentry *de)
+{
+	struct dentry *d, *f;
+
+	d = debugfs_create_dir("scaling", de);
+	if (!d) {
+		pr_err("scale3d: can't create debugfs directory\n");
+		return;
+	}
+
+#define CREATE_SCALE3D_FILE(fname) \
+	do {\
+		f = debugfs_create_u32(#fname, S_IRUGO | S_IWUSR, d,\
+			&scale3d.p_##fname);\
+		if (NULL == f) {\
+			pr_err("scale3d: can't create file " #fname "\n");\
+			return;\
+		} \
+	} while (0)
+
+	CREATE_SCALE3D_FILE(estimation_window);
+	CREATE_SCALE3D_FILE(idle_min);
+	CREATE_SCALE3D_FILE(idle_max);
+	CREATE_SCALE3D_FILE(adjust);
+	CREATE_SCALE3D_FILE(scale_emc);
+	CREATE_SCALE3D_FILE(emc_dip);
+	CREATE_SCALE3D_FILE(use_throughput_hint);
+	CREATE_SCALE3D_FILE(throughput_hi_limit);
+	CREATE_SCALE3D_FILE(throughput_lo_limit);
+	CREATE_SCALE3D_FILE(throughput_lower_limit);
+	CREATE_SCALE3D_FILE(scale_step);
+	CREATE_SCALE3D_FILE(verbosity);
+#undef CREATE_SCALE3D_FILE
+}
+
+static ssize_t enable_3d_scaling_show(struct device *device,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t res;
+
+	res = snprintf(buf, PAGE_SIZE, "%d\n", scale3d_is_enabled());
+
+	return res;
+}
+
+static ssize_t enable_3d_scaling_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long val = 0;
+
+	if (kstrtoul(buf, 10, &val) < 0)
+		return -EINVAL;
+
+	scale3d_enable(val);
+
+	return count;
+}
+
+static DEVICE_ATTR(enable_3d_scaling, S_IRUGO | S_IWUSR,
+	enable_3d_scaling_show, enable_3d_scaling_store);
+
+#define MAX_FREQ_COUNT 0x40 /* 64 frequencies should be enough for anyone */
+
+void nvhost_scale3d_init(struct nvhost_device *d)
+{
+	if (!scale3d.init) {
+		int error;
+		unsigned long max_emc, min_emc;
+		long correction;
+		long rate;
+		int freqs[MAX_FREQ_COUNT];
+
+		mutex_init(&scale3d.lock);
+
+		INIT_WORK(&scale3d.work, scale3d_clocks_handler);
+		INIT_DELAYED_WORK(&scale3d.idle_timer, scale3d_idle_handler);
+
+		scale3d.clk_3d = d->clk[0];
+		if (tegra_chip_id == TEGRA30) {
+			scale3d.clk_3d2 = d->clk[1];
+			scale3d.clk_3d_emc = d->clk[2];
+		} else
+			scale3d.clk_3d_emc = d->clk[1];
+
+		scale3d.max_rate_3d = clk_round_rate(scale3d.clk_3d, 300000000);
+		scale3d.min_rate_3d = clk_round_rate(scale3d.clk_3d, 0);
+
+		if (scale3d.max_rate_3d == scale3d.min_rate_3d) {
+			pr_warn("scale3d: 3d max rate = min rate (%lu), "
+				"disabling\n", scale3d.max_rate_3d);
+			scale3d.enable = 0;
+			return;
+		}
+
+		/* emc scaling:
+		 *
+		 * Remc = S * R3d + O - (Sd * (R3d - Rm)^2 + Od)
+		 *
+		 * Remc - 3d.emc rate
+		 * R3d  - 3d.cbus rate
+		 * Rm   - 3d.cbus 'middle' rate = (max + min)/2
+		 * S    - emc_slope
+		 * O    - emc_offset
+		 * Sd   - emc_dip_slope
+		 * Od   - emc_dip_offset
+		 *
+		 * this superposes a quadratic dip centered around the middle 3d
+		 * frequency over a linear correlation of 3d.emc to 3d clock
+		 * rates.
+		 *
+		 * S, O are chosen so that the maximum 3d rate produces the
+		 * maximum 3d.emc rate exactly, and the minimum 3d rate produces
+		 * at least the minimum 3d.emc rate.
+		 *
+		 * Sd and Od are chosen to produce the largest dip that will
+		 * keep 3d.emc frequencies monotonically non-decreasing with 3d
+		 * frequencies. To achieve this, the first derivative of Remc
+		 * with respect to R3d should be zero for the minimal 3d rate:
+		 *
+		 *   R'emc = S - 2 * Sd * (R3d - Rm)
+		 *   R'emc(R3d-min) = 0
+		 *   S = 2 * Sd * (R3d-min - Rm)
+		 *     = 2 * Sd * (R3d-min - R3d-max) / 2
+		 *   Sd = S / (R3d-min - R3d-max)
+		 *
+		 *   +---------------------------------------------------+
+		 *   | Sd = -(emc-max - emc-min) / (R3d-min - R3d-max)^2 |
+		 *   +---------------------------------------------------+
+		 *
+		 *   dip = Sd * (R3d - Rm)^2 + Od
+		 *
+		 * requiring dip(R3d-min) = 0 and dip(R3d-max) = 0 gives
+		 *
+		 *   Sd * (R3d-min - Rm)^2 + Od = 0
+		 *   Od = -Sd * ((R3d-min - R3d-max) / 2)^2
+		 *      = -Sd * ((R3d-min - R3d-max)^2) / 4
+		 *
+		 *   +------------------------------+
+		 *   | Od = (emc-max - emc-min) / 4 |
+		 *   +------------------------------+
+		 */
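+
+		/* Numeric check (rates illustrative): for R3d in
+		 * [200, 400] MHz and 3d.emc in [200, 400] MHz this gives
+		 * S = 1, O = 0, Od = 50 MHz and
+		 * Sd = -200e6 / (200e6)^2 = -5e-9; the dip is zero at
+		 * both endpoints and pulls 3d.emc 50 MHz below the
+		 * linear term at the midpoint Rm = 300 MHz.
+		 */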
+
+		max_emc = clk_round_rate(scale3d.clk_3d_emc, 400000000);
+		min_emc = clk_round_rate(scale3d.clk_3d_emc, 0);
+
+		scale3d.emc_slope = (max_emc - min_emc) /
+			 (scale3d.max_rate_3d - scale3d.min_rate_3d);
+		scale3d.emc_offset = max_emc -
+			scale3d.emc_slope * scale3d.max_rate_3d;
+		/* guarantee max 3d rate maps to max emc rate */
+		scale3d.emc_offset += max_emc -
+			(scale3d.emc_slope * scale3d.max_rate_3d +
+			scale3d.emc_offset);
+
+		scale3d.emc_dip_offset = (max_emc - min_emc) / 4;
+		scale3d.emc_dip_slope =
+			-4 * (scale3d.emc_dip_offset /
+			(POW2(scale3d.max_rate_3d - scale3d.min_rate_3d)));
+		scale3d.emc_xmid =
+			(scale3d.max_rate_3d + scale3d.min_rate_3d) / 2;
+		correction =
+			scale3d.emc_dip_offset +
+				scale3d.emc_dip_slope *
+				POW2(scale3d.max_rate_3d - scale3d.emc_xmid);
+		scale3d.emc_dip_offset -= correction;
+
+		scale3d.is_idle = 1;
+
+		/* set scaling parameter defaults */
+		scale3d.enable = 1;
+		scale3d.idle_min = scale3d.p_idle_min = 100;
+		scale3d.idle_max = scale3d.p_idle_max = 150;
+		scale3d.p_scale_emc = 1;
+		scale3d.p_emc_dip = 1;
+		scale3d.p_verbosity = 0;
+		scale3d.p_adjust = 1;
+		scale3d.p_use_throughput_hint = 1;
+		scale3d.p_throughput_lower_limit = 940;
+		scale3d.p_throughput_lo_limit = 990;
+		scale3d.p_throughput_hi_limit = 1010;
+		scale3d.p_scale_step = 1;
+		scale3d.p_estimation_window = 8000;
+		scale3d.p_busy_cutoff = 750;
+
+		error = device_create_file(&d->dev,
+				&dev_attr_enable_3d_scaling);
+		if (error)
+			dev_err(&d->dev, "failed to create sysfs attributes");
+
+		rate = 0;
+		scale3d.freq_count = 0;
+		while (rate <= scale3d.max_rate_3d) {
+			long rounded_rate;
+			if (unlikely(scale3d.freq_count == MAX_FREQ_COUNT)) {
+				pr_err("%s: too many frequencies\n", __func__);
+				break;
+			}
+			rounded_rate =
+				clk_round_rate(scale3d.clk_3d, rate);
+			freqs[scale3d.freq_count++] = rounded_rate;
+			rate = rounded_rate + 2000;
+		}
+		scale3d.freqlist =
+			kmalloc(scale3d.freq_count * sizeof(int), GFP_KERNEL);
+		if (scale3d.freqlist == NULL) {
+			pr_err("%s: can't allocate freq table\n", __func__);
+			scale3d.enable = 0;
+			return;
+		}
+
+		memcpy(scale3d.freqlist, freqs,
+			scale3d.freq_count * sizeof(int));
+
+		busy_history = score_init(GR3D_FRAME_SPAN);
+		if (busy_history == NULL)
+			pr_err("%s: can't init load tracking array\n",
+			       __func__);
+
+		hint_history = score_init(GR3D_FRAME_SPAN);
+		if (hint_history == NULL)
+			pr_err("%s: can't init throughput tracking array\n",
+			       __func__);
+
+		scale3d.init = 1;
+	}
+}
+
+void nvhost_scale3d_deinit(struct nvhost_device *dev)
+{
+	device_remove_file(&dev->dev, &dev_attr_enable_3d_scaling);
+	scale3d.init = 0;
+	if (scale3d.freqlist != NULL) {
+		kfree(scale3d.freqlist);
+		scale3d.freq_count = 0;
+		scale3d.freqlist = NULL;
+	}
+
+	score_delete(busy_history);
+	score_delete(hint_history);
+}
diff --git a/drivers/staging/tegra/video/host/gr3d/scale3d.h b/drivers/staging/tegra/video/host/gr3d/scale3d.h
new file mode 100644
index 000000000000..f8aae1d591a6
--- /dev/null
+++ b/drivers/staging/tegra/video/host/gr3d/scale3d.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/video/tegra/host/t30/scale3d.h
+ *
+ * Tegra Graphics Host 3D Clock Scaling
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef NVHOST_T30_SCALE3D_H
+#define NVHOST_T30_SCALE3D_H
+
+struct nvhost_device;
+struct device;
+struct dentry;
+
+/* Initialization and de-initialization for module */
+void nvhost_scale3d_init(struct nvhost_device *);
+void nvhost_scale3d_deinit(struct nvhost_device *);
+
+/* Suspend is called when powering down module */
+void nvhost_scale3d_suspend(struct nvhost_device *);
+
+/* reset 3d module load counters, called on resume */
+void nvhost_scale3d_reset(void);
+
+/*
+ * call when performing submit to notify scaling mechanism that 3d module is
+ * in use
+ */
+void nvhost_scale3d_notify_busy(struct nvhost_device *);
+void nvhost_scale3d_notify_idle(struct nvhost_device *);
+
+void nvhost_scale3d_debug_init(struct dentry *de);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/host1x/Makefile b/drivers/staging/tegra/video/host/host1x/Makefile
new file mode 100644
index 000000000000..8a1a6e39d34b
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+nvhost-host1x-objs  = \
+	host1x.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-host1x.o
diff --git a/drivers/staging/tegra/video/host/host1x/host1x.c b/drivers/staging/tegra/video/host/host1x/host1x.c
new file mode 100644
index 000000000000..eac937e5db0c
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x.c
@@ -0,0 +1,580 @@
+/*
+ * drivers/video/tegra/host/dev.c
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/clk.h>
+
+#include "dev.h"
+#include "bus.h"
+#include <trace/events/nvhost.h>
+
+#include <linux/nvhost.h>
+#include <linux/nvhost_ioctl.h>
+
+#include "debug.h"
+#include "bus_client.h"
+#include "nvhost_acm.h"
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+
+#define DRIVER_NAME		"host1x"
+
+struct nvhost_ctrl_userctx {
+	struct nvhost_master *dev;
+	u32 *mod_locks;
+};
+
+static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
+{
+	struct nvhost_ctrl_userctx *priv = filp->private_data;
+	int i;
+
+	trace_nvhost_ctrlrelease(priv->dev->dev->name);
+
+	filp->private_data = NULL;
+	if (priv->mod_locks[0])
+		nvhost_module_idle(priv->dev->dev);
+	for (i = 1; i < nvhost_syncpt_nb_mlocks(&priv->dev->syncpt); i++)
+		if (priv->mod_locks[i])
+			nvhost_mutex_unlock(&priv->dev->syncpt, i);
+	kfree(priv->mod_locks);
+	kfree(priv);
+	return 0;
+}
+
+static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
+{
+	struct nvhost_master *host =
+		container_of(inode->i_cdev, struct nvhost_master, cdev);
+	struct nvhost_ctrl_userctx *priv;
+	u32 *mod_locks;
+
+	trace_nvhost_ctrlopen(host->dev->name);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	mod_locks = kzalloc(sizeof(u32)
+			* nvhost_syncpt_nb_mlocks(&host->syncpt),
+			GFP_KERNEL);
+
+	if (!(priv && mod_locks)) {
+		kfree(priv);
+		kfree(mod_locks);
+		return -ENOMEM;
+	}
+
+	priv->dev = host;
+	priv->mod_locks = mod_locks;
+	filp->private_data = priv;
+	return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_read(struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_syncpt_read_args *args)
+{
+	if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+		return -EINVAL;
+	args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
+	trace_nvhost_ioctl_ctrl_syncpt_read(args->id, args->value);
+	return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_incr(struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_syncpt_incr_args *args)
+{
+	if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+		return -EINVAL;
+	trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
+	nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
+	return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_waitex(struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_syncpt_waitex_args *args)
+{
+	u32 timeout;
+	int err;
+	if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+		return -EINVAL;
+	if (args->timeout == NVHOST_NO_TIMEOUT)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = (u32)msecs_to_jiffies(args->timeout);
+
+	err = nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
+					args->thresh, timeout, &args->value);
+	trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
+	  args->timeout, args->value, err);
+
+	return err;
+}
+
+static int nvhost_ioctl_ctrl_module_mutex(struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_module_mutex_args *args)
+{
+	int err = 0;
+	if (args->id >= nvhost_syncpt_nb_mlocks(&ctx->dev->syncpt) ||
+	    args->lock > 1)
+		return -EINVAL;
+
+	trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
+	if (args->lock && !ctx->mod_locks[args->id]) {
+		if (args->id == 0)
+			nvhost_module_busy(ctx->dev->dev);
+		else
+			err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
+					args->id);
+		if (!err)
+			ctx->mod_locks[args->id] = 1;
+	} else if (!args->lock && ctx->mod_locks[args->id]) {
+		if (args->id == 0)
+			nvhost_module_idle(ctx->dev->dev);
+		else
+			nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
+		ctx->mod_locks[args->id] = 0;
+	}
+	return err;
+}
+
+static int match_by_moduleid(struct device *dev, void *data)
+{
+	struct nvhost_device *ndev = to_nvhost_device(dev);
+	u32 id = (u32)data;
+
+	return id == ndev->moduleid;
+}
+
+static struct nvhost_device *get_ndev_by_moduleid(struct nvhost_master *host,
+		u32 id)
+{
+	struct device *dev = bus_find_device(nvhost_bus_inst->nvhost_bus_type,
+			NULL, (void *)id, match_by_moduleid);
+
+	return dev ? to_nvhost_device(dev) : NULL;
+}
+
+static int nvhost_ioctl_ctrl_module_regrdwr(struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_module_regrdwr_args *args)
+{
+	u32 num_offsets = args->num_offsets;
+	u32 *offsets = args->offsets;
+	u32 *values = args->values;
+	u32 vals[64];
+	struct nvhost_device *ndev;
+
+	trace_nvhost_ioctl_ctrl_module_regrdwr(args->id,
+			args->num_offsets, args->write);
+	/* Check that there is something to read and that block size is
+	 * u32 aligned */
+	if (num_offsets == 0 || args->block_size & 3)
+		return -EINVAL;
+
+	ndev = get_ndev_by_moduleid(ctx->dev, args->id);
+	if (!ndev)
+		return -EINVAL;
+
+	while (num_offsets--) {
+		int err;
+		int remaining = args->block_size >> 2;
+		u32 offs;
+		if (get_user(offs, offsets))
+			return -EFAULT;
+		offsets++;
+		while (remaining) {
+			int batch = min(remaining, 64);
+			if (args->write) {
+				if (copy_from_user(vals, values,
+							batch*sizeof(u32)))
+					return -EFAULT;
+				err = nvhost_write_module_regs(ndev,
+						offs, batch, vals);
+				if (err)
+					return err;
+			} else {
+				err = nvhost_read_module_regs(ndev,
+						offs, batch, vals);
+				if (err)
+					return err;
+				if (copy_to_user(values, vals,
+							batch*sizeof(u32)))
+					return -EFAULT;
+			}
+			remaining -= batch;
+			offs += batch*sizeof(u32);
+			values += batch;
+		}
+	}
+
+	return 0;
+}
+
+static int nvhost_ioctl_ctrl_get_version(struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_get_param_args *args)
+{
+	args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
+	return 0;
+}
+
+static long nvhost_ctrlctl(struct file *filp,
+	unsigned int cmd, unsigned long arg)
+{
+	struct nvhost_ctrl_userctx *priv = filp->private_data;
+	u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
+	int err = 0;
+
+	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+		(_IOC_NR(cmd) == 0) ||
+		(_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST) ||
+		(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE))
+		return -EFAULT;
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	switch (cmd) {
+	case NVHOST_IOCTL_CTRL_SYNCPT_READ:
+		err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
+		err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
+		err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
+		err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
+		err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
+		err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_GET_VERSION:
+		err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf);
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ) &&
+	    copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
+		err = -EFAULT;
+
+	return err;
+}
+
+static const struct file_operations nvhost_ctrlops = {
+	.owner = THIS_MODULE,
+	.release = nvhost_ctrlrelease,
+	.open = nvhost_ctrlopen,
+	.unlocked_ioctl = nvhost_ctrlctl
+};
+
+static void power_on_host(struct nvhost_device *dev)
+{
+	struct nvhost_master *host = nvhost_get_drvdata(dev);
+	nvhost_syncpt_reset(&host->syncpt);
+	nvhost_intr_start(&host->intr, clk_get_rate(dev->clk[0]));
+}
+
+static int power_off_host(struct nvhost_device *dev)
+{
+	struct nvhost_master *host = nvhost_get_drvdata(dev);
+	nvhost_syncpt_save(&host->syncpt);
+	nvhost_intr_stop(&host->intr);
+	return 0;
+}
+
+static void clock_on_host(struct nvhost_device *dev)
+{
+	struct nvhost_master *host = nvhost_get_drvdata(dev);
+	nvhost_intr_start(&host->intr, clk_get_rate(dev->clk[0]));
+}
+
+static int clock_off_host(struct nvhost_device *dev)
+{
+	struct nvhost_master *host = nvhost_get_drvdata(dev);
+	nvhost_intr_stop(&host->intr);
+	return 0;
+}
+
+static int nvhost_user_init(struct nvhost_master *host)
+{
+	int err;
+	dev_t devno;
+
+	host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
+	if (IS_ERR(host->nvhost_class)) {
+		err = PTR_ERR(host->nvhost_class);
+		dev_err(&host->dev->dev, "failed to create class\n");
+		goto fail;
+	}
+
+	err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
+	if (err < 0) {
+		dev_err(&host->dev->dev, "failed to reserve chrdev region\n");
+		goto fail;
+	}
+
+	cdev_init(&host->cdev, &nvhost_ctrlops);
+	host->cdev.owner = THIS_MODULE;
+	err = cdev_add(&host->cdev, devno, 1);
+	if (err < 0)
+		goto fail;
+	host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
+			IFACE_NAME "-ctrl");
+	if (IS_ERR(host->ctrl)) {
+		err = PTR_ERR(host->ctrl);
+		dev_err(&host->dev->dev, "failed to create ctrl device\n");
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return err;
+}
+
+struct nvhost_channel *nvhost_alloc_channel(struct nvhost_device *dev)
+{
+	BUG_ON(!host_device_op().alloc_nvhost_channel);
+	return host_device_op().alloc_nvhost_channel(dev);
+}
+
+void nvhost_free_channel(struct nvhost_channel *ch)
+{
+	BUG_ON(!host_device_op().free_nvhost_channel);
+	host_device_op().free_nvhost_channel(ch);
+}
+
+static void nvhost_free_resources(struct nvhost_master *host)
+{
+	kfree(host->intr.syncpt);
+	host->intr.syncpt = 0;
+}
+
+static int nvhost_alloc_resources(struct nvhost_master *host)
+{
+	int err;
+
+	err = nvhost_init_chip_support(host);
+	if (err)
+		return err;
+
+	host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
+				    nvhost_syncpt_nb_pts(&host->syncpt),
+				    GFP_KERNEL);
+
+	if (!host->intr.syncpt) {
+		/* frees happen in the support removal phase */
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int nvhost_probe(struct nvhost_device *dev,
+	struct nvhost_device_id *id_table)
+{
+	struct nvhost_master *host;
+	struct resource *regs, *intr0, *intr1;
+	int i, err;
+
+	dev_info(&dev->dev, "probe\n");
+
+	regs = nvhost_get_resource(dev, IORESOURCE_MEM, 0);
+	intr0 = nvhost_get_resource(dev, IORESOURCE_IRQ, 0);
+	intr1 = nvhost_get_resource(dev, IORESOURCE_IRQ, 1);
+
+	if (!regs)
+		dev_err(&dev->dev, "missing regs\n");
+
+	if (!intr0)
+		dev_err(&dev->dev, "missing intr0\n");
+
+	if (!intr1)
+		dev_err(&dev->dev, "missing intr1\n");
+
+	if (!regs || !intr0 || !intr1) {
+		dev_err(&dev->dev, "missing required platform resources\n");
+		return -ENXIO;
+	}
+
+	host = kzalloc(sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	/*  Register host1x device as bus master */
+	host->dev = dev;
+
+	/* Copy host1x parameters */
+	memcpy(&host->info, dev->dev.platform_data,
+			sizeof(struct host1x_device_info));
+
+	host->reg_mem = request_mem_region(regs->start,
+					resource_size(regs), dev->name);
+	if (!host->reg_mem) {
+		dev_err(&dev->dev, "failed to get host register memory\n");
+		err = -ENXIO;
+		goto fail;
+	}
+
+	host->aperture = ioremap(regs->start, resource_size(regs));
+	if (!host->aperture) {
+		dev_err(&dev->dev, "failed to remap host registers\n");
+		err = -ENXIO;
+		goto fail;
+	}
+
+	err = nvhost_alloc_resources(host);
+	if (err) {
+		dev_err(&dev->dev, "failed to init chip support\n");
+		goto fail;
+	}
+
+	host->memmgr = mem_op().alloc_mgr();
+	if (!host->memmgr) {
+		dev_err(&dev->dev, "unable to create nvmap client\n");
+		err = -EIO;
+		goto fail;
+	}
+
+	/*  Give pointer to host1x via driver */
+	nvhost_set_drvdata(dev, host);
+
+	dev_info(&dev->dev, "nvhost_bus_add_host\n");
+
+	nvhost_bus_add_host(host);
+
+	err = nvhost_syncpt_init(dev, &host->syncpt);
+	if (err)
+		goto fail;
+
+	dev_info(&dev->dev, "nvhost_intr_init\n");
+
+	err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
+	if (err)
+		goto fail;
+
+	dev_info(&dev->dev, "nvhost_user_init\n");
+
+	err = nvhost_user_init(host);
+	if (err)
+		goto fail;
+
+	dev_info(&dev->dev, "nvhost_module_init\n");
+
+	err = nvhost_module_init(dev);
+	if (err)
+		goto fail;
+
+	for (i = 0; i < host->dev->num_clks; i++)
+		clk_prepare_enable(host->dev->clk[i]);
+	nvhost_syncpt_reset(&host->syncpt);
+	for (i = 0; i < host->dev->num_clks; i++)
+		clk_disable_unprepare(host->dev->clk[i]);
+
+	dev_info(&dev->dev, "nvhost_debug_init\n");
+
+	nvhost_debug_init(host);
+
+	dev_info(&dev->dev, "initialized\n");
+	return 0;
+
+fail:
+	nvhost_free_resources(host);
+	if (host->memmgr)
+		mem_op().put_mgr(host->memmgr);
+	kfree(host);
+	return err;
+}
+
+static int __exit nvhost_remove(struct nvhost_device *dev)
+{
+	struct nvhost_master *host = nvhost_get_drvdata(dev);
+	nvhost_intr_deinit(&host->intr);
+	nvhost_syncpt_deinit(&host->syncpt);
+	nvhost_free_resources(host);
+	return 0;
+}
+
+static int nvhost_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+	struct nvhost_master *host = nvhost_get_drvdata(dev);
+	int ret = 0;
+
+	ret = nvhost_module_suspend(host->dev);
+	dev_info(&dev->dev, "suspend status: %d\n", ret);
+
+	return ret;
+}
+
+static int nvhost_resume(struct nvhost_device *dev)
+{
+	dev_info(&dev->dev, "resuming\n");
+	return 0;
+}
+
+static const struct of_device_id nvhost_of_match[] = {
+	{ .compatible = "nvidia,tegra20-host1x", },
+	{ .compatible = "nvidia,tegra30-host1x", },
+	{ },
+};
+
+static struct nvhost_driver nvhost_driver = {
+	.probe = nvhost_probe,
+	.remove = __exit_p(nvhost_remove),
+	.suspend = nvhost_suspend,
+	.resume = nvhost_resume,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRIVER_NAME,
+		.of_match_table = of_match_ptr(nvhost_of_match),
+	},
+	.finalize_poweron = power_on_host,
+	.prepare_poweroff = power_off_host,
+	.finalize_clockon = clock_on_host,
+	.prepare_clockoff = clock_off_host,
+};
+
+static int __init nvhost_mod_init(void)
+{
+	return nvhost_driver_register(&nvhost_driver);
+}
+
+static void __exit nvhost_mod_exit(void)
+{
+	nvhost_driver_unregister(&nvhost_driver);
+}
+
+/* host1x master device needs nvmap to be instantiated first.
+ * nvmap is instantiated via fs_initcall.
+ * Hence instantiate host1x master device using rootfs_initcall
+ * which is one level after fs_initcall. */
+rootfs_initcall(nvhost_mod_init);
+module_exit(nvhost_mod_exit);
diff --git a/drivers/staging/tegra/video/host/host1x/host1x.h b/drivers/staging/tegra/video/host/host1x/host1x.h
new file mode 100644
index 000000000000..f70e6fb2194a
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x.h
@@ -0,0 +1,90 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x.h
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_HOST1X_H
+#define __NVHOST_HOST1X_H
+
+#include <linux/cdev.h>
+#include <linux/nvhost.h>
+
+#include "../nvhost_syncpt.h"
+#include "../nvhost_intr.h"
+
+#define TRACE_MAX_LENGTH	128U
+#define IFACE_NAME		"nvhost"
+
+#define TEGRA_HOST1X_BASE	0x50000000
+#define TEGRA_HOST1X_SIZE	0x24000
+
+#define TEGRA_ISP_BASE		0x54100000
+#define TEGRA_ISP_SIZE		SZ_256K
+
+#define TEGRA_MPE_BASE		0x54040000
+#define TEGRA_MPE_SIZE		SZ_256K
+
+#define TEGRA_VI_BASE		0x54080000
+#define TEGRA_VI_SIZE		SZ_256K
+
+struct nvhost_channel;
+struct mem_mgr;
+
+struct host1x_device_info {
+	int		nb_channels;	/* host1x: num channels supported */
+	int		nb_pts;		/* host1x: num syncpoints supported */
+	int		nb_bases;	/* host1x: num syncpoint bases supported */
+	u32		client_managed; /* host1x: client managed syncpts */
+	int		nb_mlocks;	/* host1x: number of mlocks */
+	const char	**syncpt_names;	/* names of sync points */
+};
+
+struct nvhost_master {
+	void __iomem *aperture;
+	void __iomem *sync_aperture;
+	struct resource *reg_mem;
+	struct class *nvhost_class;
+	struct cdev cdev;
+	struct device *ctrl;
+	struct nvhost_syncpt syncpt;
+	struct mem_mgr *memmgr;
+	struct nvhost_intr intr;
+	struct nvhost_device *dev;
+	atomic_t clientid;
+
+	struct host1x_device_info info;
+};
+
+extern struct nvhost_master *nvhost;
+
+void nvhost_debug_init(struct nvhost_master *master);
+void nvhost_debug_dump(struct nvhost_master *master);
+
+struct nvhost_channel *nvhost_alloc_channel(struct nvhost_device *dev);
+void nvhost_free_channel(struct nvhost_channel *ch);
+
+extern pid_t nvhost_debug_null_kickoff_pid;
+
+static inline struct nvhost_master *nvhost_get_host(struct nvhost_device *_dev)
+{
+	return _dev->dev.parent ?
+		(struct nvhost_master *) dev_get_drvdata(_dev->dev.parent) :
+		(struct nvhost_master *) dev_get_drvdata(&_dev->dev);
+}
+
+#endif
diff --git a/drivers/staging/tegra/video/host/host1x/host1x01_hardware.h b/drivers/staging/tegra/video/host/host1x/host1x01_hardware.h
new file mode 100644
index 000000000000..1d30cc74266a
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x01_hardware.h
@@ -0,0 +1,170 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x01_hardware.h
+ *
+ * Tegra Graphics Host Register Offsets for T20/T30
+ *
+ * Copyright (c) 2010-2012 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_HOST1X01_HARDWARE_H
+#define __NVHOST_HOST1X01_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include "hw_host1x01_channel.h"
+#include "hw_host1x01_sync.h"
+#include "hw_host1x01_uclass.h"
+
+/* class ids */
+enum {
+	NV_HOST1X_CLASS_ID = 0x1,
+	NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20,
+	NV_GRAPHICS_3D_CLASS_ID = 0x60
+};
+
+
+/* channel registers */
+#define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384
+#define NV_HOST1X_SYNC_MLOCK_NUM 16
+
+/* sync registers */
+#define HOST1X_CHANNEL_SYNC_REG_BASE   0x3000
+#define NV_HOST1X_NB_MLOCKS 16
+
+static inline u32 nvhost_class_host_wait_syncpt(
+	unsigned indx, unsigned threshold)
+{
+	return (indx << 24) | (threshold & 0xffffff);
+}
+
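+/* Note: this reuses the wait_syncpt field helpers, which pack an index
+ * into bits 24..31 and a 24-bit value below (cf. the raw encoding in
+ * nvhost_class_host_wait_syncpt above); load_syncpt_base shares that layout. */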
+static inline u32 nvhost_class_host_load_syncpt_base(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_wait_syncpt_indx_f(indx)
+		| host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 nvhost_class_host_wait_syncpt_base(
+	unsigned indx, unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_wait_syncpt_base_indx_f(indx)
+		| host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 nvhost_class_host_incr_syncpt_base(
+	unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 nvhost_class_host_incr_syncpt(
+	unsigned cond, unsigned indx)
+{
+	return host1x_uclass_incr_syncpt_cond_f(cond)
+		| host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+enum {
+	NV_HOST_MODULE_HOST1X = 0,
+	NV_HOST_MODULE_MPE = 1,
+	NV_HOST_MODULE_GR3D = 6
+};
+
+static inline u32 nvhost_class_host_indoff_reg_write(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indbe_f(0xf)
+		| host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset);
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+static inline u32 nvhost_class_host_indoff_reg_read(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset)
+		| host1x_uclass_indoff_rwn_read_v();
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+
+/* cdma opcodes */
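+/*
+ * Each word carries the opcode in its top nibble; most opcodes place a
+ * register offset in bits 16..27 and a count or mask in the low 16 bits
+ * (SETCLASS instead packs a class id in bits 6..15 and a 6-bit mask).
+ * Worked example: nvhost_opcode_incr(0x100, 2) == 0x11000002, i.e. write
+ * the next two data words to registers 0x100 and 0x101.
+ */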
+static inline u32 nvhost_opcode_setclass(
+	unsigned class_id, unsigned offset, unsigned mask)
+{
+	return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
+{
+	return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
+{
+	return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
+{
+	return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
+{
+	return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 nvhost_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+	return nvhost_opcode_imm(host1x_uclass_incr_syncpt_r(),
+		nvhost_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 nvhost_opcode_restart(unsigned address)
+{
+	return (5 << 28) | (address >> 4);
+}
+
+static inline u32 nvhost_opcode_gather(unsigned count)
+{
+	return (6 << 28) | count;
+}
+
+static inline u32 nvhost_opcode_gather_nonincr(unsigned offset,	unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)
+
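+/* Mask covering registers x and y (x < y, y - x < 32), for use as the
+ * mask argument of the SETCLASS/MASK opcodes above; e.g. nvhost_mask2(4, 6)
+ * == 0x5, which together with offset 4 selects registers 4 and 6. */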
+static inline u32 nvhost_mask2(unsigned x, unsigned y)
+{
+	return 1 | (1 << (y - x));
+}
+
+#endif
diff --git a/drivers/staging/tegra/video/host/host1x/host1x_cdma.c b/drivers/staging/tegra/video/host/host1x/host1x_cdma.c
new file mode 100644
index 000000000000..5a29ff652efe
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x_cdma.c
@@ -0,0 +1,524 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_cdma.c
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include "nvhost_acm.h"
+#include "nvhost_cdma.h"
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "chip_support.h"
+#include "nvhost_memmgr.h"
+
+#include "host1x_cdma.h"
+#include "host1x_hwctx.h"
+
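+/* Compose a DMACTRL value: "stop" freezes command fetch, while "get_rst"
+ * plus "init_get" reload DMAGET from DMAPUT when (re)starting a channel. */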
+static inline u32 host1x_channel_dmactrl(int stop, int get_rst, int init_get)
+{
+	return host1x_channel_dmactrl_dmastop_f(stop)
+		| host1x_channel_dmactrl_dmagetrst_f(get_rst)
+		| host1x_channel_dmactrl_dmainitget_f(init_get);
+}
+
+static void cdma_timeout_handler(struct work_struct *work);
+
+/*
+ * push_buffer
+ *
+ * The push buffer is a circular array of words to be fetched by command DMA.
+ * Note that it works slightly differently to the sync queue; fence == cur
+ * means that the push buffer is full, not empty.
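+ *
+ * A slot is an 8-byte opcode pair, so with PUSH_BUFFER_SIZE == 4096 the
+ * ring holds 512 slots, of which at most 511 are ever in use: one slot
+ * is kept free so that a full ring (fence == cur) can be told apart
+ * from an empty one (see push_buffer_reset() and push_buffer_space()).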
+ */
+
+
+/**
+ * Reset to empty push buffer
+ */
+static void push_buffer_reset(struct push_buffer *pb)
+{
+	pb->fence = PUSH_BUFFER_SIZE - 8;
+	pb->cur = 0;
+}
+
+/**
+ * Init push buffer resources
+ */
+static int push_buffer_init(struct push_buffer *pb)
+{
+	struct nvhost_cdma *cdma = pb_to_cdma(pb);
+	struct mem_mgr *mgr = cdma_to_memmgr(cdma);
+	pb->mem = NULL;
+	pb->mapped = NULL;
+	pb->phys = 0;
+	pb->client_handle = NULL;
+
+	BUG_ON(!cdma_pb_op().reset);
+	cdma_pb_op().reset(pb);
+
+	/* allocate and map pushbuffer memory (+4 bytes for the final RESTART) */
+	pb->mem = mem_op().alloc(mgr, PUSH_BUFFER_SIZE + 4, 32,
+			      mem_mgr_flag_write_combine);
+	if (IS_ERR_OR_NULL(pb->mem)) {
+		pb->mem = NULL;
+		goto fail;
+	}
+	pb->mapped = mem_op().mmap(pb->mem);
+	if (pb->mapped == NULL)
+		goto fail;
+
+	/* pin pushbuffer and get physical address */
+	pb->phys = mem_op().pin(mgr, pb->mem);
+	if (pb->phys >= 0xfffff000) {
+		pb->phys = 0;
+		goto fail;
+	}
+
+	/* memory for storing nvmap client and handles for each opcode pair */
+	pb->client_handle = kzalloc(NVHOST_GATHER_QUEUE_SIZE *
+				sizeof(struct mem_mgr_handle),
+			GFP_KERNEL);
+	if (!pb->client_handle)
+		goto fail;
+
+	/* put the restart at the end of pushbuffer memory */
+	*(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) =
+		nvhost_opcode_restart(pb->phys);
+
+	return 0;
+
+fail:
+	cdma_pb_op().destroy(pb);
+	return -ENOMEM;
+}
+
+/**
+ * Clean up push buffer resources
+ */
+static void push_buffer_destroy(struct push_buffer *pb)
+{
+	struct nvhost_cdma *cdma = pb_to_cdma(pb);
+	struct mem_mgr *mgr = cdma_to_memmgr(cdma);
+	if (pb->mapped)
+		mem_op().munmap(pb->mem, pb->mapped);
+
+	if (pb->phys != 0)
+		mem_op().unpin(mgr, pb->mem);
+
+	if (pb->mem)
+		mem_op().put(mgr, pb->mem);
+
+	kfree(pb->client_handle);
+
+	pb->mem = NULL;
+	pb->mapped = NULL;
+	pb->phys = 0;
+	pb->client_handle = NULL;
+}
+
+/**
+ * Push two words to the push buffer
+ * Caller must ensure push buffer is not full
+ */
+static void push_buffer_push_to(struct push_buffer *pb,
+		struct mem_mgr *client, struct mem_handle *handle,
+		u32 op1, u32 op2)
+{
+	u32 cur = pb->cur;
+	u32 *p = (u32 *)((u32)pb->mapped + cur);
+	u32 cur_nvmap = (cur/8) & (NVHOST_GATHER_QUEUE_SIZE - 1);
+	BUG_ON(cur == pb->fence);
+	*(p++) = op1;
+	*(p++) = op2;
+	pb->client_handle[cur_nvmap].client = client;
+	pb->client_handle[cur_nvmap].handle = handle;
+	pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
+}
+
+/**
+ * Pop a number of two word slots from the push buffer
+ * Caller must ensure push buffer is not empty
+ */
+static void push_buffer_pop_from(struct push_buffer *pb,
+		unsigned int slots)
+{
+	/* Clear the nvmap references for old items from pb */
+	unsigned int i;
+	u32 fence_nvmap = pb->fence/8;
+	for (i = 0; i < slots; i++) {
+		int cur_fence_nvmap = (fence_nvmap+i)
+				& (NVHOST_GATHER_QUEUE_SIZE - 1);
+		struct mem_mgr_handle *h = &pb->client_handle[cur_fence_nvmap];
+		h->client = NULL;
+		h->handle = NULL;
+	}
+	/* Advance the next write position */
+	pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
+}
+
+/**
+ * Return the number of two word slots free in the push buffer
+ */
+static u32 push_buffer_space(struct push_buffer *pb)
+{
+	return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
+}
+
+static u32 push_buffer_putptr(struct push_buffer *pb)
+{
+	return pb->phys + pb->cur;
+}
+
+/*
+ * The syncpt incr buffer is filled with methods to increment syncpts, which
+ * is later GATHER-ed into the mainline PB. It's used when a timed out context
+ * is interleaved with other work, so needs to inline the syncpt increments
+ * to maintain the count (but otherwise does no work).
+ */
+
+/**
+ * Init timeout resources
+ */
+static int cdma_timeout_init(struct nvhost_cdma *cdma,
+				 u32 syncpt_id)
+{
+	if (syncpt_id == NVSYNCPT_INVALID)
+		return -EINVAL;
+
+	INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
+	cdma->timeout.initialized = true;
+
+	return 0;
+}
+
+/**
+ * Clean up timeout resources
+ */
+static void cdma_timeout_destroy(struct nvhost_cdma *cdma)
+{
+	if (cdma->timeout.initialized)
+		cancel_delayed_work(&cdma->timeout.wq);
+	cdma->timeout.initialized = false;
+}
+
+/**
+ * Increment timedout buffer's syncpt via CPU.
+ */
+static void cdma_timeout_cpu_incr(struct nvhost_cdma *cdma, u32 getptr,
+				u32 syncpt_incrs, u32 syncval, u32 nr_slots,
+				u32 waitbases)
+{
+	struct nvhost_master *dev = cdma_to_dev(cdma);
+	struct push_buffer *pb = &cdma->push_buffer;
+	u32 i, getidx;
+
+	for (i = 0; i < syncpt_incrs; i++)
+		nvhost_syncpt_cpu_incr(&dev->syncpt, cdma->timeout.syncpt_id);
+
+	/* after CPU incr, ensure shadow is up to date */
+	nvhost_syncpt_update_min(&dev->syncpt, cdma->timeout.syncpt_id);
+
+	/* Synchronize wait bases. 2D wait bases are synchronized with
+	 * syncpoint 19. Hence wait bases are not updated when syncptid=18. */
+
+	if (cdma->timeout.syncpt_id != NVSYNCPT_2D_0 && waitbases) {
+		void __iomem *p;
+		p = dev->sync_aperture + host1x_sync_syncpt_base_0_r() +
+				(__ffs(waitbases) * sizeof(u32));
+		writel(syncval, p);
+		dev->syncpt.base_val[__ffs(waitbases)] = syncval;
+	}
+
+	/* NOP all the PB slots */
+	getidx = getptr - pb->phys;
+	while (nr_slots--) {
+		u32 *p = (u32 *)((u32)pb->mapped + getidx);
+		*(p++) = NVHOST_OPCODE_NOOP;
+		*(p++) = NVHOST_OPCODE_NOOP;
+		dev_dbg(&dev->dev->dev, "%s: NOP at 0x%x\n",
+			__func__, pb->phys + getidx);
+		getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
+	}
+	wmb();
+}
+
+/**
+ * Start channel DMA
+ */
+static void cdma_start(struct nvhost_cdma *cdma)
+{
+	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+	if (cdma->running)
+		return;
+
+	BUG_ON(!cdma_pb_op().putptr);
+	cdma->last_put = cdma_pb_op().putptr(&cdma->push_buffer);
+
+	writel(host1x_channel_dmactrl(true, false, false),
+		chan_regs + host1x_channel_dmactrl_r());
+
+	/* set base, put, end pointer (all of memory) */
+	writel(0, chan_regs + host1x_channel_dmastart_r());
+	writel(cdma->last_put, chan_regs + host1x_channel_dmaput_r());
+	writel(0xFFFFFFFF, chan_regs + host1x_channel_dmaend_r());
+
+	/* reset GET */
+	writel(host1x_channel_dmactrl(true, true, true),
+		chan_regs + host1x_channel_dmactrl_r());
+
+	/* start the command DMA */
+	writel(host1x_channel_dmactrl(false, false, false),
+		chan_regs + host1x_channel_dmactrl_r());
+
+	cdma->running = true;
+}
+
+/**
+ * Similar to cdma_start(), but rather than starting from an idle
+ * state (where DMA GET is set to DMA PUT), on a timeout we restore
+ * DMA GET from an explicit value (so DMA may again be pending).
+ */
+static void cdma_timeout_restart(struct nvhost_cdma *cdma, u32 getptr)
+{
+	struct nvhost_master *dev = cdma_to_dev(cdma);
+	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+	if (cdma->running)
+		return;
+
+	BUG_ON(!cdma_pb_op().putptr);
+	cdma->last_put = cdma_pb_op().putptr(&cdma->push_buffer);
+
+	writel(host1x_channel_dmactrl(true, false, false),
+		chan_regs + host1x_channel_dmactrl_r());
+
+	/* set base, end pointer (all of memory) */
+	writel(0, chan_regs + host1x_channel_dmastart_r());
+	writel(0xFFFFFFFF, chan_regs + host1x_channel_dmaend_r());
+
+	/* set GET, by loading the value in PUT (then reset GET) */
+	writel(getptr, chan_regs + host1x_channel_dmaput_r());
+	writel(host1x_channel_dmactrl(true, true, true),
+		chan_regs + host1x_channel_dmactrl_r());
+
+	dev_dbg(&dev->dev->dev,
+		"%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
+		__func__,
+		readl(chan_regs + host1x_channel_dmaget_r()),
+		readl(chan_regs + host1x_channel_dmaput_r()),
+		cdma->last_put);
+
+	/* deassert GET reset and set PUT */
+	writel(host1x_channel_dmactrl(true, false, false),
+		chan_regs + host1x_channel_dmactrl_r());
+	writel(cdma->last_put, chan_regs + host1x_channel_dmaput_r());
+
+	/* start the command DMA */
+	writel(host1x_channel_dmactrl(false, false, false),
+		chan_regs + host1x_channel_dmactrl_r());
+
+	cdma->running = true;
+}
+
+/**
+ * Kick channel DMA into action by writing its PUT offset (if it has changed)
+ */
+static void cdma_kick(struct nvhost_cdma *cdma)
+{
+	u32 put;
+	BUG_ON(!cdma_pb_op().putptr);
+
+	put = cdma_pb_op().putptr(&cdma->push_buffer);
+
+	if (put != cdma->last_put) {
+		void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+		wmb();
+		writel(put, chan_regs + host1x_channel_dmaput_r());
+		cdma->last_put = put;
+	}
+}
+
+static void cdma_stop(struct nvhost_cdma *cdma)
+{
+	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+	mutex_lock(&cdma->lock);
+	if (cdma->running) {
+		nvhost_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
+		writel(host1x_channel_dmactrl(true, false, false),
+			chan_regs + host1x_channel_dmactrl_r());
+		cdma->running = false;
+	}
+	mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Stops both channel's command processor and CDMA immediately.
+ * Also, tears down the channel and resets corresponding module.
+ */
+static void cdma_timeout_teardown_begin(struct nvhost_cdma *cdma)
+{
+	struct nvhost_master *dev = cdma_to_dev(cdma);
+	struct nvhost_channel *ch = cdma_to_channel(cdma);
+	u32 cmdproc_stop;
+
+	BUG_ON(cdma->torndown);
+
+	dev_dbg(&dev->dev->dev,
+		"begin channel teardown (channel id %d)\n", ch->chid);
+
+	cmdproc_stop = readl(dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+	cmdproc_stop |= BIT(ch->chid);
+	writel(cmdproc_stop, dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+
+	dev_dbg(&dev->dev->dev,
+		"%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
+		__func__,
+		readl(ch->aperture + host1x_channel_dmaget_r()),
+		readl(ch->aperture + host1x_channel_dmaput_r()),
+		cdma->last_put);
+
+	writel(host1x_channel_dmactrl(true, false, false),
+		ch->aperture + host1x_channel_dmactrl_r());
+
+	writel(BIT(ch->chid), dev->sync_aperture + host1x_sync_ch_teardown_r());
+	nvhost_module_reset(ch->dev);
+
+	cdma->running = false;
+	cdma->torndown = true;
+}
+
+static void cdma_timeout_teardown_end(struct nvhost_cdma *cdma, u32 getptr)
+{
+	struct nvhost_master *dev = cdma_to_dev(cdma);
+	struct nvhost_channel *ch = cdma_to_channel(cdma);
+	u32 cmdproc_stop;
+
+	BUG_ON(!cdma->torndown || cdma->running);
+
+	dev_dbg(&dev->dev->dev,
+		"end channel teardown (id %d, DMAGET restart = 0x%x)\n",
+		ch->chid, getptr);
+
+	cmdproc_stop = readl(dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+	cmdproc_stop &= ~(BIT(ch->chid));
+	writel(cmdproc_stop, dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+
+	cdma->torndown = false;
+	cdma_timeout_restart(cdma, getptr);
+}
+
+/**
+ * If this timeout fires, the current sync_queue entry has exceeded its
+ * TTL: the userctx is timed out and any submits already issued are
+ * cleaned up (future submits return an error).
+ */
+static void cdma_timeout_handler(struct work_struct *work)
+{
+	struct nvhost_cdma *cdma;
+	struct nvhost_master *dev;
+	struct nvhost_syncpt *sp;
+	struct nvhost_channel *ch;
+
+	u32 syncpt_val;
+
+	u32 prev_cmdproc, cmdproc_stop;
+
+	cdma = container_of(to_delayed_work(work), struct nvhost_cdma,
+			    timeout.wq);
+	dev = cdma_to_dev(cdma);
+	sp = &dev->syncpt;
+	ch = cdma_to_channel(cdma);
+
+	mutex_lock(&cdma->lock);
+
+	if (!cdma->timeout.clientid) {
+		dev_dbg(&dev->dev->dev,
+			 "cdma_timeout: expired, but has no clientid\n");
+		mutex_unlock(&cdma->lock);
+		return;
+	}
+
+	/* stop processing to get a clean snapshot */
+	prev_cmdproc = readl(dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+	cmdproc_stop = prev_cmdproc | BIT(ch->chid);
+	writel(cmdproc_stop, dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+
+	dev_dbg(&dev->dev->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
+		prev_cmdproc, cmdproc_stop);
+
+	syncpt_val = nvhost_syncpt_update_min(&dev->syncpt,
+			cdma->timeout.syncpt_id);
+
+	/* has buffer actually completed? (signed diff copes with syncpt wrap) */
+	if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
+		dev_dbg(&dev->dev->dev,
+			 "cdma_timeout: expired, but buffer had completed\n");
+		/* restore */
+		cmdproc_stop = prev_cmdproc & ~(BIT(ch->chid));
+		writel(cmdproc_stop,
+			dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+		mutex_unlock(&cdma->lock);
+		return;
+	}
+
+	dev_warn(&dev->dev->dev,
+		"%s: timeout: %d (%s) ctx 0x%p, HW thresh %d, done %d\n",
+		__func__,
+		cdma->timeout.syncpt_id,
+		syncpt_op().name(sp, cdma->timeout.syncpt_id),
+		cdma->timeout.ctx,
+		syncpt_val, cdma->timeout.syncpt_val);
+
+	/* stop HW, resetting channel/module */
+	cdma_op().timeout_teardown_begin(cdma);
+
+	nvhost_cdma_update_sync_queue(cdma, sp, ch->dev);
+	mutex_unlock(&cdma->lock);
+}
+
+static const struct nvhost_cdma_ops host1x_cdma_ops = {
+	.start = cdma_start,
+	.stop = cdma_stop,
+	.kick = cdma_kick,
+
+	.timeout_init = cdma_timeout_init,
+	.timeout_destroy = cdma_timeout_destroy,
+	.timeout_teardown_begin = cdma_timeout_teardown_begin,
+	.timeout_teardown_end = cdma_timeout_teardown_end,
+	.timeout_cpu_incr = cdma_timeout_cpu_incr,
+};
+
+static const struct nvhost_pushbuffer_ops host1x_pushbuffer_ops = {
+	.reset = push_buffer_reset,
+	.init = push_buffer_init,
+	.destroy = push_buffer_destroy,
+	.push_to = push_buffer_push_to,
+	.pop_from = push_buffer_pop_from,
+	.space = push_buffer_space,
+	.putptr = push_buffer_putptr,
+};
+
diff --git a/drivers/staging/tegra/video/host/host1x/host1x_cdma.h b/drivers/staging/tegra/video/host/host1x/host1x_cdma.h
new file mode 100644
index 000000000000..94bfc092c8c9
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x_cdma.h
@@ -0,0 +1,39 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_cdma.h
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_HOST1X_HOST1X_CDMA_H
+#define __NVHOST_HOST1X_HOST1X_CDMA_H
+
+/* Size of the sync queue. If it is too small, we won't be able to queue up
+ * many command buffers. If it is too large, we waste memory. */
+#define NVHOST_SYNC_QUEUE_SIZE 512
+
+/* Number of gathers we allow to be queued up per channel. Must be a
+ * power of two. Currently sized such that pushbuffer is 4KB (512*8B). */
+#define NVHOST_GATHER_QUEUE_SIZE 512
+
+/* 8 bytes per slot. (This number does not include the final RESTART.) */
+#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)
+
+/* 4K page containing GATHERed methods to increment channel syncpts,
+ * replacing the timed out context's original GATHER slots */
+#define SYNCPT_INCR_BUFFER_SIZE_WORDS   (4096 / sizeof(u32))
+
+#endif
diff --git a/drivers/staging/tegra/video/host/host1x/host1x_channel.c b/drivers/staging/tegra/video/host/host1x/host1x_channel.c
new file mode 100644
index 000000000000..9dc5a661514b
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x_channel.c
@@ -0,0 +1,694 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_channel.c
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "nvhost_acm.h"
+#include "nvhost_job.h"
+#include "nvhost_hwctx.h"
+#include <trace/events/nvhost.h>
+#include <linux/slab.h>
+
+#include "host1x_hwctx.h"
+#include "nvhost_intr.h"
+
+#define NV_FIFO_READ_TIMEOUT 200000
+
+static int host1x_drain_read_fifo(struct nvhost_channel *ch,
+	u32 *ptr, unsigned int count, unsigned int *pending);
+
+static void sync_waitbases(struct nvhost_channel *ch, u32 syncpt_val)
+{
+	unsigned long waitbase;
+	unsigned long waitbase_mask = ch->dev->waitbases;
+	if (ch->dev->waitbasesync) {
+		waitbase = find_first_bit(&waitbase_mask, BITS_PER_LONG);
+		nvhost_cdma_push(&ch->cdma,
+			nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+				host1x_uclass_load_syncpt_base_r(),
+				1),
+				nvhost_class_host_load_syncpt_base(waitbase,
+						syncpt_val));
+	}
+}
+
+static void *pre_submit_ctxsave(struct nvhost_job *job,
+		struct nvhost_hwctx *cur_ctx)
+{
+	struct nvhost_channel *ch = job->ch;
+	void *ctxsave_waiter = NULL;
+
+	/* Is a save needed? */
+	if (!cur_ctx || ch->cur_ctx == job->hwctx)
+		return NULL;
+
+	if (cur_ctx->has_timedout) {
+		dev_dbg(&ch->dev->dev,
+			"%s: skip save of timed out context (0x%p)\n",
+			__func__, ch->cur_ctx);
+
+		return NULL;
+	}
+
+	/* Allocate save waiter if needed */
+	if (ch->ctxhandler->save_service) {
+		ctxsave_waiter = nvhost_intr_alloc_waiter();
+		if (!ctxsave_waiter)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	return ctxsave_waiter;
+}
+
+static void submit_ctxsave(struct nvhost_job *job, void *ctxsave_waiter,
+		struct nvhost_hwctx *cur_ctx)
+{
+	struct nvhost_master *host = nvhost_get_host(job->ch->dev);
+	struct nvhost_channel *ch = job->ch;
+	u32 syncval;
+	int err;
+	u32 save_thresh = 0;
+
+	/* Is a save needed? */
+	if (!cur_ctx || cur_ctx == job->hwctx || cur_ctx->has_timedout)
+		return;
+
+	/* Retrieve save threshold if we have a waiter */
+	if (ctxsave_waiter)
+		save_thresh =
+			nvhost_syncpt_read_max(&host->syncpt, job->syncpt_id)
+			+ to_host1x_hwctx(cur_ctx)->save_thresh;
+
+	/* Adjust the syncpoint max */
+	job->syncpt_incrs += to_host1x_hwctx(cur_ctx)->save_incrs;
+	syncval = nvhost_syncpt_incr_max(&host->syncpt,
+			job->syncpt_id,
+			to_host1x_hwctx(cur_ctx)->save_incrs);
+
+	/* Send the save to channel */
+	cur_ctx->valid = true;
+	ch->ctxhandler->save_push(cur_ctx, &ch->cdma);
+	nvhost_job_get_hwctx(job, cur_ctx);
+
+	/* Notify save service */
+	if (ctxsave_waiter) {
+		err = nvhost_intr_add_action(&host->intr,
+			job->syncpt_id,
+			save_thresh,
+			NVHOST_INTR_ACTION_CTXSAVE, cur_ctx,
+			ctxsave_waiter,
+			NULL);
+		ctxsave_waiter = NULL;
+		WARN(err, "Failed to set ctx save interrupt");
+	}
+
+	trace_nvhost_channel_context_save(ch->dev->name, cur_ctx);
+}
+
+static void submit_ctxrestore(struct nvhost_job *job)
+{
+	struct nvhost_master *host = nvhost_get_host(job->ch->dev);
+	struct nvhost_channel *ch = job->ch;
+	u32 syncval;
+	struct host1x_hwctx *ctx =
+		job->hwctx ? to_host1x_hwctx(job->hwctx) : NULL;
+
+	/* First check if we have a valid context to restore */
+	if (ch->cur_ctx == job->hwctx || !job->hwctx || !job->hwctx->valid)
+		return;
+
+	/* Increment syncpt max */
+	job->syncpt_incrs += ctx->restore_incrs;
+	syncval = nvhost_syncpt_incr_max(&host->syncpt,
+			job->syncpt_id,
+			ctx->restore_incrs);
+
+	/* Send restore buffer to channel */
+	nvhost_cdma_push_gather(&ch->cdma,
+		host->memmgr,
+		ctx->restore,
+		0,
+		nvhost_opcode_gather(ctx->restore_size),
+		ctx->restore_phys);
+
+	trace_nvhost_channel_context_restore(ch->dev->name, &ctx->hwctx);
+}
+
+static void submit_nullkickoff(struct nvhost_job *job, int user_syncpt_incrs)
+{
+	struct nvhost_channel *ch = job->ch;
+	int incr;
+	u32 op_incr;
+
+	/* push increments that correspond to nulled out commands */
+	op_incr = nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_op_done_v(),
+			job->syncpt_id);
+	for (incr = 0; incr < (user_syncpt_incrs >> 1); incr++)
+		nvhost_cdma_push(&ch->cdma, op_incr, op_incr);
+	if (user_syncpt_incrs & 1)
+		nvhost_cdma_push(&ch->cdma, op_incr, NVHOST_OPCODE_NOOP);
+
+	/* for 3d, waitbase needs to be incremented after each submit */
+	if (ch->dev->class == NV_GRAPHICS_3D_CLASS_ID) {
+		u32 waitbase = to_host1x_hwctx_handler(job->hwctx->h)->waitbase;
+		nvhost_cdma_push(&ch->cdma,
+			nvhost_opcode_setclass(
+				NV_HOST1X_CLASS_ID,
+				host1x_uclass_incr_syncpt_base_r(),
+				1),
+			nvhost_class_host_incr_syncpt_base(
+				waitbase,
+				user_syncpt_incrs));
+	}
+}
+
+static void submit_gathers(struct nvhost_job *job)
+{
+	/* push user gathers */
+	int i;
+	for (i = 0; i < job->num_gathers; i++) {
+		u32 op1 = nvhost_opcode_gather(job->gathers[i].words);
+		u32 op2 = job->gathers[i].mem;
+		nvhost_cdma_push_gather(&job->ch->cdma,
+				job->memmgr,
+				job->gathers[i].ref,
+				job->gathers[i].offset,
+				op1, op2);
+	}
+}
+
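+/*
+ * Submit a job to the channel: power up the module, optionally insert a
+ * host wait to serialize with the previous job, push the context save
+ * and restore buffers and the user gathers into CDMA, then schedule the
+ * submit-complete interrupt.
+ */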
+static int host1x_channel_submit(struct nvhost_job *job)
+{
+	struct nvhost_channel *ch = job->ch;
+	struct nvhost_syncpt *sp = &nvhost_get_host(job->ch->dev)->syncpt;
+	u32 user_syncpt_incrs = job->syncpt_incrs;
+	u32 prev_max = 0;
+	u32 syncval;
+	int err;
+	void *completed_waiter = NULL, *ctxsave_waiter = NULL;
+	struct nvhost_driver *drv = to_nvhost_driver(ch->dev->dev.driver);
+
+	/* Bail out on timed out contexts */
+	if (job->hwctx && job->hwctx->has_timedout)
+		return -ETIMEDOUT;
+
+	/* Turn on the client module and host1x */
+	nvhost_module_busy(ch->dev);
+	if (drv->busy)
+		drv->busy(ch->dev);
+
+	/* before error checks, snapshot the current syncpt max */
+	prev_max = job->syncpt_end =
+		nvhost_syncpt_read_max(sp, job->syncpt_id);
+
+	/* get submit lock */
+	err = mutex_lock_interruptible(&ch->submitlock);
+	if (err) {
+		nvhost_module_idle(ch->dev);
+		goto error;
+	}
+
+	/* Do the needed allocations */
+	ctxsave_waiter = pre_submit_ctxsave(job, ch->cur_ctx);
+	if (IS_ERR(ctxsave_waiter)) {
+		err = PTR_ERR(ctxsave_waiter);
+		nvhost_module_idle(ch->dev);
+		mutex_unlock(&ch->submitlock);
+		goto error;
+	}
+
+	completed_waiter = nvhost_intr_alloc_waiter();
+	if (!completed_waiter) {
+		nvhost_module_idle(ch->dev);
+		mutex_unlock(&ch->submitlock);
+		err = -ENOMEM;
+		goto error;
+	}
+
+	/* begin a CDMA submit */
+	err = nvhost_cdma_begin(&ch->cdma, job);
+	if (err) {
+		mutex_unlock(&ch->submitlock);
+		nvhost_module_idle(ch->dev);
+		goto error;
+	}
+
+	if (ch->dev->serialize) {
+		/* Force serialization by inserting a host wait for the
+		 * previous job to finish before this one can commence. */
+		nvhost_cdma_push(&ch->cdma,
+				nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_wait_syncpt_r(),
+					1),
+				nvhost_class_host_wait_syncpt(job->syncpt_id,
+					nvhost_syncpt_read_max(sp,
+						job->syncpt_id)));
+	}
+
+	submit_ctxsave(job, ctxsave_waiter, ch->cur_ctx);
+	submit_ctxrestore(job);
+	ch->cur_ctx = job->hwctx;
+
+	syncval = nvhost_syncpt_incr_max(sp,
+			job->syncpt_id, user_syncpt_incrs);
+
+	job->syncpt_end = syncval;
+
+	/* add a setclass for modules that require it */
+	if (ch->dev->class)
+		nvhost_cdma_push(&ch->cdma,
+			nvhost_opcode_setclass(ch->dev->class, 0, 0),
+			NVHOST_OPCODE_NOOP);
+
+	if (job->null_kickoff)
+		submit_nullkickoff(job, user_syncpt_incrs);
+	else
+		submit_gathers(job);
+
+	sync_waitbases(ch, job->syncpt_end);
+
+	/* end CDMA submit & stash pinned hMems into sync queue */
+	nvhost_cdma_end(&ch->cdma, job);
+
+	trace_nvhost_channel_submitted(ch->dev->name,
+			prev_max, syncval);
+
+	/* schedule a submit complete interrupt */
+	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
+			job->syncpt_id, syncval,
+			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ch,
+			completed_waiter,
+			NULL);
+	completed_waiter = NULL;
+	WARN(err, "Failed to set submit complete interrupt");
+
+	mutex_unlock(&ch->submitlock);
+
+	return 0;
+
+error:
+	kfree(ctxsave_waiter);
+	kfree(completed_waiter);
+	return err;
+}
+
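+/*
+ * Read a 3D register via the host indirect FIFO: switch the channel to
+ * the 3D class, ask 3D to write the register value into the output FIFO,
+ * wait for the syncpt increment marking the value as ready (syncval - 2),
+ * then drain it with host1x_drain_read_fifo().
+ */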
+static int host1x_channel_read_3d_reg(
+	struct nvhost_channel *channel,
+	struct nvhost_hwctx *hwctx,
+	u32 offset,
+	u32 *value)
+{
+	struct host1x_hwctx *hwctx_to_save = NULL;
+	struct nvhost_hwctx_handler *h = hwctx->h;
+	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
+	bool need_restore = false;
+	u32 syncpt_incrs = 4;
+	unsigned int pending = 0;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+	void *ref;
+	void *ctx_waiter, *read_waiter, *completed_waiter;
+	struct nvhost_job *job;
+	u32 syncval;
+	int err;
+
+	if (hwctx && hwctx->has_timedout)
+		return -ETIMEDOUT;
+
+	ctx_waiter = nvhost_intr_alloc_waiter();
+	read_waiter = nvhost_intr_alloc_waiter();
+	completed_waiter = nvhost_intr_alloc_waiter();
+	if (!ctx_waiter || !read_waiter || !completed_waiter) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	job = nvhost_job_alloc(channel, hwctx,
+			NULL,
+			nvhost_get_host(channel->dev)->memmgr, 0, 0);
+	if (!job) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	/* keep module powered */
+	nvhost_module_busy(channel->dev);
+
+	/* get submit lock */
+	err = mutex_lock_interruptible(&channel->submitlock);
+	if (err) {
+		nvhost_module_idle(channel->dev);
+		nvhost_job_put(job);
+		goto done;
+	}
+
+	/* context switch */
+	if (channel->cur_ctx != hwctx) {
+		hwctx_to_save = channel->cur_ctx ?
+			to_host1x_hwctx(channel->cur_ctx) : NULL;
+		if (hwctx_to_save) {
+			syncpt_incrs += hwctx_to_save->save_incrs;
+			hwctx_to_save->hwctx.valid = true;
+			nvhost_job_get_hwctx(job, &hwctx_to_save->hwctx);
+		}
+		channel->cur_ctx = hwctx;
+		if (channel->cur_ctx && channel->cur_ctx->valid) {
+			need_restore = true;
+			syncpt_incrs += to_host1x_hwctx(channel->cur_ctx)
+				->restore_incrs;
+		}
+	}
+
+	syncval = nvhost_syncpt_incr_max(&nvhost_get_host(channel->dev)->syncpt,
+		p->syncpt, syncpt_incrs);
+
+	job->syncpt_id = p->syncpt;
+	job->syncpt_incrs = syncpt_incrs;
+	job->syncpt_end = syncval;
+
+	/* begin a CDMA submit */
+	nvhost_cdma_begin(&channel->cdma, job);
+
+	/* push save buffer (pre-gather setup depends on unit) */
+	if (hwctx_to_save)
+		h->save_push(&hwctx_to_save->hwctx, &channel->cdma);
+
+	/* gather restore buffer */
+	if (need_restore)
+		nvhost_cdma_push(&channel->cdma,
+			nvhost_opcode_gather(to_host1x_hwctx(channel->cur_ctx)
+				->restore_size),
+			to_host1x_hwctx(channel->cur_ctx)->restore_phys);
+
+	/* Switch to 3D - wait for it to complete what it was doing */
+	nvhost_cdma_push(&channel->cdma,
+		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
+		nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_op_done_v(),
+			p->syncpt));
+	nvhost_cdma_push(&channel->cdma,
+		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+			host1x_uclass_wait_syncpt_base_r(), 1),
+		nvhost_class_host_wait_syncpt_base(p->syncpt,
+			p->waitbase, 1));
+	/*  Tell 3D to send register value to FIFO */
+	nvhost_cdma_push(&channel->cdma,
+		nvhost_opcode_nonincr(host1x_uclass_indoff_r(), 1),
+		nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+			offset, false));
+	nvhost_cdma_push(&channel->cdma,
+		nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
+		NVHOST_OPCODE_NOOP);
+	/*  Increment syncpt to indicate that FIFO can be read */
+	nvhost_cdma_push(&channel->cdma,
+		nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_immediate_v(),
+			p->syncpt),
+		NVHOST_OPCODE_NOOP);
+	/*  Wait for value to be read from FIFO */
+	nvhost_cdma_push(&channel->cdma,
+		nvhost_opcode_nonincr(host1x_uclass_wait_syncpt_base_r(), 1),
+		nvhost_class_host_wait_syncpt_base(p->syncpt,
+			p->waitbase, 3));
+	/*  Indicate submit complete */
+	nvhost_cdma_push(&channel->cdma,
+		nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1),
+		nvhost_class_host_incr_syncpt_base(p->waitbase, 4));
+	nvhost_cdma_push(&channel->cdma,
+		NVHOST_OPCODE_NOOP,
+		nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_immediate_v(),
+			p->syncpt));
+
+	/* end CDMA submit  */
+	nvhost_cdma_end(&channel->cdma, job);
+	nvhost_job_put(job);
+	job = NULL;
+
+	/*
+	 * schedule a context save interrupt (to drain the host FIFO
+	 * if necessary, and to release the restore buffer)
+	 */
+	if (hwctx_to_save) {
+		err = nvhost_intr_add_action(
+			&nvhost_get_host(channel->dev)->intr,
+			p->syncpt,
+			syncval - syncpt_incrs
+				+ hwctx_to_save->save_incrs
+				- 1,
+			NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
+			ctx_waiter,
+			NULL);
+		ctx_waiter = NULL;
+		WARN(err, "Failed to set context save interrupt");
+	}
+
+	/* Wait for FIFO to be ready */
+	err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr,
+			p->syncpt, syncval - 2,
+			NVHOST_INTR_ACTION_WAKEUP, &wq,
+			read_waiter,
+			&ref);
+	read_waiter = NULL;
+	WARN(err, "Failed to set wakeup interrupt");
+	wait_event(wq,
+		nvhost_syncpt_is_expired(&nvhost_get_host(channel->dev)->syncpt,
+				p->syncpt, syncval - 2));
+	nvhost_intr_put_ref(&nvhost_get_host(channel->dev)->intr, p->syncpt,
+			ref);
+
+	/* Read the register value from FIFO */
+	err = host1x_drain_read_fifo(channel, value, 1, &pending);
+
+	/* Indicate we've read the value */
+	nvhost_syncpt_cpu_incr(&nvhost_get_host(channel->dev)->syncpt,
+			p->syncpt);
+
+	/* Schedule a submit complete interrupt */
+	err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr,
+			p->syncpt, syncval,
+			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel,
+			completed_waiter, NULL);
+	completed_waiter = NULL;
+	WARN(err, "Failed to set submit complete interrupt");
+
+	mutex_unlock(&channel->submitlock);
+
+done:
+	kfree(ctx_waiter);
+	kfree(read_waiter);
+	kfree(completed_waiter);
+	return err;
+}
+
+
+static int host1x_drain_read_fifo(struct nvhost_channel *ch,
+	u32 *ptr, unsigned int count, unsigned int *pending)
+{
+	unsigned int entries = *pending;
+	unsigned long timeout = jiffies + NV_FIFO_READ_TIMEOUT;
+	void __iomem *chan_regs = ch->aperture;
+	while (count) {
+		unsigned int num;
+
+		while (!entries && time_before(jiffies, timeout)) {
+			/* query host for number of entries in fifo */
+			entries = host1x_channel_fifostat_outfentries_v(
+				readl(chan_regs + host1x_channel_fifostat_r()));
+			if (!entries)
+				cpu_relax();
+		}
+
+		/*  timeout -> return error */
+		if (!entries)
+			return -EIO;
+
+		num = min(entries, count);
+		entries -= num;
+		count -= num;
+
+		while (num & ~0x3) {
+			u32 arr[4];
+			arr[0] = readl(chan_regs + host1x_channel_inddata_r());
+			arr[1] = readl(chan_regs + host1x_channel_inddata_r());
+			arr[2] = readl(chan_regs + host1x_channel_inddata_r());
+			arr[3] = readl(chan_regs + host1x_channel_inddata_r());
+			memcpy(ptr, arr, 4*sizeof(u32));
+			ptr += 4;
+			num -= 4;
+		}
+		while (num--)
+			*ptr++ = readl(chan_regs + host1x_channel_inddata_r());
+	}
+	*pending = entries;
+
+	return 0;
+}
+
+static int host1x_save_context(struct nvhost_channel *ch)
+{
+	struct nvhost_device *dev = ch->dev;
+	struct nvhost_hwctx *hwctx_to_save;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+	u32 syncpt_incrs, syncpt_val;
+	int err = 0;
+	void *ref;
+	void *ctx_waiter = NULL, *wakeup_waiter = NULL;
+	struct nvhost_job *job;
+	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+	u32 syncpt_id;
+
+	ctx_waiter = nvhost_intr_alloc_waiter();
+	wakeup_waiter = nvhost_intr_alloc_waiter();
+	if (!ctx_waiter || !wakeup_waiter) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	if (drv->busy)
+		drv->busy(dev);
+
+	mutex_lock(&ch->submitlock);
+	hwctx_to_save = ch->cur_ctx;
+	if (!hwctx_to_save) {
+		mutex_unlock(&ch->submitlock);
+		goto done;
+	}
+
+	job = nvhost_job_alloc(ch, hwctx_to_save,
+			NULL,
+			nvhost_get_host(ch->dev)->memmgr, 0, 0);
+	if (!job) {
+		err = -ENOMEM;
+		mutex_unlock(&ch->submitlock);
+		goto done;
+	}
+
+	hwctx_to_save->valid = true;
+	ch->cur_ctx = NULL;
+	syncpt_id = to_host1x_hwctx_handler(hwctx_to_save->h)->syncpt;
+
+	syncpt_incrs = to_host1x_hwctx(hwctx_to_save)->save_incrs;
+	syncpt_val = nvhost_syncpt_incr_max(&nvhost_get_host(ch->dev)->syncpt,
+					syncpt_id, syncpt_incrs);
+
+	job->syncpt_id = syncpt_id;
+	job->syncpt_incrs = syncpt_incrs;
+	job->syncpt_end = syncpt_val;
+
+	err = nvhost_cdma_begin(&ch->cdma, job);
+	if (err) {
+		mutex_unlock(&ch->submitlock);
+		goto done;
+	}
+
+	ch->ctxhandler->save_push(hwctx_to_save, &ch->cdma);
+	nvhost_cdma_end(&ch->cdma, job);
+	nvhost_job_put(job);
+	job = NULL;
+
+	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr, syncpt_id,
+			syncpt_val - syncpt_incrs +
+				to_host1x_hwctx(hwctx_to_save)->save_thresh,
+			NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
+			ctx_waiter,
+			NULL);
+	ctx_waiter = NULL;
+	WARN(err, "Failed to set context save interrupt");
+
+	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
+			syncpt_id, syncpt_val,
+			NVHOST_INTR_ACTION_WAKEUP, &wq,
+			wakeup_waiter,
+			&ref);
+	wakeup_waiter = NULL;
+	WARN(err, "Failed to set wakeup interrupt");
+	wait_event(wq,
+		nvhost_syncpt_is_expired(&nvhost_get_host(ch->dev)->syncpt,
+				syncpt_id, syncpt_val));
+
+	nvhost_intr_put_ref(&nvhost_get_host(ch->dev)->intr, syncpt_id, ref);
+
+	nvhost_cdma_update(&ch->cdma);
+
+	mutex_unlock(&ch->submitlock);
+
+done:
+	kfree(ctx_waiter);
+	kfree(wakeup_waiter);
+	return err;
+}
+
+static inline void __iomem *host1x_channel_aperture(void __iomem *p, int ndx)
+{
+	p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
+	return p;
+}
+
+static inline int host1x_hwctx_handler_init(struct nvhost_channel *ch)
+{
+	int err = 0;
+	unsigned long syncpts = ch->dev->syncpts;
+	unsigned long waitbases = ch->dev->waitbases;
+	u32 syncpt = find_first_bit(&syncpts, BITS_PER_LONG);
+	u32 waitbase = find_first_bit(&waitbases, BITS_PER_LONG);
+	struct nvhost_driver *drv = to_nvhost_driver(ch->dev->dev.driver);
+
+	if (drv->alloc_hwctx_handler) {
+		ch->ctxhandler = drv->alloc_hwctx_handler(syncpt,
+				waitbase, ch);
+		if (!ch->ctxhandler)
+			err = -ENOMEM;
+	}
+
+	return err;
+}
+
+static int host1x_channel_init(struct nvhost_channel *ch,
+	struct nvhost_master *dev, int index)
+{
+	ch->chid = index;
+	mutex_init(&ch->reflock);
+	mutex_init(&ch->submitlock);
+
+	ch->aperture = host1x_channel_aperture(dev->aperture, index);
+
+	return host1x_hwctx_handler_init(ch);
+}
+
+static const struct nvhost_channel_ops host1x_channel_ops = {
+	.init = host1x_channel_init,
+	.submit = host1x_channel_submit,
+	.read3dreg = host1x_channel_read_3d_reg,
+	.save_context = host1x_save_context,
+	.drain_read_fifo = host1x_drain_read_fifo,
+};
diff --git a/drivers/staging/tegra/video/host/host1x/host1x_debug.c b/drivers/staging/tegra/video/host/host1x/host1x_debug.c
new file mode 100644
index 000000000000..bdfc9cf186b3
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x_debug.c
@@ -0,0 +1,410 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_debug.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "debug.h"
+#include "nvhost_cdma.h"
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+#include "chip_support.h"
+#include "nvhost_memmgr.h"
+
+#define NVHOST_DEBUG_MAX_PAGE_OFFSET 102400
+
+enum {
+	NVHOST_DBG_STATE_CMD = 0,
+	NVHOST_DBG_STATE_DATA = 1,
+	NVHOST_DBG_STATE_GATHER = 2
+};
+
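+/*
+ * The dump routines below form a small state machine: CMD decodes an
+ * opcode word, DATA consumes that opcode's payload words, and GATHER
+ * maps the referenced buffer and decodes its contents in turn.
+ */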
+static int show_channel_command(struct output *o, u32 addr, u32 val, int *count)
+{
+	unsigned mask;
+	unsigned subop;
+
+	switch (val >> 28) {
+	case 0x0:
+		mask = val & 0x3f;
+		if (mask) {
+			nvhost_debug_output(o,
+				"SETCL(class=%03x, offset=%03x, mask=%02x, [",
+				val >> 6 & 0x3ff, val >> 16 & 0xfff, mask);
+			*count = hweight8(mask);
+			return NVHOST_DBG_STATE_DATA;
+		} else {
+			nvhost_debug_output(o, "SETCL(class=%03x)\n",
+				val >> 6 & 0x3ff);
+			return NVHOST_DBG_STATE_CMD;
+		}
+
+	case 0x1:
+		nvhost_debug_output(o, "INCR(offset=%03x, [",
+			val >> 16 & 0xfff);
+		*count = val & 0xffff;
+		return NVHOST_DBG_STATE_DATA;
+
+	case 0x2:
+		nvhost_debug_output(o, "NONINCR(offset=%03x, [",
+			val >> 16 & 0xfff);
+		*count = val & 0xffff;
+		return NVHOST_DBG_STATE_DATA;
+
+	case 0x3:
+		mask = val & 0xffff;
+		nvhost_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
+			   val >> 16 & 0xfff, mask);
+		*count = hweight16(mask);
+		return NVHOST_DBG_STATE_DATA;
+
+	case 0x4:
+		nvhost_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
+			   val >> 16 & 0xfff, val & 0xffff);
+		return NVHOST_DBG_STATE_CMD;
+
+	case 0x5:
+		nvhost_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
+		return NVHOST_DBG_STATE_CMD;
+
+	case 0x6:
+		nvhost_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+			val >> 16 & 0xfff, val >> 15 & 0x1, val >> 14 & 0x1,
+			val & 0x3fff);
+		*count = val & 0x3fff; /* TODO: insert */
+		return NVHOST_DBG_STATE_GATHER;
+
+	case 0xe:
+		subop = val >> 24 & 0xf;
+		if (subop == 0)
+			nvhost_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
+				val & 0xff);
+		else if (subop == 1)
+			nvhost_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
+				val & 0xff);
+		else
+			nvhost_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
+		return NVHOST_DBG_STATE_CMD;
+
+	default:
+		return NVHOST_DBG_STATE_CMD;
+	}
+}
+
+static void show_channel_gather(struct output *o, u32 addr,
+		phys_addr_t phys_addr, u32 words, struct nvhost_cdma *cdma);
+
+static void show_channel_word(struct output *o, int *state, int *count,
+		u32 addr, u32 val, struct nvhost_cdma *cdma)
+{
+	static int start_count, dont_print;
+
+	switch (*state) {
+	case NVHOST_DBG_STATE_CMD:
+		if (addr)
+			nvhost_debug_output(o, "%08x: %08x:", addr, val);
+		else
+			nvhost_debug_output(o, "%08x:", val);
+
+		*state = show_channel_command(o, addr, val, count);
+		dont_print = 0;
+		start_count = *count;
+		if (*state == NVHOST_DBG_STATE_DATA && *count == 0) {
+			*state = NVHOST_DBG_STATE_CMD;
+			nvhost_debug_output(o, "])\n");
+		}
+		break;
+
+	case NVHOST_DBG_STATE_DATA:
+		(*count)--;
+		if (start_count - *count < 64)
+			nvhost_debug_output(o, "%08x%s",
+				val, *count > 0 ? ", " : "])\n");
+		else if (!dont_print && (*count > 0)) {
+			nvhost_debug_output(o, "[truncated; %d more words]\n",
+				*count);
+			dont_print = 1;
+		}
+		if (*count == 0)
+			*state = NVHOST_DBG_STATE_CMD;
+		break;
+
+	case NVHOST_DBG_STATE_GATHER:
+		*state = NVHOST_DBG_STATE_CMD;
+		nvhost_debug_output(o, "%08x]):\n", val);
+		if (cdma) {
+			show_channel_gather(o, addr, val,
+					*count, cdma);
+		}
+		break;
+	}
+}
+
+static void do_show_channel_gather(struct output *o,
+		phys_addr_t phys_addr,
+		u32 words, struct nvhost_cdma *cdma,
+		phys_addr_t pin_addr, u32 *map_addr)
+{
+	u32 offset;
+	int state, count, i;
+
+	offset = phys_addr - pin_addr;
+	/*
+	 * Sometimes we are given a different hardware address for the
+	 * same page; in that case the computed offset is invalid and we
+	 * just have to bail out.
+	 */
+	if (offset > NVHOST_DEBUG_MAX_PAGE_OFFSET) {
+		nvhost_debug_output(o, "[address mismatch]\n");
+	} else {
+		/* GATHER buffer starts always with commands */
+		state = NVHOST_DBG_STATE_CMD;
+		for (i = 0; i < words; i++)
+			show_channel_word(o, &state, &count,
+					phys_addr + i * 4,
+					*(map_addr + offset/4 + i),
+					cdma);
+	}
+}
+
+static void show_channel_gather(struct output *o, u32 addr,
+		phys_addr_t phys_addr,
+		u32 words, struct nvhost_cdma *cdma)
+{
+#if defined(CONFIG_TEGRA_NVMAP)
+	/* Map dmaget cursor to corresponding nvmap_handle */
+	struct push_buffer *pb = &cdma->push_buffer;
+	u32 cur = addr - pb->phys;
+	struct mem_mgr_handle *nvmap = &pb->client_handle[cur/8];
+	u32 *map_addr;
+	phys_addr_t pin_addr;
+
+	if (!nvmap || !nvmap->handle || !nvmap->client) {
+		nvhost_debug_output(o, "[already deallocated]\n");
+		return;
+	}
+
+	map_addr = mem_op().mmap(nvmap->handle);
+	if (!map_addr) {
+		nvhost_debug_output(o, "[could not mmap]\n");
+		return;
+	}
+
+	/* Get base address from nvmap */
+	pin_addr = mem_op().pin(nvmap->client, nvmap->handle);
+	if (IS_ERR_VALUE(pin_addr)) {
+		nvhost_debug_output(o, "[couldn't pin]\n");
+		mem_op().munmap(nvmap->handle, map_addr);
+		return;
+	}
+
+	do_show_channel_gather(o, phys_addr, words, cdma,
+			pin_addr, map_addr);
+	mem_op().unpin(nvmap->client, nvmap->handle);
+	mem_op().munmap(nvmap->handle, map_addr);
+#endif
+}
+
+static void show_channel_gathers(struct output *o, struct nvhost_cdma *cdma)
+{
+	struct nvhost_job *job;
+
+	list_for_each_entry(job, &cdma->sync_queue, list) {
+		int i;
+		nvhost_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d,"
+				" first_get=%08x, timeout=%d, ctx=%p,"
+				" num_slots=%d, num_handles=%d\n",
+				job,
+				job->syncpt_id,
+				job->syncpt_end,
+				job->first_get,
+				job->timeout,
+				job->hwctx,
+				job->num_slots,
+				job->num_unpins);
+
+		for (i = 0; i < job->num_gathers; i++) {
+			struct nvhost_job_gather *g = &job->gathers[i];
+			u32 *mapped = g ? mem_op().mmap(g->ref) : NULL;
+			if (!mapped) {
+				nvhost_debug_output(o, "[could not mmap]\n");
+				continue;
+			}
+
+			nvhost_debug_output(o, "    GATHER at %08x, %d words\n",
+				g->mem, g->words);
+
+			do_show_channel_gather(o, g->mem + g->offset,
+					g->words, cdma, g->mem, mapped);
+			mem_op().munmap(g->ref, mapped);
+		}
+	}
+}
+
+static void t20_debug_show_channel_cdma(struct nvhost_master *m,
+	struct nvhost_channel *ch, struct output *o, int chid)
+{
+	struct nvhost_channel *channel = ch;
+	struct nvhost_cdma *cdma = &channel->cdma;
+	u32 dmaput, dmaget, dmactrl;
+	u32 cbstat, cbread;
+	u32 val, base, baseval;
+
+	dmaput = readl(channel->aperture + host1x_channel_dmaput_r());
+	dmaget = readl(channel->aperture + host1x_channel_dmaget_r());
+	dmactrl = readl(channel->aperture + host1x_channel_dmactrl_r());
+	cbread = readl(m->sync_aperture + host1x_sync_cbread0_r() + 4 * chid);
+	cbstat = readl(m->sync_aperture + host1x_sync_cbstat_0_r() + 4 * chid);
+
+	nvhost_debug_output(o, "%d-%s (%d): ", chid,
+			    channel->dev->name,
+			    channel->dev->refcount);
+
+	if (host1x_channel_dmactrl_dmastop_v(dmactrl)
+		|| !channel->cdma.push_buffer.mapped) {
+		nvhost_debug_output(o, "inactive\n\n");
+		return;
+	}
+
+	switch (cbstat) {
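+	/* cbstat is (class << 16) | method; 0x0001xxxx is the host1x class,
+	 * method 0x08 = wait_syncpt, 0x09 = wait_syncpt_base */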
+	case 0x00010008:
+		nvhost_debug_output(o, "waiting on syncpt %d val %d\n",
+			cbread >> 24, cbread & 0xffffff);
+		break;
+
+	case 0x00010009:
+		base = (cbread >> 16) & 0xff;
+		baseval = readl(m->sync_aperture +
+				host1x_sync_syncpt_base_0_r() + 4 * base);
+		val = cbread & 0xffff;
+		nvhost_debug_output(o, "waiting on syncpt %d val %d "
+			  "(base %d = %d; offset = %d)\n",
+			cbread >> 24, baseval + val,
+			base, baseval, val);
+		break;
+
+	default:
+		nvhost_debug_output(o,
+				"active class %02x, offset %04x, val %08x\n",
+				host1x_sync_cbstat_0_cbclass0_v(cbstat),
+				host1x_sync_cbstat_0_cboffset0_v(cbstat),
+				cbread);
+		break;
+	}
+
+	nvhost_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+		dmaput, dmaget, dmactrl);
+	nvhost_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
+
+	show_channel_gathers(o, cdma);
+	nvhost_debug_output(o, "\n");
+}
+
+static void t20_debug_show_channel_fifo(struct nvhost_master *m,
+	struct nvhost_channel *ch, struct output *o, int chid)
+{
+	u32 val, rd_ptr, wr_ptr, start, end;
+	struct nvhost_channel *channel = ch;
+	int state, count;
+
+	nvhost_debug_output(o, "%d: fifo:\n", chid);
+
+	val = readl(channel->aperture + host1x_channel_fifostat_r());
+	nvhost_debug_output(o, "FIFOSTAT %08x\n", val);
+	if (host1x_channel_fifostat_cfempty_v(val)) {
+		nvhost_debug_output(o, "[empty]\n");
+		return;
+	}
+
+	writel(0x0, m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+	writel(host1x_sync_cfpeek_ctrl_cfpeek_ena_f(1)
+			| host1x_sync_cfpeek_ctrl_cfpeek_channr_f(chid),
+		m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+
+	val = readl(m->sync_aperture + host1x_sync_cfpeek_ptrs_r());
+	rd_ptr = host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(val);
+	wr_ptr = host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(val);
+
+	val = readl(m->sync_aperture + host1x_sync_cf0_setup_r() + 4 * chid);
+	start = host1x_sync_cf0_setup_cf0_base_v(val);
+	end = host1x_sync_cf0_setup_cf0_limit_v(val);
+
+	state = NVHOST_DBG_STATE_CMD;
+
+	do {
+		writel(0x0, m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+		writel(host1x_sync_cfpeek_ctrl_cfpeek_ena_f(1)
+				| host1x_sync_cfpeek_ctrl_cfpeek_channr_f(chid)
+				| host1x_sync_cfpeek_ctrl_cfpeek_addr_f(rd_ptr),
+			m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+		val = readl(m->sync_aperture + host1x_sync_cfpeek_read_r());
+
+		show_channel_word(o, &state, &count, 0, val, NULL);
+
+		if (rd_ptr == end)
+			rd_ptr = start;
+		else
+			rd_ptr++;
+	} while (rd_ptr != wr_ptr);
+
+	if (state == NVHOST_DBG_STATE_DATA)
+		nvhost_debug_output(o, ", ...])\n");
+	nvhost_debug_output(o, "\n");
+
+	writel(0x0, m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+}
+
+static void t20_debug_show_mlocks(struct nvhost_master *m, struct output *o)
+{
+	u32 __iomem *mlo_regs = m->sync_aperture +
+		host1x_sync_mlock_owner_0_r();
+	int i;
+
+	nvhost_debug_output(o, "---- mlocks ----\n");
+	for (i = 0; i < NV_HOST1X_NB_MLOCKS; i++) {
+		u32 owner = readl(mlo_regs + i);
+		if (host1x_sync_mlock_owner_0_mlock_ch_owns_0_v(owner))
+			nvhost_debug_output(o, "%d: locked by channel %d\n",
+				i,
+				host1x_sync_mlock_owner_0_mlock_owner_chid_0_f(
+					owner));
+		else if (host1x_sync_mlock_owner_0_mlock_cpu_owns_0_v(owner))
+			nvhost_debug_output(o, "%d: locked by cpu\n", i);
+		else
+			nvhost_debug_output(o, "%d: unlocked\n", i);
+	}
+	nvhost_debug_output(o, "\n");
+}
+
+static const struct nvhost_debug_ops host1x_debug_ops = {
+	.show_channel_cdma = t20_debug_show_channel_cdma,
+	.show_channel_fifo = t20_debug_show_channel_fifo,
+	.show_mlocks = t20_debug_show_mlocks,
+};
diff --git a/drivers/staging/tegra/video/host/host1x/host1x_hwctx.h b/drivers/staging/tegra/video/host/host1x/host1x_hwctx.h
new file mode 100644
index 000000000000..13f0071d1e33
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x_hwctx.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_hwctx.h
+ *
+ * Tegra Graphics Host HOST1X Hardware Context Interface
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HOST1X_HWCTX_H
+#define __NVHOST_HOST1X_HWCTX_H
+
+#include <linux/kref.h>
+#include "nvhost_hwctx.h"
+
+struct nvhost_hwctx_handler;
+struct nvhost_channel;
+
+#define to_host1x_hwctx_handler(handler) \
+	container_of((handler), struct host1x_hwctx_handler, h)
+#define to_host1x_hwctx(h) container_of((h), struct host1x_hwctx, hwctx)
+#define host1x_hwctx_handler(_hwctx) to_host1x_hwctx_handler((_hwctx)->hwctx.h)
+
+struct host1x_hwctx {
+	struct nvhost_hwctx hwctx;
+
+	u32 save_incrs;
+	u32 save_thresh;
+	u32 save_slots;
+
+	struct mem_handle *restore;
+	u32 *restore_virt;
+	phys_addr_t restore_phys;
+	u32 restore_size;
+	u32 restore_incrs;
+};
+
+struct host1x_hwctx_handler {
+	struct nvhost_hwctx_handler h;
+
+	u32 syncpt;
+	u32 waitbase;
+	u32 restore_size;
+	u32 restore_incrs;
+	struct mem_handle *save_buf;
+	u32 save_incrs;
+	u32 save_thresh;
+	u32 save_slots;
+	phys_addr_t save_phys;
+	u32 save_size;
+};
+
+#endif
diff --git a/drivers/staging/tegra/video/host/host1x/host1x_intr.c b/drivers/staging/tegra/video/host/host1x/host1x_intr.c
new file mode 100644
index 000000000000..289cce63792b
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x_intr.c
@@ -0,0 +1,281 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_intr.c
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/mach/irq.h>
+
+#include "nvhost_intr.h"
+#include "dev.h"
+
+/* Spacing between sync registers */
+#define REGISTER_STRIDE 4
+
+/*** HW host sync management ***/
+
+static void t20_intr_syncpt_thresh_isr(struct nvhost_intr_syncpt *syncpt);
+
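+/* Threshold interrupts are handled in two stages: the cascade ISR below
+ * masks the expired syncpt in hardware and then defers the actual waiter
+ * processing to the intr workqueue (nvhost_syncpt_thresh_fn). */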
+static void t20_syncpt_thresh_cascade_fn(struct work_struct *work)
+{
+	struct nvhost_intr_syncpt *sp =
+		container_of(work, struct nvhost_intr_syncpt, work);
+	nvhost_syncpt_thresh_fn(sp);
+}
+
+static irqreturn_t t20_syncpt_thresh_cascade_isr(int irq, void *dev_id)
+{
+	struct nvhost_intr *intr = dev_id;
+	struct nvhost_master *dev = intr_to_dev(intr);
+	void __iomem *sync_regs = dev->sync_aperture;
+	unsigned long reg;
+	int i, id;
+
+	for (i = 0; i < dev->info.nb_pts / BITS_PER_LONG; i++) {
+		reg = readl(sync_regs +
+			    host1x_sync_syncpt_thresh_cpu0_int_status_r() +
+			    i * REGISTER_STRIDE);
+		for_each_set_bit(id, &reg, BITS_PER_LONG) {
+			struct nvhost_intr_syncpt *sp =
+				intr->syncpt + (i * BITS_PER_LONG + id);
+			t20_intr_syncpt_thresh_isr(sp);
+			queue_work(intr->wq, &sp->work);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void t20_request_syncpt_irq(struct nvhost_intr *intr)
+{
+	struct nvhost_master *dev = intr_to_dev(intr);
+	void __iomem *sync_regs = dev->sync_aperture;
+	int i, err;
+
+	if (intr->syncpt_irq_requested)
+		return;
+
+	writel(0xffffffffUL,
+	       sync_regs + host1x_sync_syncpt_thresh_int_disable_r());
+	writel(0xffffffffUL,
+	       sync_regs + host1x_sync_syncpt_thresh_cpu0_int_status_r());
+
+	for (i = 0; i < dev->info.nb_pts; i++)
+		INIT_WORK(&intr->syncpt[i].work, t20_syncpt_thresh_cascade_fn);
+
+	err = request_irq(intr->syncpt_irq, t20_syncpt_thresh_cascade_isr,
+			  IRQF_SHARED, "host1x_syncpt", intr);
+	WARN_ON(err);
+
+	/* disable the ip_busy_timeout. this prevents write drops, etc.
+	 * there's no real way to recover from a hung client anyway.
+	 */
+	writel(0, sync_regs + host1x_sync_ip_busy_timeout_r());
+
+	/* increase the auto-ack timeout to the maximum value. 2d will hang
+	 * otherwise on ap20.
+	 */
+	writel(0xff, sync_regs + host1x_sync_ctxsw_timeout_cfg_r());
+
+	intr->syncpt_irq_requested = true;
+}
+
+static void t20_free_syncpt_irq(struct nvhost_intr *intr)
+{
+	if (intr->syncpt_irq_requested) {
+		free_irq(intr->syncpt_irq, intr);
+		flush_workqueue(intr->wq);
+		intr->syncpt_irq_requested = false;
+	}
+}
+
+static void t20_intr_set_host_clocks_per_usec(struct nvhost_intr *intr, u32 cpm)
+{
+	struct nvhost_master *dev = intr_to_dev(intr);
+	void __iomem *sync_regs = dev->sync_aperture;
+	/* write microsecond clock register */
+	writel(cpm, sync_regs + host1x_sync_usec_clk_r());
+}
+
+static void t20_intr_set_syncpt_threshold(struct nvhost_intr *intr,
+	u32 id, u32 thresh)
+{
+	struct nvhost_master *dev = intr_to_dev(intr);
+	void __iomem *sync_regs = dev->sync_aperture;
+	thresh &= 0xffff;
+	writel(thresh, sync_regs +
+		(host1x_sync_syncpt_int_thresh_0_r() + id * REGISTER_STRIDE));
+}
+
+static void t20_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id)
+{
+	struct nvhost_master *dev = intr_to_dev(intr);
+	void __iomem *sync_regs = dev->sync_aperture;
+
+	writel(BIT_MASK(id), sync_regs +
+			host1x_sync_syncpt_thresh_int_enable_cpu0_r() +
+			BIT_WORD(id) * REGISTER_STRIDE);
+}
+
+static void t20_intr_disable_syncpt_intr(struct nvhost_intr *intr, u32 id)
+{
+	struct nvhost_master *dev = intr_to_dev(intr);
+	void __iomem *sync_regs = dev->sync_aperture;
+
+	writel(BIT_MASK(id), sync_regs +
+			host1x_sync_syncpt_thresh_int_disable_r() +
+			BIT_WORD(id) * REGISTER_STRIDE);
+}
+
+static void t20_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr)
+{
+	struct nvhost_master *dev = intr_to_dev(intr);
+	void __iomem *sync_regs = dev->sync_aperture;
+	u32 reg;
+
+	for (reg = 0; reg <= BIT_WORD(dev->info.nb_pts) * REGISTER_STRIDE;
+			reg += REGISTER_STRIDE) {
+		/* disable interrupts for both cpu's */
+		writel(0xffffffffu, sync_regs +
+				host1x_sync_syncpt_thresh_int_disable_r() +
+				reg);
+
+		/* clear status for both cpu's */
+		writel(0xffffffffu, sync_regs +
+			host1x_sync_syncpt_thresh_cpu0_int_status_r() + reg);
+		writel(0xffffffffu, sync_regs +
+			host1x_sync_syncpt_thresh_cpu1_int_status_r() + reg);
+	}
+}
+
+/**
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers, in interrupt context
+ */
+static void t20_intr_syncpt_thresh_isr(struct nvhost_intr_syncpt *syncpt)
+{
+	unsigned int id = syncpt->id;
+	struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
+
+	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+	u32 reg = BIT_WORD(id) * REGISTER_STRIDE;
+
+	writel(BIT_MASK(id), sync_regs +
+		host1x_sync_syncpt_thresh_int_disable_r() + reg);
+	writel(BIT_MASK(id), sync_regs +
+		host1x_sync_syncpt_thresh_cpu0_int_status_r() + reg);
+}
+
+/**
+ * Host general interrupt service function
+ * Handles read / write failures
+ */
+static irqreturn_t t20_intr_host1x_isr(int irq, void *dev_id)
+{
+	struct nvhost_intr *intr = dev_id;
+	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+	u32 stat;
+	u32 ext_stat;
+	u32 addr;
+
+	stat = readl(sync_regs + host1x_sync_hintstatus_r());
+	ext_stat = readl(sync_regs + host1x_sync_hintstatus_ext_r());
+
+	if (host1x_sync_hintstatus_ext_ip_read_int_v(ext_stat)) {
+		addr = readl(sync_regs + host1x_sync_ip_read_timeout_addr_r());
+		pr_err("Host read timeout at address %x\n", addr);
+	}
+
+	if (host1x_sync_hintstatus_ext_ip_write_int_v(ext_stat)) {
+		addr = readl(sync_regs + host1x_sync_ip_write_timeout_addr_r());
+		pr_err("Host write timeout at address %x\n", addr);
+	}
+
+	writel(ext_stat, sync_regs + host1x_sync_hintstatus_ext_r());
+	writel(stat, sync_regs + host1x_sync_hintstatus_r());
+
+	return IRQ_HANDLED;
+}
+
+static int t20_intr_request_host_general_irq(struct nvhost_intr *intr)
+{
+	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+	int err;
+
+	if (intr->host_general_irq_requested)
+		return 0;
+
+	/* master disable for general (not syncpt) host interrupts */
+	writel(0, sync_regs + host1x_sync_intmask_r());
+
+	/* clear status & extstatus */
+	writel(0xfffffffful, sync_regs + host1x_sync_hintstatus_ext_r());
+	writel(0xfffffffful, sync_regs + host1x_sync_hintstatus_r());
+
+	err = request_irq(intr->host_general_irq, t20_intr_host1x_isr, 0,
+			"host_status", intr);
+	if (err)
+		return err;
+
+	/* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
+	writel(BIT(30) | BIT(31), sync_regs + host1x_sync_hintmask_ext_r());
+
+	/* enable extra interrupt sources */
+	writel(BIT(31), sync_regs + host1x_sync_hintmask_r());
+
+	/* enable host module interrupt to CPU0 */
+	writel(BIT(0), sync_regs + host1x_sync_intc0mask_r());
+
+	/* master enable for general (not syncpt) host interrupts */
+	writel(BIT(0), sync_regs + host1x_sync_intmask_r());
+
+	intr->host_general_irq_requested = true;
+
+	return err;
+}
+
+static void t20_intr_free_host_general_irq(struct nvhost_intr *intr)
+{
+	if (intr->host_general_irq_requested) {
+		void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+		/* master disable for general (not syncpt) host interrupts */
+		writel(0, sync_regs + host1x_sync_intmask_r());
+
+		free_irq(intr->host_general_irq, intr);
+		intr->host_general_irq_requested = false;
+	}
+}
+
+static const struct nvhost_intr_ops host1x_intr_ops = {
+	.request_syncpt_irq = t20_request_syncpt_irq,
+	.free_syncpt_irq = t20_free_syncpt_irq,
+	.set_host_clocks_per_usec = t20_intr_set_host_clocks_per_usec,
+	.set_syncpt_threshold = t20_intr_set_syncpt_threshold,
+	.enable_syncpt_intr = t20_intr_enable_syncpt_intr,
+	.disable_syncpt_intr = t20_intr_disable_syncpt_intr,
+	.disable_all_syncpt_intrs = t20_intr_disable_all_syncpt_intrs,
+	.request_host_general_irq = t20_intr_request_host_general_irq,
+	.free_host_general_irq = t20_intr_free_host_general_irq,
+};
diff --git a/drivers/staging/tegra/video/host/host1x/host1x_syncpt.c b/drivers/staging/tegra/video/host/host1x/host1x_syncpt.c
new file mode 100644
index 000000000000..8cca9dbbbc08
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x_syncpt.c
@@ -0,0 +1,180 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_syncpt.c
+ *
+ * Tegra Graphics Host Syncpoints for HOST1X
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/nvhost_ioctl.h>
+#include <linux/io.h>
+#include <trace/events/nvhost.h>
+#include "nvhost_syncpt.h"
+#include "nvhost_acm.h"
+#include "host1x.h"
+#include "chip_support.h"
+
+/**
+ * Write the current syncpoint value back to hw.
+ */
+static void t20_syncpt_reset(struct nvhost_syncpt *sp, u32 id)
+{
+	struct nvhost_master *dev = syncpt_to_dev(sp);
+	int min = nvhost_syncpt_read_min(sp, id);
+	writel(min, dev->sync_aperture + (host1x_sync_syncpt_0_r() + id * 4));
+}
+
+/**
+ * Write the current waitbase value back to hw.
+ */
+static void t20_syncpt_reset_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+	struct nvhost_master *dev = syncpt_to_dev(sp);
+	writel(sp->base_val[id],
+		dev->sync_aperture + (host1x_sync_syncpt_base_0_r() + id * 4));
+}
+
+/**
+ * Read waitbase value from hw.
+ */
+static void t20_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+	struct nvhost_master *dev = syncpt_to_dev(sp);
+	sp->base_val[id] = readl(dev->sync_aperture +
+				(host1x_sync_syncpt_base_0_r() + id * 4));
+}
+
+/**
+ * Updates the last value read from hardware.
+ * (was nvhost_syncpt_update_min)
+ */
+static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
+{
+	struct nvhost_master *dev = syncpt_to_dev(sp);
+	void __iomem *sync_regs = dev->sync_aperture;
+	u32 old, live;
+
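+	/*
+	 * Fold the live hardware counter into the cached minimum with a
+	 * cmpxchg loop so a concurrent update can never move min_val
+	 * backwards.
+	 */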
+	do {
+		old = nvhost_syncpt_read_min(sp, id);
+		live = readl(sync_regs + (host1x_sync_syncpt_0_r() + id * 4));
+	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);
+
+	if (!nvhost_syncpt_check_max(sp, id, live))
+		dev_err(&syncpt_to_dev(sp)->dev->dev,
+				"%s failed: id=%u, min=%d, max=%d\n",
+				__func__, id,
+				nvhost_syncpt_read_min(sp, id),
+				nvhost_syncpt_read_max(sp, id));
+
+	return live;
+}
+
+/**
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
+{
+	struct nvhost_master *dev = syncpt_to_dev(sp);
+	u32 reg_offset = id / 32;
+
+	BUG_ON(!nvhost_module_powered(dev->dev));
+	if (!nvhost_syncpt_client_managed(sp, id)
+			&& nvhost_syncpt_min_eq_max(sp, id)) {
+		dev_err(&syncpt_to_dev(sp)->dev->dev,
+			"Trying to increment syncpoint id %d beyond max\n",
+			id);
+		nvhost_debug_dump(syncpt_to_dev(sp));
+		return;
+	}
+	writel(BIT_MASK(id), dev->sync_aperture +
+			host1x_sync_syncpt_cpu_incr_r() + reg_offset * 4);
+	wmb();
+}
+
+/* remove a wait pointed to by patch_addr */
+static int host1x_syncpt_patch_wait(struct nvhost_syncpt *sp,
+		void *patch_addr)
+{
+	u32 override = nvhost_class_host_wait_syncpt(
+			NVSYNCPT_GRAPHICS_HOST, 0);
+	__raw_writel(override, patch_addr);
+	return 0;
+}
+
+static const char *t20_syncpt_name(struct nvhost_syncpt *sp, u32 id)
+{
+	struct host1x_device_info *info = &syncpt_to_dev(sp)->info;
+	return (id >= info->nb_pts) ? NULL : info->syncpt_names[id];
+}
+
+static void t20_syncpt_debug(struct nvhost_syncpt *sp)
+{
+	u32 i;
+	for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
+		u32 max = nvhost_syncpt_read_max(sp, i);
+		u32 min = nvhost_syncpt_update_min(sp, i);
+		if (!max && !min)
+			continue;
+		dev_info(&syncpt_to_dev(sp)->dev->dev,
+			"id %d (%s) min %d max %d\n",
+			i, syncpt_op().name(sp, i),
+			min, max);
+	}
+
+	for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++) {
+		u32 base_val;
+		t20_syncpt_read_wait_base(sp, i);
+		base_val = sp->base_val[i];
+		if (base_val)
+			dev_info(&syncpt_to_dev(sp)->dev->dev,
+					"waitbase id %d val %d\n",
+					i, base_val);
+	}
+}
+
+static int syncpt_mutex_try_lock(struct nvhost_syncpt *sp,
+		unsigned int idx)
+{
+	void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
+	/* reading the mlock register returns 0 when the lock is acquired,
+	 * and writing 0 releases the lock. */
+	return !!readl(sync_regs + (host1x_sync_mlock_0_r() + idx * 4));
+}
+
+static void syncpt_mutex_unlock(struct nvhost_syncpt *sp,
+	       unsigned int idx)
+{
+	void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
+
+	writel(0, sync_regs + (host1x_sync_mlock_0_r() + idx * 4));
+}
+
+static const struct nvhost_syncpt_ops host1x_syncpt_ops = {
+	.reset = t20_syncpt_reset,
+	.reset_wait_base = t20_syncpt_reset_wait_base,
+	.read_wait_base = t20_syncpt_read_wait_base,
+	.update_min = t20_syncpt_update_min,
+	.cpu_incr = t20_syncpt_cpu_incr,
+	.patch_wait = host1x_syncpt_patch_wait,
+	.debug = t20_syncpt_debug,
+	.name = t20_syncpt_name,
+	.mutex_try_lock = syncpt_mutex_try_lock,
+	.mutex_unlock = syncpt_mutex_unlock,
+};
diff --git a/drivers/staging/tegra/video/host/host1x/host1x_syncpt.h b/drivers/staging/tegra/video/host/host1x/host1x_syncpt.h
new file mode 100644
index 000000000000..a971db8b1d94
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/host1x_syncpt.h
@@ -0,0 +1,62 @@
+/*
+ * drivers/video/tegra/host/host1x/host1x_syncpt.h
+ *
+ * Tegra Graphics Host Syncpoints for HOST1X
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_HOST1X_HOST1X_SYNCPT_H
+#define __NVHOST_HOST1X_HOST1X_SYNCPT_H
+
+/* FIXME:
+ * Sync point ids are now split across two files:
+ * one is this header and the other is include/linux/nvhost.h.
+ * If someone adds a new sync point in the future,
+ * please update both header files.
+ */
+#define NVSYNCPT_CSI_VI_0		     (11)
+#define NVSYNCPT_CSI_VI_1		     (12)
+#define NVSYNCPT_VI_ISP_0		     (13)
+#define NVSYNCPT_VI_ISP_1		     (14)
+#define NVSYNCPT_VI_ISP_2		     (15)
+#define NVSYNCPT_VI_ISP_3		     (16)
+#define NVSYNCPT_VI_ISP_4		     (17)
+#define NVSYNCPT_2D_0			     (18)
+#define NVSYNCPT_2D_1			     (19)
+#define NVSYNCPT_3D			     (22)
+#define NVSYNCPT_MPE			     (23)
+#define NVSYNCPT_MPE_EBM_EOF		     (28)
+#define NVSYNCPT_MPE_WR_SAFE		     (29)
+
+/* sync points that are wholly managed by the client */
+#define NVSYNCPTS_CLIENT_MANAGED ( \
+	BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) | \
+	BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) | \
+	BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) | \
+	BIT(NVSYNCPT_DSI) | \
+	BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1) | \
+	BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) | \
+	BIT(NVSYNCPT_VI_ISP_1) | BIT(NVSYNCPT_VI_ISP_2) | \
+	BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | \
+	BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
+	BIT(NVSYNCPT_2D_1) | BIT(NVSYNCPT_AVP_0))
+
+#define NVWAITBASE_2D_0 (1)
+#define NVWAITBASE_2D_1 (2)
+#define NVWAITBASE_3D   (3)
+#define NVWAITBASE_MPE  (4)
+
+#endif
diff --git a/drivers/staging/tegra/video/host/host1x/hw_host1x01_channel.h b/drivers/staging/tegra/video/host/host1x/hw_host1x01_channel.h
new file mode 100644
index 000000000000..ca2f9a0778cd
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/hw_host1x01_channel.h
@@ -0,0 +1,182 @@
+/*
+ * drivers/video/tegra/host/host1x/hw_host1x01_channel.h
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
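+
+ /*
+  * A minimal usage sketch (illustrative only, not part of the generated
+  * API; 'chan_regs' stands for the channel's mapped register aperture):
+  *
+  *     u32 v = readl(chan_regs + host1x_channel_dmactrl_r());
+  *     v &= ~host1x_channel_dmactrl_dmastop_m();
+  *     v |= host1x_channel_dmactrl_dmastop_f(
+  *             host1x_channel_dmactrl_dmastop_stop_v());
+  *     writel(v, chan_regs + host1x_channel_dmactrl_r());
+  *
+  * This sets the DMASTOP field of DMACTRL without disturbing the other
+  * fields, which is the read-modify-write pattern the _m()/_f() helpers
+  * are designed for.
+  */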
+
+#ifndef __hw_host1x_channel_host1x_h__
+#define __hw_host1x_channel_host1x_h__
+/* This file is autogenerated.  Do not edit. */
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+	return 0x0;
+}
+static inline u32 host1x_channel_fifostat_cfempty_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_channel_fifostat_cfempty_f(u32 v)
+{
+	return (v & 0x1) << 10;
+}
+static inline u32 host1x_channel_fifostat_cfempty_m(void)
+{
+	return 0x1 << 10;
+}
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+	return (r >> 10) & 0x1;
+}
+static inline u32 host1x_channel_fifostat_cfempty_notempty_v(void)
+{
+	return 0;
+}
+static inline u32 host1x_channel_fifostat_cfempty_empty_v(void)
+{
+	return 1;
+}
+static inline u32 host1x_channel_fifostat_outfentries_s(void)
+{
+	return 5;
+}
+static inline u32 host1x_channel_fifostat_outfentries_f(u32 v)
+{
+	return (v & 0x1f) << 24;
+}
+static inline u32 host1x_channel_fifostat_outfentries_m(void)
+{
+	return 0x1f << 24;
+}
+static inline u32 host1x_channel_fifostat_outfentries_v(u32 r)
+{
+	return (r >> 24) & 0x1f;
+}
+static inline u32 host1x_channel_inddata_r(void)
+{
+	return 0xc;
+}
+static inline u32 host1x_channel_dmastart_r(void)
+{
+	return 0x14;
+}
+static inline u32 host1x_channel_dmaput_r(void)
+{
+	return 0x18;
+}
+static inline u32 host1x_channel_dmaget_r(void)
+{
+	return 0x1c;
+}
+static inline u32 host1x_channel_dmaend_r(void)
+{
+	return 0x20;
+}
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+	return 0x24;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_f(u32 v)
+{
+	return (v & 0x1) << 0;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_m(void)
+{
+	return 0x1 << 0;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_run_v(void)
+{
+	return 0;
+}
+static inline u32 host1x_channel_dmactrl_dmastop_stop_v(void)
+{
+	return 1;
+}
+static inline u32 host1x_channel_dmactrl_dmagetrst_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_channel_dmactrl_dmagetrst_f(u32 v)
+{
+	return (v & 0x1) << 1;
+}
+static inline u32 host1x_channel_dmactrl_dmagetrst_m(void)
+{
+	return 0x1 << 1;
+}
+static inline u32 host1x_channel_dmactrl_dmagetrst_v(u32 r)
+{
+	return (r >> 1) & 0x1;
+}
+static inline u32 host1x_channel_dmactrl_dmainitget_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_channel_dmactrl_dmainitget_f(u32 v)
+{
+	return (v & 0x1) << 2;
+}
+static inline u32 host1x_channel_dmactrl_dmainitget_m(void)
+{
+	return 0x1 << 2;
+}
+static inline u32 host1x_channel_dmactrl_dmainitget_v(u32 r)
+{
+	return (r >> 2) & 0x1;
+}
+
+#endif /* __hw_host1x_channel_host1x_h__ */
diff --git a/drivers/staging/tegra/video/host/host1x/hw_host1x01_sync.h b/drivers/staging/tegra/video/host/host1x/hw_host1x01_sync.h
new file mode 100644
index 000000000000..67f0cbfb85b9
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/hw_host1x01_sync.h
@@ -0,0 +1,398 @@
+/*
+ * drivers/video/tegra/host/host1x/hw_host1x01_sync.h
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
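+
+ /*
+  * A minimal usage sketch (illustrative only): the CPU-visible value of
+  * sync point 'id' can be sampled as
+  *
+  *     readl(sync_regs + host1x_sync_syncpt_0_r() + id * 4);
+  *
+  * which is exactly how t20_syncpt_update_min() reads the live counter.
+  */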
+
+#ifndef __hw_host1x_sync_host1x_h__
+#define __hw_host1x_sync_host1x_h__
+/* This file is autogenerated.  Do not edit. */
+
+static inline u32 host1x_sync_intmask_r(void)
+{
+	return 0x4;
+}
+static inline u32 host1x_sync_intc0mask_r(void)
+{
+	return 0x8;
+}
+static inline u32 host1x_sync_hintstatus_r(void)
+{
+	return 0x20;
+}
+static inline u32 host1x_sync_hintmask_r(void)
+{
+	return 0x24;
+}
+static inline u32 host1x_sync_hintstatus_ext_r(void)
+{
+	return 0x28;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_read_int_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_read_int_f(u32 v)
+{
+	return (v & 0x1) << 30;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_read_int_m(void)
+{
+	return 0x1 << 30;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_read_int_v(u32 r)
+{
+	return (r >> 30) & 0x1;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_write_int_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_write_int_f(u32 v)
+{
+	return (v & 0x1) << 31;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_write_int_m(void)
+{
+	return 0x1 << 31;
+}
+static inline u32 host1x_sync_hintstatus_ext_ip_write_int_v(u32 r)
+{
+	return (r >> 31) & 0x1;
+}
+static inline u32 host1x_sync_hintmask_ext_r(void)
+{
+	return 0x2c;
+}
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(void)
+{
+	return 0x40;
+}
+static inline u32 host1x_sync_syncpt_thresh_cpu1_int_status_r(void)
+{
+	return 0x48;
+}
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(void)
+{
+	return 0x60;
+}
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(void)
+{
+	return 0x68;
+}
+static inline u32 host1x_sync_cf0_setup_r(void)
+{
+	return 0x80;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_base_s(void)
+{
+	return 9;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_base_f(u32 v)
+{
+	return (v & 0x1ff) << 0;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_base_m(void)
+{
+	return 0x1ff << 0;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_base_v(u32 r)
+{
+	return (r >> 0) & 0x1ff;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_limit_s(void)
+{
+	return 9;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_limit_f(u32 v)
+{
+	return (v & 0x1ff) << 16;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_limit_m(void)
+{
+	return 0x1ff << 16;
+}
+static inline u32 host1x_sync_cf0_setup_cf0_limit_v(u32 r)
+{
+	return (r >> 16) & 0x1ff;
+}
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+	return 0xac;
+}
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+	return 0xb0;
+}
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+	return 0x1a4;
+}
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+	return 0x1a8;
+}
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+	return 0x1bc;
+}
+static inline u32 host1x_sync_ip_read_timeout_addr_r(void)
+{
+	return 0x1c0;
+}
+static inline u32 host1x_sync_ip_write_timeout_addr_r(void)
+{
+	return 0x1c4;
+}
+static inline u32 host1x_sync_mlock_0_r(void)
+{
+	return 0x2c0;
+}
+static inline u32 host1x_sync_mlock_owner_0_r(void)
+{
+	return 0x340;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_owner_chid_0_s(void)
+{
+	return 4;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_owner_chid_0_f(u32 v)
+{
+	return (v & 0xf) << 8;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_owner_chid_0_m(void)
+{
+	return 0xf << 8;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_owner_chid_0_v(u32 r)
+{
+	return (r >> 8) & 0xf;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_cpu_owns_0_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_cpu_owns_0_f(u32 v)
+{
+	return (v & 0x1) << 1;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_cpu_owns_0_m(void)
+{
+	return 0x1 << 1;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_cpu_owns_0_v(u32 r)
+{
+	return (r >> 1) & 0x1;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_ch_owns_0_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_ch_owns_0_f(u32 v)
+{
+	return (v & 0x1) << 0;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_ch_owns_0_m(void)
+{
+	return 0x1 << 0;
+}
+static inline u32 host1x_sync_mlock_owner_0_mlock_ch_owns_0_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+static inline u32 host1x_sync_syncpt_0_r(void)
+{
+	return 0x400;
+}
+static inline u32 host1x_sync_syncpt_int_thresh_0_r(void)
+{
+	return 0x500;
+}
+static inline u32 host1x_sync_syncpt_base_0_r(void)
+{
+	return 0x600;
+}
+static inline u32 host1x_sync_syncpt_cpu_incr_r(void)
+{
+	return 0x700;
+}
+static inline u32 host1x_sync_cbread0_r(void)
+{
+	return 0x720;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+	return 0x74c;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_addr_s(void)
+{
+	return 9;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_addr_f(u32 v)
+{
+	return (v & 0x1ff) << 0;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_addr_m(void)
+{
+	return 0x1ff << 0;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_addr_v(u32 r)
+{
+	return (r >> 0) & 0x1ff;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_channr_s(void)
+{
+	return 3;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_channr_f(u32 v)
+{
+	return (v & 0x7) << 16;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_channr_m(void)
+{
+	return 0x7 << 16;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_channr_v(u32 r)
+{
+	return (r >> 16) & 0x7;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_ena_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_ena_f(u32 v)
+{
+	return (v & 0x1) << 31;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_ena_m(void)
+{
+	return 0x1 << 31;
+}
+static inline u32 host1x_sync_cfpeek_ctrl_cfpeek_ena_v(u32 r)
+{
+	return (r >> 31) & 0x1;
+}
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+	return 0x750;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+	return 0x754;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_s(void)
+{
+	return 9;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_f(u32 v)
+{
+	return (v & 0x1ff) << 0;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_m(void)
+{
+	return 0x1ff << 0;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+	return (r >> 0) & 0x1ff;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_s(void)
+{
+	return 9;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_f(u32 v)
+{
+	return (v & 0x1ff) << 16;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_m(void)
+{
+	return 0x1ff << 16;
+}
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+	return (r >> 16) & 0x1ff;
+}
+static inline u32 host1x_sync_cbstat_0_r(void)
+{
+	return 0x758;
+}
+static inline u32 host1x_sync_cbstat_0_cboffset0_s(void)
+{
+	return 16;
+}
+static inline u32 host1x_sync_cbstat_0_cboffset0_f(u32 v)
+{
+	return (v & 0xffff) << 0;
+}
+static inline u32 host1x_sync_cbstat_0_cboffset0_m(void)
+{
+	return 0xffff << 0;
+}
+static inline u32 host1x_sync_cbstat_0_cboffset0_v(u32 r)
+{
+	return (r >> 0) & 0xffff;
+}
+static inline u32 host1x_sync_cbstat_0_cbclass0_s(void)
+{
+	return 10;
+}
+static inline u32 host1x_sync_cbstat_0_cbclass0_f(u32 v)
+{
+	return (v & 0x3ff) << 16;
+}
+static inline u32 host1x_sync_cbstat_0_cbclass0_m(void)
+{
+	return 0x3ff << 16;
+}
+static inline u32 host1x_sync_cbstat_0_cbclass0_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+
+#endif /* __hw_host1x_sync_host1x_h__ */
diff --git a/drivers/staging/tegra/video/host/host1x/hw_host1x01_uclass.h b/drivers/staging/tegra/video/host/host1x/hw_host1x01_uclass.h
new file mode 100644
index 000000000000..ed6e4b706ab9
--- /dev/null
+++ b/drivers/staging/tegra/video/host/host1x/hw_host1x01_uclass.h
@@ -0,0 +1,474 @@
+/*
+ * drivers/video/tegra/host/host1x/hw_host1x01_uclass.h
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
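+
+ /*
+  * A minimal usage sketch (illustrative only): the payload of a
+  * WAIT_SYNCPT method can be assembled as
+  *
+  *     host1x_uclass_wait_syncpt_indx_f(id) |
+  *     host1x_uclass_wait_syncpt_thresh_f(thresh)
+  *
+  * and the threshold recovered from a read-back value 'r' with
+  * host1x_uclass_wait_syncpt_thresh_v(r).
+  */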
+
+#ifndef __hw_host1x_uclass_host1x_h__
+#define __hw_host1x_uclass_host1x_h__
+/* This file is autogenerated.  Do not edit. */
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+	return 0x0;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_s(void)
+{
+	return 8;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+	return (v & 0xff) << 8;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_m(void)
+{
+	return 0xff << 8;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_v(u32 r)
+{
+	return (r >> 8) & 0xff;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_immediate_v(void)
+{
+	return 0;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_op_done_v(void)
+{
+	return 1;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_rd_done_v(void)
+{
+	return 2;
+}
+static inline u32 host1x_uclass_incr_syncpt_cond_reg_wr_safe_v(void)
+{
+	return 3;
+}
+static inline u32 host1x_uclass_incr_syncpt_indx_s(void)
+{
+	return 8;
+}
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 0;
+}
+static inline u32 host1x_uclass_incr_syncpt_indx_m(void)
+{
+	return 0xff << 0;
+}
+static inline u32 host1x_uclass_incr_syncpt_indx_v(u32 r)
+{
+	return (r >> 0) & 0xff;
+}
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+	return 0x8;
+}
+static inline u32 host1x_uclass_wait_syncpt_indx_s(void)
+{
+	return 8;
+}
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+static inline u32 host1x_uclass_wait_syncpt_indx_m(void)
+{
+	return 0xff << 24;
+}
+static inline u32 host1x_uclass_wait_syncpt_indx_v(u32 r)
+{
+	return (r >> 24) & 0xff;
+}
+static inline u32 host1x_uclass_wait_syncpt_thresh_s(void)
+{
+	return 24;
+}
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+static inline u32 host1x_uclass_wait_syncpt_thresh_m(void)
+{
+	return 0xffffff << 0;
+}
+static inline u32 host1x_uclass_wait_syncpt_thresh_v(u32 r)
+{
+	return (r >> 0) & 0xffffff;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+	return 0x9;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_indx_s(void)
+{
+	return 8;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_indx_m(void)
+{
+	return 0xff << 24;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_indx_v(u32 r)
+{
+	return (r >> 24) & 0xff;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_s(void)
+{
+	return 8;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 16;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_m(void)
+{
+	return 0xff << 16;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_v(u32 r)
+{
+	return (r >> 16) & 0xff;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_offset_s(void)
+{
+	return 16;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffff) << 0;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_offset_m(void)
+{
+	return 0xffff << 0;
+}
+static inline u32 host1x_uclass_wait_syncpt_base_offset_v(u32 r)
+{
+	return (r >> 0) & 0xffff;
+}
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+	return 0xb;
+}
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_s(void)
+{
+	return 8;
+}
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_m(void)
+{
+	return 0xff << 24;
+}
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_v(u32 r)
+{
+	return (r >> 24) & 0xff;
+}
+static inline u32 host1x_uclass_load_syncpt_base_value_s(void)
+{
+	return 24;
+}
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+static inline u32 host1x_uclass_load_syncpt_base_value_m(void)
+{
+	return 0xffffff << 0;
+}
+static inline u32 host1x_uclass_load_syncpt_base_value_v(u32 r)
+{
+	return (r >> 0) & 0xffffff;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_r(void)
+{
+	return 0xc;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_s(void)
+{
+	return 8;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_m(void)
+{
+	return 0xff << 24;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_v(u32 r)
+{
+	return (r >> 24) & 0xff;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_offset_s(void)
+{
+	return 24;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_offset_m(void)
+{
+	return 0xffffff << 0;
+}
+static inline u32 host1x_uclass_incr_syncpt_base_offset_v(u32 r)
+{
+	return (r >> 0) & 0xffffff;
+}
+static inline u32 host1x_uclass_indoff_r(void)
+{
+	return 0x2d;
+}
+static inline u32 host1x_uclass_indoff_indbe_s(void)
+{
+	return 4;
+}
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+	return (v & 0xf) << 28;
+}
+static inline u32 host1x_uclass_indoff_indbe_m(void)
+{
+	return 0xf << 28;
+}
+static inline u32 host1x_uclass_indoff_indbe_v(u32 r)
+{
+	return (r >> 28) & 0xf;
+}
+static inline u32 host1x_uclass_indoff_autoinc_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+	return (v & 0x1) << 27;
+}
+static inline u32 host1x_uclass_indoff_autoinc_m(void)
+{
+	return 0x1 << 27;
+}
+static inline u32 host1x_uclass_indoff_autoinc_v(u32 r)
+{
+	return (r >> 27) & 0x1;
+}
+static inline u32 host1x_uclass_indoff_spool_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_uclass_indoff_spool_f(u32 v)
+{
+	return (v & 0x1) << 26;
+}
+static inline u32 host1x_uclass_indoff_spool_m(void)
+{
+	return 0x1 << 26;
+}
+static inline u32 host1x_uclass_indoff_spool_v(u32 r)
+{
+	return (r >> 26) & 0x1;
+}
+static inline u32 host1x_uclass_indoff_indoffset_s(void)
+{
+	return 24;
+}
+static inline u32 host1x_uclass_indoff_indoffset_f(u32 v)
+{
+	return (v & 0xffffff) << 2;
+}
+static inline u32 host1x_uclass_indoff_indoffset_m(void)
+{
+	return 0xffffff << 2;
+}
+static inline u32 host1x_uclass_indoff_indoffset_v(u32 r)
+{
+	return (r >> 2) & 0xffffff;
+}
+static inline u32 host1x_uclass_indoff_indmodid_s(void)
+{
+	return 8;
+}
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+	return (v & 0xff) << 18;
+}
+static inline u32 host1x_uclass_indoff_indmodid_m(void)
+{
+	return 0xff << 18;
+}
+static inline u32 host1x_uclass_indoff_indmodid_v(u32 r)
+{
+	return (r >> 18) & 0xff;
+}
+static inline u32 host1x_uclass_indoff_indmodid_host1x_v(void)
+{
+	return 0;
+}
+static inline u32 host1x_uclass_indoff_indmodid_mpe_v(void)
+{
+	return 1;
+}
+static inline u32 host1x_uclass_indoff_indmodid_vi_v(void)
+{
+	return 2;
+}
+static inline u32 host1x_uclass_indoff_indmodid_epp_v(void)
+{
+	return 3;
+}
+static inline u32 host1x_uclass_indoff_indmodid_isp_v(void)
+{
+	return 4;
+}
+static inline u32 host1x_uclass_indoff_indmodid_gr2d_v(void)
+{
+	return 5;
+}
+static inline u32 host1x_uclass_indoff_indmodid_gr3d_v(void)
+{
+	return 6;
+}
+static inline u32 host1x_uclass_indoff_indmodid_display_v(void)
+{
+	return 8;
+}
+static inline u32 host1x_uclass_indoff_indmodid_tvo_v(void)
+{
+	return 11;
+}
+static inline u32 host1x_uclass_indoff_indmodid_displayb_v(void)
+{
+	return 9;
+}
+static inline u32 host1x_uclass_indoff_indmodid_dsi_v(void)
+{
+	return 12;
+}
+static inline u32 host1x_uclass_indoff_indmodid_hdmi_v(void)
+{
+	return 10;
+}
+static inline u32 host1x_uclass_indoff_indmodid_dsib_v(void)
+{
+	return 16;
+}
+static inline u32 host1x_uclass_indoff_indroffset_s(void)
+{
+	return 16;
+}
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+	return (v & 0xffff) << 2;
+}
+static inline u32 host1x_uclass_indoff_indroffset_m(void)
+{
+	return 0xffff << 2;
+}
+static inline u32 host1x_uclass_indoff_indroffset_v(u32 r)
+{
+	return (r >> 2) & 0xffff;
+}
+static inline u32 host1x_uclass_indoff_acctype_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_uclass_indoff_acctype_f(u32 v)
+{
+	return (v & 0x1) << 1;
+}
+static inline u32 host1x_uclass_indoff_acctype_m(void)
+{
+	return 0x1 << 1;
+}
+static inline u32 host1x_uclass_indoff_acctype_v(u32 r)
+{
+	return (r >> 1) & 0x1;
+}
+static inline u32 host1x_uclass_indoff_acctype_reg_v(void)
+{
+	return 0;
+}
+static inline u32 host1x_uclass_indoff_acctype_fb_v(void)
+{
+	return 1;
+}
+static inline u32 host1x_uclass_indoff_rwn_s(void)
+{
+	return 1;
+}
+static inline u32 host1x_uclass_indoff_rwn_f(u32 v)
+{
+	return (v & 0x1) << 0;
+}
+static inline u32 host1x_uclass_indoff_rwn_m(void)
+{
+	return 0x1 << 0;
+}
+static inline u32 host1x_uclass_indoff_rwn_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+static inline u32 host1x_uclass_indoff_rwn_write_v(void)
+{
+	return 0;
+}
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+	return 1;
+}
+static inline u32 host1x_uclass_inddata_r(void)
+{
+	return 0x2e;
+}
+
+#endif /* __hw_host1x_uclass_host1x_h__ */
diff --git a/drivers/staging/tegra/video/host/isp/Makefile b/drivers/staging/tegra/video/host/isp/Makefile
new file mode 100644
index 000000000000..dd0353e2d7ae
--- /dev/null
+++ b/drivers/staging/tegra/video/host/isp/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+nvhost-isp-objs  = \
+		isp.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-isp.o
diff --git a/drivers/staging/tegra/video/host/isp/isp.c b/drivers/staging/tegra/video/host/isp/isp.c
new file mode 100644
index 000000000000..27fd5b739d0e
--- /dev/null
+++ b/drivers/staging/tegra/video/host/isp/isp.c
@@ -0,0 +1,86 @@
+/*
+ * drivers/video/tegra/host/isp/isp.c
+ *
+ * Tegra Graphics ISP
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "dev.h"
+#include "bus_client.h"
+
+static int isp_probe(struct nvhost_device *dev,
+	struct nvhost_device_id *id_table)
+{
+	int err = 0;
+
+	err = nvhost_client_device_get_resources(dev);
+	if (err)
+		return err;
+
+	return nvhost_client_device_init(dev);
+}
+
+static int __exit isp_remove(struct nvhost_device *dev)
+{
+	/* Add clean-up */
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int isp_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+	return nvhost_client_device_suspend(dev);
+}
+
+static int isp_resume(struct nvhost_device *dev)
+{
+	dev_info(&dev->dev, "resuming\n");
+	return 0;
+}
+#endif
+
+static const struct of_device_id isp_of_match[] = {
+	{ .compatible = "nvidia,tegra20-isp", },
+	{ .compatible = "nvidia,tegra30-isp", },
+	{ },
+};
+
+static struct nvhost_driver isp_driver = {
+	.probe = isp_probe,
+	.remove = __exit_p(isp_remove),
+#ifdef CONFIG_PM
+	.suspend = isp_suspend,
+	.resume = isp_resume,
+#endif
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "isp",
+		.of_match_table = of_match_ptr(isp_of_match),
+	}
+};
+
+static int __init isp_init(void)
+{
+	return nvhost_driver_register(&isp_driver);
+}
+
+static void __exit isp_exit(void)
+{
+	nvhost_driver_unregister(&isp_driver);
+}
+
+module_init(isp_init);
+module_exit(isp_exit);
diff --git a/drivers/staging/tegra/video/host/mpe/Makefile b/drivers/staging/tegra/video/host/mpe/Makefile
new file mode 100644
index 000000000000..9550b4312faa
--- /dev/null
+++ b/drivers/staging/tegra/video/host/mpe/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+nvhost-mpe-objs  = \
+		mpe.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-mpe.o
diff --git a/drivers/staging/tegra/video/host/mpe/mpe.c b/drivers/staging/tegra/video/host/mpe/mpe.c
new file mode 100644
index 000000000000..26b82e6209cc
--- /dev/null
+++ b/drivers/staging/tegra/video/host/mpe/mpe.c
@@ -0,0 +1,696 @@
+/*
+ * drivers/video/tegra/host/mpe/mpe.c
+ *
+ * Tegra Graphics Host MPE
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/reset.h>
+
+#include "nvhost_hwctx.h"
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "host1x/host1x01_hardware.h"
+#include "host1x/host1x_hwctx.h"
+#include "t20/t20.h"
+#include "chip_support.h"
+#include "nvhost_memmgr.h"
+
+#include <linux/slab.h>
+
+#include "bus_client.h"
+
+enum {
+	HWCTX_REGINFO_NORMAL = 0,
+	HWCTX_REGINFO_STASH,
+	HWCTX_REGINFO_CALCULATE,
+	HWCTX_REGINFO_WRITEBACK
+};
+
+const struct hwctx_reginfo ctxsave_regs_mpe[] = {
+	HWCTX_REGINFO(0x124,  1, STASH),
+	HWCTX_REGINFO(0x123,  1, STASH),
+	HWCTX_REGINFO(0x103,  1, STASH),
+	HWCTX_REGINFO(0x074,  1, STASH),
+	HWCTX_REGINFO(0x021,  1, NORMAL),
+	HWCTX_REGINFO(0x020,  1, STASH),
+	HWCTX_REGINFO(0x024,  2, NORMAL),
+	HWCTX_REGINFO(0x0e6,  1, NORMAL),
+	HWCTX_REGINFO(0x3fc,  1, NORMAL),
+	HWCTX_REGINFO(0x3d0,  1, NORMAL),
+	HWCTX_REGINFO(0x3d4,  1, NORMAL),
+	HWCTX_REGINFO(0x013,  1, NORMAL),
+	HWCTX_REGINFO(0x022,  1, NORMAL),
+	HWCTX_REGINFO(0x030,  4, NORMAL),
+	HWCTX_REGINFO(0x023,  1, NORMAL),
+	HWCTX_REGINFO(0x070,  1, NORMAL),
+	HWCTX_REGINFO(0x0a0,  9, NORMAL),
+	HWCTX_REGINFO(0x071,  1, NORMAL),
+	HWCTX_REGINFO(0x100,  4, NORMAL),
+	HWCTX_REGINFO(0x104,  2, NORMAL),
+	HWCTX_REGINFO(0x108,  9, NORMAL),
+	HWCTX_REGINFO(0x112,  2, NORMAL),
+	HWCTX_REGINFO(0x114,  1, STASH),
+	HWCTX_REGINFO(0x014,  1, NORMAL),
+	HWCTX_REGINFO(0x072,  1, NORMAL),
+	HWCTX_REGINFO(0x200,  1, NORMAL),
+	HWCTX_REGINFO(0x0d1,  1, NORMAL),
+	HWCTX_REGINFO(0x0d0,  1, NORMAL),
+	HWCTX_REGINFO(0x0c0,  1, NORMAL),
+	HWCTX_REGINFO(0x0c3,  2, NORMAL),
+	HWCTX_REGINFO(0x0d2,  1, NORMAL),
+	HWCTX_REGINFO(0x0d8,  1, NORMAL),
+	HWCTX_REGINFO(0x0e0,  2, NORMAL),
+	HWCTX_REGINFO(0x07f,  2, NORMAL),
+	HWCTX_REGINFO(0x084,  8, NORMAL),
+	HWCTX_REGINFO(0x0d3,  1, NORMAL),
+	HWCTX_REGINFO(0x040, 13, NORMAL),
+	HWCTX_REGINFO(0x050,  6, NORMAL),
+	HWCTX_REGINFO(0x058,  1, NORMAL),
+	HWCTX_REGINFO(0x057,  1, NORMAL),
+	HWCTX_REGINFO(0x111,  1, NORMAL),
+	HWCTX_REGINFO(0x130,  3, NORMAL),
+	HWCTX_REGINFO(0x201,  1, NORMAL),
+	HWCTX_REGINFO(0x068,  2, NORMAL),
+	HWCTX_REGINFO(0x08c,  1, NORMAL),
+	HWCTX_REGINFO(0x0cf,  1, NORMAL),
+	HWCTX_REGINFO(0x082,  2, NORMAL),
+	HWCTX_REGINFO(0x075,  1, NORMAL),
+	HWCTX_REGINFO(0x0e8,  1, NORMAL),
+	HWCTX_REGINFO(0x056,  1, NORMAL),
+	HWCTX_REGINFO(0x057,  1, NORMAL),
+	HWCTX_REGINFO(0x073,  1, CALCULATE),
+	HWCTX_REGINFO(0x074,  1, NORMAL),
+	HWCTX_REGINFO(0x075,  1, NORMAL),
+	HWCTX_REGINFO(0x076,  1, STASH),
+	HWCTX_REGINFO(0x11a,  9, NORMAL),
+	HWCTX_REGINFO(0x123,  1, NORMAL),
+	HWCTX_REGINFO(0x124,  1, NORMAL),
+	HWCTX_REGINFO(0x12a,  5, NORMAL),
+	HWCTX_REGINFO(0x12f,  1, STASH),
+	HWCTX_REGINFO(0x125,  2, NORMAL),
+	HWCTX_REGINFO(0x034,  1, NORMAL),
+	HWCTX_REGINFO(0x133,  2, NORMAL),
+	HWCTX_REGINFO(0x127,  1, NORMAL),
+	HWCTX_REGINFO(0x106,  1, WRITEBACK),
+	HWCTX_REGINFO(0x107,  1, WRITEBACK)
+};
+
+#define NR_STASHES 8
+#define NR_WRITEBACKS 2
+
+#define RC_RAM_LOAD_CMD 0x115
+#define RC_RAM_LOAD_DATA 0x116
+#define RC_RAM_READ_CMD 0x128
+#define RC_RAM_READ_DATA 0x129
+#define RC_RAM_SIZE 692
+
+#define IRFR_RAM_LOAD_CMD 0xc5
+#define IRFR_RAM_LOAD_DATA 0xc6
+#define IRFR_RAM_READ_CMD 0xcd
+#define IRFR_RAM_READ_DATA 0xce
+#define IRFR_RAM_SIZE 408
+
+struct mpe_save_info {
+	u32 in[NR_STASHES];
+	u32 out[NR_WRITEBACKS];
+	unsigned in_pos;
+	unsigned out_pos;
+	u32 h264_mode;
+};
+
+/*** restore ***/
+
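+/*
+ * The restore gather is laid out by setup_restore() as: restore_begin()
+ * (a syncpt base increment and a class switch to MPE), one INCR opcode
+ * plus data per range in ctxsave_regs_mpe, RAM reload command/data pairs
+ * for the RC and IRFR RAMs, and restore_end() (a syncpt increment that
+ * lets completion of the gather be tracked).
+ */
+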
+static unsigned int restore_size;
+
+static void restore_begin(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+	/* set class to host */
+	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_incr_syncpt_base_r(), 1);
+	/* increment sync point base */
+	ptr[1] = nvhost_class_host_incr_syncpt_base(h->waitbase, 1);
+	/* set class to MPE */
+	ptr[2] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
+}
+#define RESTORE_BEGIN_SIZE 3
+
+static void restore_ram(u32 *ptr, unsigned words,
+			unsigned cmd_reg, unsigned data_reg)
+{
+	ptr[0] = nvhost_opcode_imm(cmd_reg, words);
+	ptr[1] = nvhost_opcode_nonincr(data_reg, words);
+}
+#define RESTORE_RAM_SIZE 2
+
+static void restore_end(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+	/* syncpt increment to track restore gather. */
+	ptr[0] = nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_op_done_v(),
+			h->syncpt);
+}
+#define RESTORE_END_SIZE 1
+
+static u32 *setup_restore_regs(u32 *ptr,
+			const struct hwctx_reginfo *regs,
+			unsigned int nr_regs)
+{
+	const struct hwctx_reginfo *rend = regs + nr_regs;
+
+	for ( ; regs != rend; ++regs) {
+		u32 offset = regs->offset;
+		u32 count = regs->count;
+		*ptr++ = nvhost_opcode_incr(offset, count);
+		ptr += count;
+	}
+	return ptr;
+}
+
+static u32 *setup_restore_ram(u32 *ptr, unsigned words,
+			unsigned cmd_reg, unsigned data_reg)
+{
+	restore_ram(ptr, words, cmd_reg, data_reg);
+	return ptr + (RESTORE_RAM_SIZE + words);
+}
+
+static void setup_restore(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+	restore_begin(h, ptr);
+	ptr += RESTORE_BEGIN_SIZE;
+
+	ptr = setup_restore_regs(ptr, ctxsave_regs_mpe,
+				ARRAY_SIZE(ctxsave_regs_mpe));
+
+	ptr = setup_restore_ram(ptr, RC_RAM_SIZE,
+			RC_RAM_LOAD_CMD, RC_RAM_LOAD_DATA);
+
+	ptr = setup_restore_ram(ptr, IRFR_RAM_SIZE,
+			IRFR_RAM_LOAD_CMD, IRFR_RAM_LOAD_DATA);
+
+	restore_end(h, ptr);
+
+	wmb();
+}
+
+/*** save ***/
+
+struct save_info {
+	u32 *ptr;
+	unsigned int save_count;
+	unsigned int restore_count;
+};
+
+static void save_begin(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+	/* MPE: when done, increment syncpt to base+1 */
+	ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
+	ptr[1] = nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_op_done_v(), h->syncpt);
+	/* host: wait for syncpt base+1 */
+	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_wait_syncpt_base_r(), 1);
+	ptr[3] = nvhost_class_host_wait_syncpt_base(h->syncpt, h->waitbase, 1);
+	/* host: signal context read thread to start reading */
+	ptr[4] = nvhost_opcode_imm_incr_syncpt(
+			host1x_uclass_incr_syncpt_cond_immediate_v(),
+			h->syncpt);
+}
+#define SAVE_BEGIN_SIZE 5
+
+static void save_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_indoff_r(), 1);
+	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_MPE,
+						start_reg, true);
+	ptr[2] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
+}
+#define SAVE_DIRECT_SIZE 3
+
+static void save_set_ram_cmd(u32 *ptr, u32 cmd_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+					cmd_reg, 1);
+	ptr[1] = count;
+}
+#define SAVE_SET_RAM_CMD_SIZE 2
+
+static void save_read_ram_data_nasty(u32 *ptr, u32 data_reg)
+{
+	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_indoff_r(), 1);
+	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_MPE,
+						data_reg, false);
+	ptr[2] = nvhost_opcode_imm(host1x_uclass_inddata_r(), 0);
+	/* write junk data to work around a caching problem with register
+	 * memory */
+	ptr[3] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+					data_reg, 1);
+	ptr[4] = 0x99;
+}
+#define SAVE_READ_RAM_DATA_NASTY_SIZE 5
+
+static void save_end(struct host1x_hwctx_handler *h, u32 *ptr)
+{
+	/* Wait for context read service to finish (cpu incr 3) */
+	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					host1x_uclass_wait_syncpt_base_r(), 1);
+	ptr[1] = nvhost_class_host_wait_syncpt_base(h->syncpt, h->waitbase, 3);
+	/* Advance syncpoint base */
+	ptr[2] = nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1);
+	ptr[3] = nvhost_class_host_incr_syncpt_base(h->waitbase, 3);
+	/* set class back to the unit */
+	ptr[4] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
+}
+#define SAVE_END_SIZE 5
+
+static void setup_save_regs(struct save_info *info,
+			const struct hwctx_reginfo *regs,
+			unsigned int nr_regs)
+{
+	const struct hwctx_reginfo *rend = regs + nr_regs;
+	u32 *ptr = info->ptr;
+	unsigned int save_count = info->save_count;
+	unsigned int restore_count = info->restore_count;
+
+	for ( ; regs != rend; ++regs) {
+		u32 offset = regs->offset;
+		u32 count = regs->count;
+		if (regs->type != HWCTX_REGINFO_WRITEBACK) {
+			if (ptr) {
+				save_direct(ptr, offset, count);
+				ptr += SAVE_DIRECT_SIZE;
+				memset(ptr, 0, count * 4);
+				ptr += count;
+			}
+			save_count += (SAVE_DIRECT_SIZE + count);
+		}
+		restore_count += (1 + count);
+	}
+
+	info->ptr = ptr;
+	info->save_count = save_count;
+	info->restore_count = restore_count;
+}
+
+static void setup_save_ram_nasty(struct save_info *info, unsigned words,
+					unsigned cmd_reg, unsigned data_reg)
+{
+	u32 *ptr = info->ptr;
+	unsigned int save_count = info->save_count;
+	unsigned int restore_count = info->restore_count;
+	unsigned i;
+
+	if (ptr) {
+		save_set_ram_cmd(ptr, cmd_reg, words);
+		ptr += SAVE_SET_RAM_CMD_SIZE;
+		for (i = words; i; --i) {
+			save_read_ram_data_nasty(ptr, data_reg);
+			ptr += SAVE_READ_RAM_DATA_NASTY_SIZE;
+		}
+	}
+
+	save_count += SAVE_SET_RAM_CMD_SIZE;
+	save_count += words * SAVE_READ_RAM_DATA_NASTY_SIZE;
+	restore_count += (RESTORE_RAM_SIZE + words);
+
+	info->ptr = ptr;
+	info->save_count = save_count;
+	info->restore_count = restore_count;
+}
+
+static void setup_save(struct host1x_hwctx_handler *h, u32 *ptr)
+{
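+	/*
+	 * Two-pass builder: with ptr == NULL only the save_count and
+	 * restore_count bookkeeping runs (a sizing pass); with a real
+	 * buffer the save gather is emitted as well.
+	 */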
+	struct save_info info = {
+		ptr,
+		SAVE_BEGIN_SIZE,
+		RESTORE_BEGIN_SIZE
+	};
+
+	if (info.ptr) {
+		save_begin(h, info.ptr);
+		info.ptr += SAVE_BEGIN_SIZE;
+	}
+
+	setup_save_regs(&info, ctxsave_regs_mpe,
+			ARRAY_SIZE(ctxsave_regs_mpe));
+
+	setup_save_ram_nasty(&info, RC_RAM_SIZE,
+			RC_RAM_READ_CMD, RC_RAM_READ_DATA);
+
+	setup_save_ram_nasty(&info, IRFR_RAM_SIZE,
+			IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);
+
+	if (info.ptr) {
+		save_end(h, info.ptr);
+		info.ptr += SAVE_END_SIZE;
+	}
+
+	wmb();
+
+	h->save_size = info.save_count + SAVE_END_SIZE;
+	restore_size = info.restore_count + RESTORE_END_SIZE;
+}
+
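+/*
+ * Recompute the buffer-fullness word from the values stashed during the
+ * save: msi->in[] holds the buffer-full count, byte length, drain rate,
+ * rep_frame and the h264 mode bit; the two results are placed in
+ * msi->out[] for the WRITEBACK registers (0x106 and 0x107).
+ */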
+static u32 calculate_mpe(u32 word, struct mpe_save_info *msi)
+{
+	u32 buffer_full_read = msi->in[0] & 0x01ffffff;
+	u32 byte_len = msi->in[1];
+	u32 drain = (msi->in[2] >> 2) & 0x007fffff;
+	u32 rep_frame = msi->in[3] & 0x0000ffff;
+	u32 h264_mode = (msi->in[4] >> 11) & 1;
+	int new_buffer_full;
+
+	if (h264_mode)
+		byte_len >>= 3;
+	new_buffer_full = buffer_full_read + byte_len - (drain * 4);
+	msi->out[0] = max(0, new_buffer_full);
+	msi->out[1] = rep_frame;
+	if (rep_frame == 0)
+		word &= 0xffff0000;
+	return word;
+}
+
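+/*
+ * Drain the channel read FIFO into the restore buffer: NORMAL ranges are
+ * copied verbatim, STASH values are additionally kept in msi->in for
+ * calculate_mpe(), CALCULATE values are rewritten from the stashed
+ * inputs, and WRITEBACK slots take msi->out instead of FIFO data.
+ */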
+static u32 *save_regs(u32 *ptr, unsigned int *pending,
+		struct nvhost_channel *channel,
+		const struct hwctx_reginfo *regs,
+		unsigned int nr_regs,
+		struct mpe_save_info *msi)
+{
+	const struct hwctx_reginfo *rend = regs + nr_regs;
+
+	for ( ; regs != rend; ++regs) {
+		u32 count = regs->count;
+		++ptr; /* restore incr */
+		if (regs->type == HWCTX_REGINFO_NORMAL) {
+			nvhost_channel_drain_read_fifo(channel,
+						ptr, count, pending);
+			ptr += count;
+		} else {
+			u32 word;
+			if (regs->type == HWCTX_REGINFO_WRITEBACK) {
+				BUG_ON(msi->out_pos >= NR_WRITEBACKS);
+				word = msi->out[msi->out_pos++];
+			} else {
+				nvhost_channel_drain_read_fifo(channel,
+						&word, 1, pending);
+				if (regs->type == HWCTX_REGINFO_STASH) {
+					BUG_ON(msi->in_pos >= NR_STASHES);
+					msi->in[msi->in_pos++] = word;
+				} else {
+					word = calculate_mpe(word, msi);
+				}
+			}
+			*ptr++ = word;
+		}
+	}
+	return ptr;
+}
+
+static u32 *save_ram(u32 *ptr, unsigned int *pending,
+		struct nvhost_channel *channel,
+		unsigned words,	unsigned cmd_reg, unsigned data_reg)
+{
+	int err = 0;
+	ptr += RESTORE_RAM_SIZE;
+	err = nvhost_channel_drain_read_fifo(channel, ptr, words, pending);
+	WARN_ON(err);
+	return ptr + words;
+}
+
+/*** ctxmpe ***/
+
+static struct nvhost_hwctx *ctxmpe_alloc(struct nvhost_hwctx_handler *h,
+		struct nvhost_channel *ch)
+{
+	struct mem_mgr *memmgr = nvhost_get_host(ch->dev)->memmgr;
+	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
+	struct host1x_hwctx *ctx;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
+	ctx->restore = mem_op().alloc(memmgr, restore_size * 4, 32,
+				mem_mgr_flag_write_combine);
+	if (IS_ERR_OR_NULL(ctx->restore)) {
+		kfree(ctx);
+		return NULL;
+	}
+
+	ctx->restore_virt = mem_op().mmap(ctx->restore);
+	if (!ctx->restore_virt) {
+		mem_op().put(memmgr, ctx->restore);
+		kfree(ctx);
+		return NULL;
+	}
+
+	kref_init(&ctx->hwctx.ref);
+	ctx->hwctx.h = &p->h;
+	ctx->hwctx.channel = ch;
+	ctx->hwctx.valid = false;
+	ctx->save_incrs = 3;
+	ctx->save_thresh = 2;
+	ctx->save_slots = p->save_slots;
+	ctx->restore_phys = mem_op().pin(memmgr, ctx->restore);
+	ctx->restore_size = restore_size;
+	ctx->restore_incrs = 1;
+
+	setup_restore(p, ctx->restore_virt);
+
+	return &ctx->hwctx;
+}
+
+static void ctxmpe_get(struct nvhost_hwctx *ctx)
+{
+	kref_get(&ctx->ref);
+}
+
+static void ctxmpe_free(struct kref *ref)
+{
+	struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
+	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+	struct mem_mgr *memmgr = nvhost_get_host(nctx->channel->dev)->memmgr;
+
+	if (ctx->restore_virt)
+		mem_op().munmap(ctx->restore, ctx->restore_virt);
+	mem_op().unpin(memmgr, ctx->restore);
+	mem_op().put(memmgr, ctx->restore);
+	kfree(ctx);
+}
+
+static void ctxmpe_put(struct nvhost_hwctx *ctx)
+{
+	kref_put(&ctx->ref, ctxmpe_free);
+}
+
+static void ctxmpe_save_push(struct nvhost_hwctx *nctx,
+		struct nvhost_cdma *cdma)
+{
+	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+	struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);
+	nvhost_cdma_push_gather(cdma,
+			nvhost_get_host(nctx->channel->dev)->memmgr,
+			h->save_buf,
+			0,
+			nvhost_opcode_gather(h->save_size),
+			h->save_phys);
+}
+
+static void ctxmpe_save_service(struct nvhost_hwctx *nctx)
+{
+	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
+	struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);
+
+	u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
+	unsigned int pending = 0;
+	struct mpe_save_info msi;
+
+	msi.in_pos = 0;
+	msi.out_pos = 0;
+
+	ptr = save_regs(ptr, &pending, nctx->channel,
+			ctxsave_regs_mpe, ARRAY_SIZE(ctxsave_regs_mpe), &msi);
+
+	ptr = save_ram(ptr, &pending, nctx->channel,
+		RC_RAM_SIZE, RC_RAM_READ_CMD, RC_RAM_READ_DATA);
+
+	ptr = save_ram(ptr, &pending, nctx->channel,
+		IRFR_RAM_SIZE, IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);
+
+	wmb();
+	nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
+			h->syncpt);
+}
+
+struct nvhost_hwctx_handler *nvhost_mpe_ctxhandler_init(u32 syncpt,
+	u32 waitbase, struct nvhost_channel *ch)
+{
+	struct mem_mgr *memmgr;
+	u32 *save_ptr;
+	struct host1x_hwctx_handler *p;
+
+	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return NULL;
+
+	memmgr = nvhost_get_host(ch->dev)->memmgr;
+
+	p->syncpt = syncpt;
+	p->waitbase = waitbase;
+
+	setup_save(p, NULL);
+
+	p->save_buf = mem_op().alloc(memmgr, p->save_size * 4, 32,
+				mem_mgr_flag_write_combine);
+	if (IS_ERR_OR_NULL(p->save_buf)) {
+		kfree(p);
+		return NULL;
+	}
+
+	save_ptr = mem_op().mmap(p->save_buf);
+	if (!save_ptr) {
+		mem_op().put(memmgr, p->save_buf);
+		kfree(p);
+		return NULL;
+	}
+
+	p->save_phys = mem_op().pin(memmgr, p->save_buf);
+	p->save_slots = 1;
+
+	setup_save(p, save_ptr);
+
+	p->h.alloc = ctxmpe_alloc;
+	p->h.save_push = ctxmpe_save_push;
+	p->h.save_service = ctxmpe_save_service;
+	p->h.get = ctxmpe_get;
+	p->h.put = ctxmpe_put;
+
+	return &p->h;
+}
+
+int nvhost_mpe_prepare_power_off(struct nvhost_device *dev)
+{
+	return nvhost_channel_save_context(dev->channel);
+}
+
+enum mpe_ip_ver {
+	mpe_01 = 1,
+	mpe_02,
+};
+
+struct mpe_desc {
+	int (*prepare_poweroff)(struct nvhost_device *dev);
+	struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt,
+			u32 waitbase, struct nvhost_channel *ch);
+};
+
+static const struct mpe_desc mpe[] = {
+	[mpe_01] = {
+		.prepare_poweroff = nvhost_mpe_prepare_power_off,
+		.alloc_hwctx_handler = nvhost_mpe_ctxhandler_init,
+	},
+	[mpe_02] = {
+		.prepare_poweroff = nvhost_mpe_prepare_power_off,
+		.alloc_hwctx_handler = nvhost_mpe_ctxhandler_init,
+	},
+};
+
+static struct nvhost_device_id mpe_id[] = {
+	{ "mpe", mpe_01 },
+	{ "mpe", mpe_02 },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(nvhost, mpe_id);
+
+static int mpe_probe(struct nvhost_device *dev,
+	struct nvhost_device_id *id_table)
+{
+	int err = 0;
+	int index = 0;
+	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+	index = id_table->version;
+
+	drv->prepare_poweroff		= mpe[index].prepare_poweroff;
+	drv->alloc_hwctx_handler	= mpe[index].alloc_hwctx_handler;
+
+	err = nvhost_client_device_get_resources(dev);
+	if (err)
+		return err;
+
+	dev->rst = devm_reset_control_get(&dev->dev, "mpe");
+	if (IS_ERR(dev->rst)) {
+		dev_err(&dev->dev, "failed to get reset\n");
+		return PTR_ERR(dev->rst);
+	}
+
+	return nvhost_client_device_init(dev);
+}
+
+static int __exit mpe_remove(struct nvhost_device *dev)
+{
+	/* Add clean-up */
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int mpe_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+	return nvhost_client_device_suspend(dev);
+}
+
+static int mpe_resume(struct nvhost_device *dev)
+{
+	dev_info(&dev->dev, "resuming\n");
+	return 0;
+}
+#endif
+
+static struct of_device_id mpe_of_match[] = {
+	{ .compatible = "nvidia,tegra20-mpe", },
+	{ .compatible = "nvidia,tegra30-mpe", },
+	{ },
+};
+
+static struct nvhost_driver mpe_driver = {
+	.probe = mpe_probe,
+	.remove = __exit_p(mpe_remove),
+#ifdef CONFIG_PM
+	.suspend = mpe_suspend,
+	.resume = mpe_resume,
+#endif
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "mpe",
+		.of_match_table = of_match_ptr(mpe_of_match),
+	},
+	.id_table = mpe_id,
+};
+
+static int __init mpe_init(void)
+{
+	return nvhost_driver_register(&mpe_driver);
+}
+
+static void __exit mpe_exit(void)
+{
+	nvhost_driver_unregister(&mpe_driver);
+}
+
+module_init(mpe_init);
+module_exit(mpe_exit);
diff --git a/drivers/staging/tegra/video/host/mpe/mpe.h b/drivers/staging/tegra/video/host/mpe/mpe.h
new file mode 100644
index 000000000000..1bc2a8a04c1a
--- /dev/null
+++ b/drivers/staging/tegra/video/host/mpe/mpe.h
@@ -0,0 +1,32 @@
+/*
+ * drivers/video/tegra/host/mpe/mpe.h
+ *
+ * Tegra Graphics Host MPE
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_MPE_MPE_H
+#define __NVHOST_MPE_MPE_H
+
+struct nvhost_hwctx_handler;
+struct nvhost_device;
+
+struct nvhost_hwctx_handler *nvhost_mpe_ctxhandler_init(
+		u32 syncpt, u32 waitbase,
+		struct nvhost_channel *ch);
+int nvhost_mpe_prepare_power_off(struct nvhost_device *dev);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/nvhost_acm.c b/drivers/staging/tegra/video/host/nvhost_acm.c
new file mode 100644
index 000000000000..df2f0498168b
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_acm.c
@@ -0,0 +1,671 @@
+/*
+ * drivers/video/tegra/host/nvhost_acm.c
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_acm.h"
+#include "dev.h"
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <mach/powergate.h>
+#include <mach/clk.h>
+
+#define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT	(2 * HZ)
+#define POWERGATE_DELAY			10
+#define MAX_DEVID_LENGTH			16
+
+DEFINE_MUTEX(client_list_lock);
+
+struct nvhost_module_client {
+	struct list_head node;
+	unsigned long rate[NVHOST_MODULE_MAX_CLOCKS];
+	void *priv;
+};
+
+static void do_powergate_locked(struct nvhost_device *dev)
+{
+	int id = dev->powergate_ids[0];
+
+	dev_info(&dev->dev, "%s id=%d\n", __func__, id);
+
+	if (id != -1 && tegra_powergate_is_powered(id)) {
+		clk_disable_unprepare(dev->clk[0]);
+		WARN_ON(tegra_powergate_power_off(id));
+	}
+}
+
+static void do_unpowergate_locked(struct nvhost_device *dev)
+{
+	int id = dev->powergate_ids[0];
+
+	dev_info(&dev->dev, "%s id=%d\n", __func__, id);
+
+	if (id != -1 && dev->rst) {
+		WARN_ON(tegra_powergate_sequence_power_up(id, dev->clk[0], dev->rst));
+		clk_disable_unprepare(dev->clk[0]);
+	}
+}
+
+static void do_module_reset_locked(struct nvhost_device *dev)
+{
+	/* assert module and mc client reset */
+	if (dev->rst) {
+		if (dev->powergate_ids[0] != -1)
+			tegra_powergate_mc_disable(dev->powergate_ids[0]);
+
+		if (dev->powergate_ids[1] != -1)
+			tegra_powergate_mc_disable(dev->powergate_ids[1]);
+
+		reset_control_assert(dev->rst);
+
+		if (dev->powergate_ids[0] != -1)
+			tegra_powergate_mc_flush(dev->powergate_ids[0]);
+
+		if (dev->powergate_ids[1] != -1)
+			tegra_powergate_mc_flush(dev->powergate_ids[1]);
+	}
+
+	udelay(POWERGATE_DELAY);
+
+	/* deassert reset */
+	if (dev->rst) {
+		if (dev->powergate_ids[0] != -1)
+			tegra_powergate_mc_flush_done(dev->powergate_ids[0]);
+
+		if (dev->powergate_ids[1] != -1)
+			tegra_powergate_mc_flush_done(dev->powergate_ids[1]);
+
+		reset_control_deassert(dev->rst);
+
+		if (dev->powergate_ids[0] != -1)
+			tegra_powergate_mc_enable(dev->powergate_ids[0]);
+
+		if (dev->powergate_ids[1] != -1)
+			tegra_powergate_mc_enable(dev->powergate_ids[1]);
+	}
+}
+
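+/*
+ * Reset ordering summary (comment added for clarity): memory-client traffic
+ * is quiesced around the module reset --
+ *
+ *	mc_disable -> assert reset -> mc_flush -> udelay ->
+ *	mc_flush_done -> deassert reset -> mc_enable
+ */
+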
+void nvhost_module_reset(struct nvhost_device *dev)
+{
+	dev_dbg(&dev->dev,
+		"%s: asserting %s module reset (id %d, id2 %d)\n",
+		__func__, dev->name,
+		dev->powergate_ids[0], dev->powergate_ids[1]);
+
+	mutex_lock(&dev->lock);
+	do_module_reset_locked(dev);
+	mutex_unlock(&dev->lock);
+
+	dev_dbg(&dev->dev, "%s: module %s out of reset\n",
+		__func__, dev->name);
+}
+
+static void to_state_clockgated_locked(struct nvhost_device *dev)
+{
+	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+	if (dev->powerstate == NVHOST_POWER_STATE_RUNNING) {
+		int i, err;
+		if (drv->prepare_clockoff) {
+			err = drv->prepare_clockoff(dev);
+			if (err) {
+				dev_err(&dev->dev, "error clock gating");
+				return;
+			}
+		}
+		for (i = 0; i < dev->num_clks; i++)
+			clk_disable_unprepare(dev->clk[i]);
+		if (dev->dev.parent)
+			nvhost_module_idle(to_nvhost_device(dev->dev.parent));
+	} else if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED
+			&& dev->can_powergate) {
+		do_unpowergate_locked(dev);
+
+		if (dev->powerup_reset)
+			do_module_reset_locked(dev);
+	}
+	dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
+}
+
+static void to_state_running_locked(struct nvhost_device *dev)
+{
+	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+	int prev_state = dev->powerstate;
+
+	if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED)
+		to_state_clockgated_locked(dev);
+
+	if (dev->powerstate == NVHOST_POWER_STATE_CLOCKGATED) {
+		int i;
+
+		if (dev->dev.parent)
+			nvhost_module_busy(to_nvhost_device(dev->dev.parent));
+
+		for (i = 0; i < dev->num_clks; i++) {
+			int err = clk_prepare_enable(dev->clk[i]);
+			if (err) {
+				dev_err(&dev->dev, "Cannot turn on clock %s",
+					dev->clocks[i].name);
+				return;
+			}
+		}
+
+		/* Invoke callback after enabling clock. This is used for
+		 * re-enabling host1x interrupts. */
+		if (prev_state == NVHOST_POWER_STATE_CLOCKGATED
+				&& drv->finalize_clockon)
+			drv->finalize_clockon(dev);
+
+		/* Invoke callback after power un-gating. This is used for
+		 * restoring context. */
+		if (prev_state == NVHOST_POWER_STATE_POWERGATED
+				&& drv->finalize_poweron)
+			drv->finalize_poweron(dev);
+	}
+	dev->powerstate = NVHOST_POWER_STATE_RUNNING;
+}
+
+/* This gets called from powergate_handler() and from module suspend.
+ * Module suspend is done for all modules, runtime power gating only
+ * for modules with can_powergate set.
+ */
+static int to_state_powergated_locked(struct nvhost_device *dev)
+{
+	int err = 0;
+	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+	if (drv->prepare_poweroff
+			&& dev->powerstate != NVHOST_POWER_STATE_POWERGATED) {
+		/* Clock needs to be on in prepare_poweroff */
+		to_state_running_locked(dev);
+		err = drv->prepare_poweroff(dev);
+		if (err)
+			return err;
+	}
+
+	if (dev->powerstate == NVHOST_POWER_STATE_RUNNING)
+		to_state_clockgated_locked(dev);
+
+	if (dev->can_powergate)
+		do_powergate_locked(dev);
+
+	dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
+	return 0;
+}
+
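+/*
+ * Power-state summary (comment added for clarity):
+ *
+ *	POWERGATED --unpowergate--> CLOCKGATED --clk on--> RUNNING
+ *	RUNNING --clk off--> CLOCKGATED --powergate--> POWERGATED
+ *
+ * to_state_running_locked() walks left to right; the powerstate_down
+ * worker below walks right to left after the configured gating delays.
+ */
+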
+static void schedule_powergating_locked(struct nvhost_device *dev)
+{
+	if (dev->can_powergate)
+		schedule_delayed_work(&dev->powerstate_down,
+				msecs_to_jiffies(dev->powergate_delay));
+}
+
+static void schedule_clockgating_locked(struct nvhost_device *dev)
+{
+	schedule_delayed_work(&dev->powerstate_down,
+			msecs_to_jiffies(dev->clockgate_delay));
+}
+
+void nvhost_module_busy(struct nvhost_device *dev)
+{
+	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+	if (drv->busy)
+		drv->busy(dev);
+
+	mutex_lock(&dev->lock);
+	cancel_delayed_work(&dev->powerstate_down);
+
+	dev->refcount++;
+	if (dev->refcount > 0 && !nvhost_module_powered(dev))
+		to_state_running_locked(dev);
+	mutex_unlock(&dev->lock);
+}
+
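+/*
+ * Usage sketch (illustrative only): callers bracket hardware access with a
+ * busy/idle pair so the refcount keeps the module powered while work is
+ * outstanding:
+ *
+ *	nvhost_module_busy(dev);
+ *	... program registers / submit work ...
+ *	nvhost_module_idle(dev);  // schedules clock gating at refcount == 0
+ */
+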
+static void powerstate_down_handler(struct work_struct *work)
+{
+	struct nvhost_device *dev;
+
+	dev = container_of(to_delayed_work(work),
+			struct nvhost_device,
+			powerstate_down);
+
+	mutex_lock(&dev->lock);
+	if (dev->refcount == 0) {
+		switch (dev->powerstate) {
+		case NVHOST_POWER_STATE_RUNNING:
+			to_state_clockgated_locked(dev);
+			schedule_powergating_locked(dev);
+			break;
+		case NVHOST_POWER_STATE_CLOCKGATED:
+			if (to_state_powergated_locked(dev))
+				schedule_powergating_locked(dev);
+			break;
+		default:
+			break;
+		}
+	}
+	mutex_unlock(&dev->lock);
+}
+
+void nvhost_module_idle_mult(struct nvhost_device *dev, int refs)
+{
+	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+	bool kick = false;
+
+	mutex_lock(&dev->lock);
+	dev->refcount -= refs;
+	if (dev->refcount == 0) {
+		if (nvhost_module_powered(dev))
+			schedule_clockgating_locked(dev);
+		kick = true;
+	}
+	mutex_unlock(&dev->lock);
+
+	if (kick) {
+		wake_up(&dev->idle_wq);
+
+		if (drv->idle)
+			drv->idle(dev);
+	}
+}
+
+int nvhost_module_get_rate(struct nvhost_device *dev, unsigned long *rate,
+		int index)
+{
+	struct clk *c;
+
+	c = dev->clk[index];
+	if (IS_ERR_OR_NULL(c))
+		return -EINVAL;
+
+	/* Need to enable client to get correct rate */
+	nvhost_module_busy(dev);
+	*rate = clk_get_rate(c);
+	nvhost_module_idle(dev);
+	return 0;
+}
+
+static int nvhost_module_update_rate(struct nvhost_device *dev, int index)
+{
+	unsigned long rate = 0;
+	struct nvhost_module_client *m;
+	struct clk *c;
+
+	c = dev->clk[index];
+	if (IS_ERR_OR_NULL(c))
+		return -EINVAL;
+
+	list_for_each_entry(m, &dev->client_list, node) {
+		rate = max(m->rate[index], rate);
+	}
+	if (!rate)
+		rate = clk_round_rate(dev->clk[index],
+				dev->clocks[index].default_rate);
+
+	return clk_set_rate(dev->clk[index], rate);
+}
+
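+/*
+ * Rate aggregation example (hypothetical numbers): if client A has requested
+ * 300 MHz and client B 150 MHz on the same clock index, the loop above picks
+ * max(300 MHz, 150 MHz) = 300 MHz; with no client requests the clock falls
+ * back to the rounded default_rate.
+ */
+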
+int nvhost_module_set_rate(struct nvhost_device *dev, void *priv,
+		unsigned long rate, int index)
+{
+	struct nvhost_module_client *m;
+	int i, ret = 0;
+
+	mutex_lock(&client_list_lock);
+	list_for_each_entry(m, &dev->client_list, node) {
+		if (m->priv == priv) {
+			for (i = 0; i < dev->num_clks; i++)
+				m->rate[i] = clk_round_rate(dev->clk[i], rate);
+			break;
+		}
+	}
+
+	for (i = 0; i < dev->num_clks; i++) {
+		ret = nvhost_module_update_rate(dev, i);
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&client_list_lock);
+	return ret;
+}
+
+int nvhost_module_add_client(struct nvhost_device *dev, void *priv)
+{
+	int i;
+	unsigned long rate;
+	struct nvhost_module_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&client->node);
+	client->priv = priv;
+
+	for (i = 0; i < dev->num_clks; i++) {
+		rate = clk_round_rate(dev->clk[i],
+				dev->clocks[i].default_rate);
+		client->rate[i] = rate;
+	}
+	mutex_lock(&client_list_lock);
+	list_add_tail(&client->node, &dev->client_list);
+	mutex_unlock(&client_list_lock);
+	return 0;
+}
+
+void nvhost_module_remove_client(struct nvhost_device *dev, void *priv)
+{
+	int i;
+	struct nvhost_module_client *m;
+	int found = 0;
+
+	mutex_lock(&client_list_lock);
+	list_for_each_entry(m, &dev->client_list, node) {
+		if (priv == m->priv) {
+			list_del(&m->node);
+			found = 1;
+			break;
+		}
+	}
+	if (found) {
+		kfree(m);
+		for (i = 0; i < dev->num_clks; i++)
+			nvhost_module_update_rate(dev, i);
+	}
+	mutex_unlock(&client_list_lock);
+}
+
+static ssize_t refcount_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int ret;
+	struct nvhost_device_power_attr *power_attribute =
+		container_of(attr, struct nvhost_device_power_attr,
+			power_attr[NVHOST_POWER_SYSFS_ATTRIB_REFCOUNT]);
+	struct nvhost_device *dev = power_attribute->ndev;
+
+	mutex_lock(&dev->lock);
+	ret = sprintf(buf, "%d\n", dev->refcount);
+	mutex_unlock(&dev->lock);
+
+	return ret;
+}
+
+static ssize_t powergate_delay_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int powergate_delay = 0, ret = 0;
+	struct nvhost_device_power_attr *power_attribute =
+		container_of(attr, struct nvhost_device_power_attr,
+			power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY]);
+	struct nvhost_device *dev = power_attribute->ndev;
+
+	if (!dev->can_powergate) {
+		dev_info(&dev->dev, "does not support power-gating\n");
+		return count;
+	}
+
+	mutex_lock(&dev->lock);
+	ret = sscanf(buf, "%d", &powergate_delay);
+	if (ret == 1 && powergate_delay >= 0)
+		dev->powergate_delay = powergate_delay;
+	else
+		dev_err(&dev->dev, "Invalid powergate delay\n");
+	mutex_unlock(&dev->lock);
+
+	return count;
+}
+
+static ssize_t powergate_delay_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int ret;
+	struct nvhost_device_power_attr *power_attribute =
+		container_of(attr, struct nvhost_device_power_attr,
+			power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY]);
+	struct nvhost_device *dev = power_attribute->ndev;
+
+	mutex_lock(&dev->lock);
+	ret = sprintf(buf, "%d\n", dev->powergate_delay);
+	mutex_unlock(&dev->lock);
+
+	return ret;
+}
+
+static ssize_t clockgate_delay_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int clockgate_delay = 0, ret = 0;
+	struct nvhost_device_power_attr *power_attribute =
+		container_of(attr, struct nvhost_device_power_attr,
+			power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY]);
+	struct nvhost_device *dev = power_attribute->ndev;
+
+	mutex_lock(&dev->lock);
+	ret = sscanf(buf, "%d", &clockgate_delay);
+	if (ret == 1 && clockgate_delay >= 0)
+		dev->clockgate_delay = clockgate_delay;
+	else
+		dev_err(&dev->dev, "Invalid clockgate delay\n");
+	mutex_unlock(&dev->lock);
+
+	return count;
+}
+
+static ssize_t clockgate_delay_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int ret;
+	struct nvhost_device_power_attr *power_attribute =
+		container_of(attr, struct nvhost_device_power_attr,
+			power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY]);
+	struct nvhost_device *dev = power_attribute->ndev;
+
+	mutex_lock(&dev->lock);
+	ret = sprintf(buf, "%d\n", dev->clockgate_delay);
+	mutex_unlock(&dev->lock);
+
+	return ret;
+}
+
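+/*
+ * The show/store handlers above are wired to files under the "acm" kobject
+ * created in nvhost_module_init() below. Illustrative shell usage (device
+ * path is hypothetical):
+ *
+ *	# cat /sys/devices/.../mpe/acm/clockgate_delay
+ *	# echo 50 > /sys/devices/.../mpe/acm/clockgate_delay
+ */
+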
+int nvhost_module_init(struct nvhost_device *dev)
+{
+	int i = 0, err = 0;
+	struct kobj_attribute *attr = NULL;
+
+	/* initialize clocks to known state */
+	INIT_LIST_HEAD(&dev->client_list);
+	while (i < NVHOST_MODULE_MAX_CLOCKS && dev->clocks[i].name) {
+		char devname[MAX_DEVID_LENGTH];
+		long rate = dev->clocks[i].default_rate;
+		struct clk *c;
+
+		snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", dev->name);
+// 		c = clk_get_sys(devname, dev->clocks[i].name);
+		c = clk_get(&dev->dev, dev->clocks[i].name);
+		if (IS_ERR(c)) {
+			dev_err(&dev->dev, "Cannot get clock %s\n",
+					dev->clocks[i].name);
+			i++;
+			continue;
+		}
+
+		rate = clk_round_rate(c, rate);
+		clk_prepare_enable(c);
+		clk_set_rate(c, rate);
+		clk_disable_unprepare(c);
+		dev->clk[i] = c;
+		i++;
+	}
+	dev->num_clks = i;
+
+	mutex_init(&dev->lock);
+	init_waitqueue_head(&dev->idle_wq);
+	INIT_DELAYED_WORK(&dev->powerstate_down, powerstate_down_handler);
+
+	/* power gate units that we can power gate */
+	if (dev->can_powergate) {
+		do_powergate_locked(dev);
+		dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
+	} else {
+		do_unpowergate_locked(dev);
+		dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
+	}
+
+	/* Init the power sysfs attributes for this device */
+	dev->power_attrib = kzalloc(sizeof(struct nvhost_device_power_attr),
+		GFP_KERNEL);
+	if (!dev->power_attrib) {
+		dev_err(&dev->dev, "Unable to allocate sysfs attributes\n");
+		return -ENOMEM;
+	}
+	dev->power_attrib->ndev = dev;
+
+	for (i = 0; i < NVHOST_POWER_SYSFS_ATTRIB_MAX; i++)
+		sysfs_attr_init(&dev->power_attrib->power_attr[i].attr);
+
+	dev->power_kobj = kobject_create_and_add("acm", &dev->dev.kobj);
+	if (!dev->power_kobj) {
+		dev_err(&dev->dev, "Could not add dir 'power'\n");
+		err = -EIO;
+		goto fail_attrib_alloc;
+	}
+
+	attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY];
+	attr->attr.name = "clockgate_delay";
+	attr->attr.mode = S_IWUSR | S_IRUGO;
+	attr->show = clockgate_delay_show;
+	attr->store = clockgate_delay_store;
+	if (sysfs_create_file(dev->power_kobj, &attr->attr)) {
+		dev_err(&dev->dev, "Could not create sysfs attribute clockgate_delay\n");
+		err = -EIO;
+		goto fail_clockdelay;
+	}
+
+	attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY];
+	attr->attr.name = "powergate_delay";
+	attr->attr.mode = S_IWUSR | S_IRUGO;
+	attr->show = powergate_delay_show;
+	attr->store = powergate_delay_store;
+	if (sysfs_create_file(dev->power_kobj, &attr->attr)) {
+		dev_err(&dev->dev, "Could not create sysfs attribute powergate_delay\n");
+		err = -EIO;
+		goto fail_powergatedelay;
+	}
+
+	attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_REFCOUNT];
+	attr->attr.name = "refcount";
+	attr->attr.mode = S_IRUGO;
+	attr->show = refcount_show;
+	if (sysfs_create_file(dev->power_kobj, &attr->attr)) {
+		dev_err(&dev->dev, "Could not create sysfs attribute refcount\n");
+		err = -EIO;
+		goto fail_refcount;
+	}
+
+	return 0;
+
+fail_refcount:
+	attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY];
+	sysfs_remove_file(dev->power_kobj, &attr->attr);
+
+fail_powergatedelay:
+	attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY];
+	sysfs_remove_file(dev->power_kobj, &attr->attr);
+
+fail_clockdelay:
+	kobject_put(dev->power_kobj);
+
+fail_attrib_alloc:
+	kfree(dev->power_attrib);
+
+	return err;
+}
+
+static int is_module_idle(struct nvhost_device *dev)
+{
+	int count;
+	mutex_lock(&dev->lock);
+	count = dev->refcount;
+	WARN(count != 0, "is_module_idle %s count: %d\n", dev->name, count);
+	mutex_unlock(&dev->lock);
+	return (count == 0);
+}
+
+int nvhost_module_suspend(struct nvhost_device *dev)
+{
+	int ret;
+	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+	ret = wait_event_timeout(dev->idle_wq, is_module_idle(dev),
+			ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT);
+	if (ret == 0) {
+		dev_info(&dev->dev, "%s prevented suspend\n",
+				dev->name);
+		return -EBUSY;
+	}
+
+	mutex_lock(&dev->lock);
+	cancel_delayed_work(&dev->powerstate_down);
+	to_state_powergated_locked(dev);
+	mutex_unlock(&dev->lock);
+
+	if (drv->suspend_ndev)
+		drv->suspend_ndev(dev);
+
+	return 0;
+}
+
+void nvhost_module_deinit(struct nvhost_device *dev)
+{
+	int i;
+	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
+
+	if (drv->deinit)
+		drv->deinit(dev);
+
+	nvhost_module_suspend(dev);
+	for (i = 0; i < dev->num_clks; i++)
+		clk_put(dev->clk[i]);
+	dev->powerstate = NVHOST_POWER_STATE_DEINIT;
+}
+
+/* public host1x power management APIs */
+bool nvhost_module_powered_ext(struct nvhost_device *dev)
+{
+	return nvhost_module_powered(dev);
+}
+
+void nvhost_module_busy_ext(struct nvhost_device *dev)
+{
+	nvhost_module_busy(dev);
+}
+
+void nvhost_module_idle_ext(struct nvhost_device *dev)
+{
+	nvhost_module_idle(dev);
+}
diff --git a/drivers/staging/tegra/video/host/nvhost_acm.h b/drivers/staging/tegra/video/host/nvhost_acm.h
new file mode 100644
index 000000000000..a5894dcfc0b2
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_acm.h
@@ -0,0 +1,58 @@
+/*
+ * drivers/video/tegra/host/nvhost_acm.h
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_ACM_H
+#define __NVHOST_ACM_H
+
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/nvhost.h>
+
+/* Sets clocks and powergating state for a module */
+int nvhost_module_init(struct nvhost_device *ndev);
+void nvhost_module_deinit(struct nvhost_device *dev);
+int nvhost_module_suspend(struct nvhost_device *dev);
+
+void nvhost_module_reset(struct nvhost_device *dev);
+void nvhost_module_busy(struct nvhost_device *dev);
+void nvhost_module_idle_mult(struct nvhost_device *dev, int refs);
+int nvhost_module_add_client(struct nvhost_device *dev,
+		void *priv);
+void nvhost_module_remove_client(struct nvhost_device *dev,
+		void *priv);
+int nvhost_module_get_rate(struct nvhost_device *dev,
+		unsigned long *rate,
+		int index);
+int nvhost_module_set_rate(struct nvhost_device *dev, void *priv,
+		unsigned long rate, int index);
+
+static inline bool nvhost_module_powered(struct nvhost_device *dev)
+{
+	return dev->powerstate == NVHOST_POWER_STATE_RUNNING;
+}
+
+static inline void nvhost_module_idle(struct nvhost_device *dev)
+{
+	nvhost_module_idle_mult(dev, 1);
+}
+
+#endif
diff --git a/drivers/staging/tegra/video/host/nvhost_cdma.c b/drivers/staging/tegra/video/host/nvhost_cdma.c
new file mode 100644
index 000000000000..dae3b7e6182d
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_cdma.c
@@ -0,0 +1,559 @@
+/*
+ * drivers/video/tegra/host/nvhost_cdma.c
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_cdma.h"
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+#include "nvhost_hwctx.h"
+#include "dev.h"
+#include "debug.h"
+#include "nvhost_memmgr.h"
+#include "chip_support.h"
+#include <asm/cacheflush.h>
+
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <trace/events/nvhost.h>
+#include <linux/interrupt.h>
+
+/*
+ * TODO:
+ *   stats
+ *     - for figuring out what to optimize further
+ *   resizable push buffer
+ *     - some channels hardly need any, some channels (3d) could use more
+ */
+
+/**
+ * Add an entry to the sync queue.
+ */
+static void add_to_sync_queue(struct nvhost_cdma *cdma,
+			      struct nvhost_job *job,
+			      u32 nr_slots,
+			      u32 first_get)
+{
+	BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);
+
+	job->first_get = first_get;
+	job->num_slots = nr_slots;
+	nvhost_job_get(job);
+	list_add_tail(&job->list, &cdma->sync_queue);
+
+	switch (job->priority) {
+	case NVHOST_PRIORITY_HIGH:
+		cdma->high_prio_count++;
+		break;
+	case NVHOST_PRIORITY_MEDIUM:
+		cdma->med_prio_count++;
+		break;
+	case NVHOST_PRIORITY_LOW:
+		cdma->low_prio_count++;
+		break;
+	}
+}
+
+/**
+ * Return the status of the cdma's sync queue or push buffer for the given event
+ *  - sq empty: returns 1 for empty, 0 for not empty (as in "1 empty queue" :-)
+ *  - pb space: returns the number of free slots in the channel's push buffer
+ * Must be called with the cdma lock held.
+ */
+static unsigned int cdma_status_locked(struct nvhost_cdma *cdma,
+		enum cdma_event event)
+{
+	switch (event) {
+	case CDMA_EVENT_SYNC_QUEUE_EMPTY:
+		return list_empty(&cdma->sync_queue) ? 1 : 0;
+	case CDMA_EVENT_PUSH_BUFFER_SPACE: {
+		struct push_buffer *pb = &cdma->push_buffer;
+		BUG_ON(!cdma_pb_op().space);
+		return cdma_pb_op().space(pb);
+	}
+	default:
+		return 0;
+	}
+}
+
+/**
+ * Sleep (if necessary) until the requested event happens
+ *   - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
+ *     - Returns 1
+ *   - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
+ *     - Return the amount of space (> 0)
+ * Must be called with the cdma lock held.
+ */
+unsigned int nvhost_cdma_wait_locked(struct nvhost_cdma *cdma,
+		enum cdma_event event)
+{
+	for (;;) {
+		unsigned int space = cdma_status_locked(cdma, event);
+		if (space)
+			return space;
+
+		trace_nvhost_wait_cdma(cdma_to_channel(cdma)->dev->name,
+				event);
+
+		/* If somebody has managed to already start waiting, yield */
+		if (cdma->event != CDMA_EVENT_NONE) {
+			mutex_unlock(&cdma->lock);
+			schedule();
+			mutex_lock(&cdma->lock);
+			continue;
+		}
+		cdma->event = event;
+
+		mutex_unlock(&cdma->lock);
+		down(&cdma->sem);
+		mutex_lock(&cdma->lock);
+	}
+	return 0;
+}
+
+/**
+ * Start the timeout timer for a buffer submission that has not completed yet.
+ * Must be called with the cdma lock held.
+ */
+static void cdma_start_timer_locked(struct nvhost_cdma *cdma,
+		struct nvhost_job *job)
+{
+	BUG_ON(!job);
+	if (cdma->timeout.clientid) {
+		/* timer already started */
+		return;
+	}
+
+	cdma->timeout.ctx = job->hwctx;
+	cdma->timeout.clientid = job->clientid;
+	cdma->timeout.syncpt_id = job->syncpt_id;
+	cdma->timeout.syncpt_val = job->syncpt_end;
+	cdma->timeout.start_ktime = ktime_get();
+
+	schedule_delayed_work(&cdma->timeout.wq,
+			msecs_to_jiffies(job->timeout));
+}
+
+/**
+ * Stop the timeout timer when a buffer submission completes.
+ * Must be called with the cdma lock held.
+ */
+static void stop_cdma_timer_locked(struct nvhost_cdma *cdma)
+{
+	cancel_delayed_work(&cdma->timeout.wq);
+	cdma->timeout.ctx = NULL;
+	cdma->timeout.clientid = 0;
+}
+
+/**
+ * For all sync queue entries that have already finished according to the
+ * current sync point registers:
+ *  - unpin & unref their mems
+ *  - pop their push buffer slots
+ *  - remove them from the sync queue
+ * This is normally called from the host code's worker thread, but can be
+ * called manually if necessary.
+ * Must be called with the cdma lock held.
+ */
+static void update_cdma_locked(struct nvhost_cdma *cdma)
+{
+	bool signal = false;
+	struct nvhost_master *dev = cdma_to_dev(cdma);
+	struct nvhost_syncpt *sp = &dev->syncpt;
+	struct nvhost_job *job, *n;
+
+	/* If CDMA is stopped, queue is cleared and we can return */
+	if (!cdma->running)
+		return;
+
+	/*
+	 * Walk the sync queue, reading the sync point registers as necessary,
+	 * to consume as many sync queue entries as possible without blocking
+	 */
+	list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
+		BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);
+
+		/* Check whether this syncpt has completed, and bail if not */
+		if (!nvhost_syncpt_is_expired(sp,
+				job->syncpt_id, job->syncpt_end)) {
+			/* Start timer on next pending syncpt */
+			if (job->timeout)
+				cdma_start_timer_locked(cdma, job);
+			break;
+		}
+
+		/* Cancel timeout, when a buffer completes */
+		if (cdma->timeout.clientid)
+			stop_cdma_timer_locked(cdma);
+
+		/* Unpin the memory */
+		nvhost_job_unpin(job);
+
+		/* Pop push buffer slots */
+		if (job->num_slots) {
+			struct push_buffer *pb = &cdma->push_buffer;
+			BUG_ON(!cdma_pb_op().pop_from);
+			cdma_pb_op().pop_from(pb, job->num_slots);
+			if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
+				signal = true;
+		}
+
+		list_del(&job->list);
+
+		switch (job->priority) {
+		case NVHOST_PRIORITY_HIGH:
+			cdma->high_prio_count--;
+			break;
+		case NVHOST_PRIORITY_MEDIUM:
+			cdma->med_prio_count--;
+			break;
+		case NVHOST_PRIORITY_LOW:
+			cdma->low_prio_count--;
+			break;
+		}
+
+		nvhost_job_put(job);
+	}
+
+	if (list_empty(&cdma->sync_queue) &&
+				cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+			signal = true;
+
+	/* Wake up CdmaWait() if the requested event happened */
+	if (signal) {
+		cdma->event = CDMA_EVENT_NONE;
+		up(&cdma->sem);
+	}
+}
+
+void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma,
+		struct nvhost_syncpt *syncpt, struct nvhost_device *dev)
+{
+	u32 get_restart;
+	u32 syncpt_incrs;
+	struct nvhost_job *job = NULL;
+	u32 syncpt_val;
+
+	syncpt_val = nvhost_syncpt_update_min(syncpt, cdma->timeout.syncpt_id);
+
+	dev_dbg(&dev->dev,
+		"%s: starting cleanup (thresh %d)\n",
+		__func__, syncpt_val);
+
+	/*
+	 * Move the sync_queue read pointer to the first entry that hasn't
+	 * completed based on the current HW syncpt value. It's likely there
+	 * won't be any (i.e. we're still at the head), but covers the case
+	 * where a syncpt incr happens just prior/during the teardown.
+	 */
+
+	dev_dbg(&dev->dev,
+		"%s: skip completed buffers still in sync_queue\n",
+		__func__);
+
+	list_for_each_entry(job, &cdma->sync_queue, list) {
+		if (syncpt_val < job->syncpt_end)
+			break;
+
+		nvhost_job_dump(&dev->dev, job);
+	}
+
+	/*
+	 * Walk the sync_queue, first incrementing with the CPU syncpts that
+	 * are partially executed (the first buffer) or fully skipped while
+	 * still in the current context (slots are also NOP-ed).
+	 *
+	 * At the point contexts are interleaved, syncpt increments must be
+	 * done inline with the pushbuffer from a GATHER buffer to maintain
+	 * the order (slots are modified to be a GATHER of syncpt incrs).
+	 *
+	 * Note: save in get_restart the location where the timed out buffer
+	 * started in the PB, so we can start the refetch from there (with the
+	 * modified NOP-ed PB slots). This lets things appear to have completed
+	 * properly for this buffer and resources are freed.
+	 */
+
+	dev_dbg(&dev->dev,
+		"%s: perform CPU incr on pending same ctx buffers\n",
+		__func__);
+
+	get_restart = cdma->last_put;
+	if (!list_empty(&cdma->sync_queue))
+		get_restart = job->first_get;
+
+	/* do CPU increments as long as this context continues */
+	list_for_each_entry_from(job, &cdma->sync_queue, list) {
+		/* different context, gets us out of this loop */
+		if (job->clientid != cdma->timeout.clientid)
+			break;
+
+		/* won't need a timeout when replayed */
+		job->timeout = 0;
+
+		syncpt_incrs = job->syncpt_end - syncpt_val;
+		dev_dbg(&dev->dev,
+			"%s: CPU incr (%d)\n", __func__, syncpt_incrs);
+
+		nvhost_job_dump(&dev->dev, job);
+
+		/* safe to use CPU to incr syncpts */
+		cdma_op().timeout_cpu_incr(cdma,
+				job->first_get,
+				syncpt_incrs,
+				job->syncpt_end,
+				job->num_slots,
+				dev->waitbases);
+
+		syncpt_val += syncpt_incrs;
+	}
+
+	dev_dbg(&dev->dev,
+		"%s: finished sync_queue modification\n", __func__);
+
+	/* roll back DMAGET and start up channel again */
+	cdma_op().timeout_teardown_end(cdma, get_restart);
+
+	if (cdma->timeout.ctx)
+		cdma->timeout.ctx->has_timedout = true;
+}
+
+/**
+ * Create a cdma
+ */
+int nvhost_cdma_init(struct nvhost_cdma *cdma)
+{
+	int err;
+	struct push_buffer *pb = &cdma->push_buffer;
+	BUG_ON(!cdma_pb_op().init);
+	mutex_init(&cdma->lock);
+	sema_init(&cdma->sem, 0);
+
+	INIT_LIST_HEAD(&cdma->sync_queue);
+
+	cdma->event = CDMA_EVENT_NONE;
+	cdma->running = false;
+	cdma->torndown = false;
+
+	err = cdma_pb_op().init(pb);
+	if (err)
+		return err;
+	return 0;
+}
+
+/**
+ * Destroy a cdma
+ */
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma)
+{
+	struct push_buffer *pb = &cdma->push_buffer;
+
+	BUG_ON(!cdma_pb_op().destroy);
+	BUG_ON(cdma->running);
+	cdma_pb_op().destroy(pb);
+	cdma_op().timeout_destroy(cdma);
+}
+
+/**
+ * Begin a cdma submit
+ */
+int nvhost_cdma_begin(struct nvhost_cdma *cdma, struct nvhost_job *job)
+{
+	mutex_lock(&cdma->lock);
+
+	if (job->timeout) {
+		/* init state on first submit with timeout value */
+		if (!cdma->timeout.initialized) {
+			int err;
+			BUG_ON(!cdma_op().timeout_init);
+			err = cdma_op().timeout_init(cdma,
+				job->syncpt_id);
+			if (err) {
+				mutex_unlock(&cdma->lock);
+				return err;
+			}
+		}
+	}
+	if (!cdma->running) {
+		BUG_ON(!cdma_op().start);
+		cdma_op().start(cdma);
+	}
+	cdma->slots_free = 0;
+	cdma->slots_used = 0;
+	cdma->first_get = cdma_pb_op().putptr(&cdma->push_buffer);
+	return 0;
+}
+
+static void trace_write_gather(struct nvhost_cdma *cdma,
+		struct mem_handle *ref,
+		u32 offset, u32 words)
+{
+	void *mem = NULL;
+
+	if (nvhost_debug_trace_cmdbuf) {
+		mem = mem_op().mmap(ref);
+		if (IS_ERR_OR_NULL(mem))
+			mem = NULL;
+	}
+
+	if (mem) {
+		u32 i;
+		/*
+		 * Write in batches of 128 as there seems to be a limit
+		 * of how much you can output to ftrace at once.
+		 */
+		for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
+			trace_nvhost_cdma_push_gather(
+				cdma_to_channel(cdma)->dev->name,
+				(u32)ref,
+				min(words - i, TRACE_MAX_LENGTH),
+				offset + i * sizeof(u32),
+				mem);
+		}
+		mem_op().munmap(ref, mem);
+	}
+}
+
+/**
+ * Push two words into a push buffer slot
+ * Blocks as necessary if the push buffer is full.
+ */
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
+{
+	if (nvhost_debug_trace_cmdbuf)
+		trace_nvhost_cdma_push(cdma_to_channel(cdma)->dev->name,
+				op1, op2);
+
+	nvhost_cdma_push_gather(cdma, NULL, NULL, 0, op1, op2);
+}
+
+/**
+ * Push two words into a push buffer slot, tracking the gather buffer handle.
+ * Blocks as necessary if the push buffer is full.
+ */
+void nvhost_cdma_push_gather(struct nvhost_cdma *cdma,
+		struct mem_mgr *client, struct mem_handle *handle,
+		u32 offset, u32 op1, u32 op2)
+{
+	u32 slots_free = cdma->slots_free;
+	struct push_buffer *pb = &cdma->push_buffer;
+
+	BUG_ON(!cdma_pb_op().push_to);
+	BUG_ON(!cdma_op().kick);
+
+	if (handle)
+		trace_write_gather(cdma, handle, offset, op1 & 0xffff);
+
+	if (slots_free == 0) {
+		cdma_op().kick(cdma);
+		slots_free = nvhost_cdma_wait_locked(cdma,
+				CDMA_EVENT_PUSH_BUFFER_SPACE);
+	}
+	cdma->slots_free = slots_free - 1;
+	cdma->slots_used++;
+	cdma_pb_op().push_to(pb, client, handle, op1, op2);
+}
+
+/**
+ * End a cdma submit
+ * Kick off DMA, add the job to the sync queue, and record the number of slots to be freed
+ * from the pushbuffer. The handles for a submit must all be pinned at the same
+ * time, but they can be unpinned in smaller chunks.
+ */
+void nvhost_cdma_end(struct nvhost_cdma *cdma,
+		struct nvhost_job *job)
+{
+	bool was_idle = list_empty(&cdma->sync_queue);
+
+	BUG_ON(!cdma_op().kick);
+	cdma_op().kick(cdma);
+
+	BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);
+
+	add_to_sync_queue(cdma,
+			job,
+			cdma->slots_used,
+			cdma->first_get);
+
+	/* start timer on idle -> active transitions */
+	if (job->timeout && was_idle)
+		cdma_start_timer_locked(cdma, job);
+
+	trace_nvhost_cdma_end(job->ch->dev->name,
+			job->priority,
+			job->ch->cdma.high_prio_count,
+			job->ch->cdma.med_prio_count,
+			job->ch->cdma.low_prio_count);
+
+	mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Update cdma state according to current sync point values
+ */
+void nvhost_cdma_update(struct nvhost_cdma *cdma)
+{
+	mutex_lock(&cdma->lock);
+	update_cdma_locked(cdma);
+	mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Wait for push buffer to be empty.
+ * @cdma pointer to channel cdma
+ * @timeout timeout in ms
+ * Returns -ETIME if timeout was reached, zero if push buffer is empty.
+ */
+int nvhost_cdma_flush(struct nvhost_cdma *cdma, int timeout)
+{
+	unsigned int space;
+	int err = 0;
+	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+
+	trace_nvhost_cdma_flush(cdma_to_channel(cdma)->dev->name, timeout);
+
+	/*
+	 * Wait for at most timeout ms. Recalculate timeout at each iteration
+	 * to better keep within given timeout.
+	 */
+	while (!err && time_before(jiffies, end_jiffies)) {
+		int timeout_jiffies = end_jiffies - jiffies;
+
+		mutex_lock(&cdma->lock);
+		space = cdma_status_locked(cdma,
+				CDMA_EVENT_SYNC_QUEUE_EMPTY);
+		if (space) {
+			mutex_unlock(&cdma->lock);
+			return 0;
+		}
+
+		/*
+		 * Wait for sync queue to become empty. If there is already
+		 * an event pending, we need to poll.
+		 */
+		if (cdma->event != CDMA_EVENT_NONE) {
+			mutex_unlock(&cdma->lock);
+			schedule();
+		} else {
+			cdma->event = CDMA_EVENT_SYNC_QUEUE_EMPTY;
+
+			mutex_unlock(&cdma->lock);
+			/* down_timeout() takes its timeout in jiffies */
+			err = down_timeout(&cdma->sem, timeout_jiffies);
+		}
+	}
+	return err;
+}
diff --git a/drivers/staging/tegra/video/host/nvhost_cdma.h b/drivers/staging/tegra/video/host/nvhost_cdma.h
new file mode 100644
index 000000000000..a9522c5f6326
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_cdma.h
@@ -0,0 +1,117 @@
+/*
+ * drivers/video/tegra/host/nvhost_cdma.h
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_CDMA_H
+#define __NVHOST_CDMA_H
+
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+
+#include <linux/nvhost.h>
+#include <linux/list.h>
+
+struct nvhost_syncpt;
+struct nvhost_userctx_timeout;
+struct nvhost_job;
+struct mem_mgr;
+struct mem_handle;
+
+/*
+ * cdma
+ *
+ * This is in charge of a host command DMA channel.
+ * Sends ops to a push buffer, and takes responsibility for unpinning
+ * (& possibly freeing) of memory after those ops have completed.
+ * Producer:
+ *	begin
+ *		push - send ops to the push buffer
+ *	end - start command DMA and enqueue handles to be unpinned
+ * Consumer:
+ *	update - call to update sync queue and push buffer, unpin memory
+ */
+
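+/*
+ * Illustrative producer sequence (sketch only, locking and error handling
+ * omitted):
+ *
+ *	nvhost_cdma_begin(cdma, job);
+ *	nvhost_cdma_push(cdma, op1, op2);  // once per opcode pair
+ *	nvhost_cdma_end(cdma, job);        // kicks DMA, queues the job
+ *
+ * The consumer side later calls nvhost_cdma_update(cdma) to retire
+ * completed jobs and unpin their memory.
+ */
+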
+struct push_buffer {
+	struct mem_handle *mem;		/* handle to pushbuffer memory */
+	u32 *mapped;			/* mapped pushbuffer memory */
+	u32 phys;			/* physical address of pushbuffer */
+	u32 fence;			/* index we've written */
+	u32 cur;			/* index to write to */
+	struct mem_mgr_handle *client_handle; /* handle for each opcode pair */
+};
+
+struct buffer_timeout {
+	struct delayed_work wq;		/* work queue */
+	bool initialized;		/* timer one-time setup flag */
+	u32 syncpt_id;			/* buffer completion syncpt id */
+	u32 syncpt_val;			/* syncpt value when completed */
+	ktime_t start_ktime;		/* starting time */
+	/* context timeout information */
+	struct nvhost_hwctx *ctx;
+	int clientid;
+};
+
+enum cdma_event {
+	CDMA_EVENT_NONE,		/* not waiting for any event */
+	CDMA_EVENT_SYNC_QUEUE_EMPTY,	/* wait for empty sync queue */
+	CDMA_EVENT_PUSH_BUFFER_SPACE	/* wait for space in push buffer */
+};
+
+struct nvhost_cdma {
+	struct mutex lock;		/* controls access to shared state */
+	struct semaphore sem;		/* signalled when event occurs */
+	enum cdma_event event;		/* event that sem is waiting for */
+	unsigned int slots_used;	/* pb slots used in current submit */
+	unsigned int slots_free;	/* pb slots free in current submit */
+	unsigned int first_get;		/* DMAGET value, where submit begins */
+	unsigned int last_put;		/* last value written to DMAPUT */
+	struct push_buffer push_buffer;	/* channel's push buffer */
+	struct list_head sync_queue;	/* job queue */
+	struct buffer_timeout timeout;	/* channel's timeout state/wq */
+	bool running;
+	bool torndown;
+	int high_prio_count;
+	int med_prio_count;
+	int low_prio_count;
+};
+
+#define cdma_to_channel(cdma) container_of(cdma, struct nvhost_channel, cdma)
+#define cdma_to_dev(cdma) nvhost_get_host(cdma_to_channel(cdma)->dev)
+#define cdma_to_memmgr(cdma) ((cdma_to_dev(cdma))->memmgr)
+#define pb_to_cdma(pb) container_of(pb, struct nvhost_cdma, push_buffer)
+
+int	nvhost_cdma_init(struct nvhost_cdma *cdma);
+void	nvhost_cdma_deinit(struct nvhost_cdma *cdma);
+void	nvhost_cdma_stop(struct nvhost_cdma *cdma);
+int	nvhost_cdma_begin(struct nvhost_cdma *cdma, struct nvhost_job *job);
+void	nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2);
+void	nvhost_cdma_push_gather(struct nvhost_cdma *cdma,
+		struct mem_mgr *client,
+		struct mem_handle *handle, u32 offset, u32 op1, u32 op2);
+void	nvhost_cdma_end(struct nvhost_cdma *cdma,
+		struct nvhost_job *job);
+void	nvhost_cdma_update(struct nvhost_cdma *cdma);
+int	nvhost_cdma_flush(struct nvhost_cdma *cdma, int timeout);
+void	nvhost_cdma_peek(struct nvhost_cdma *cdma,
+		u32 dmaget, int slot, u32 *out);
+unsigned int nvhost_cdma_wait_locked(struct nvhost_cdma *cdma,
+		enum cdma_event event);
+void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma,
+		struct nvhost_syncpt *syncpt, struct nvhost_device *dev);
+#endif
diff --git a/drivers/staging/tegra/video/host/nvhost_channel.c b/drivers/staging/tegra/video/host/nvhost_channel.c
new file mode 100644
index 000000000000..fd309ee9917b
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_channel.c
@@ -0,0 +1,188 @@
+/*
+ * drivers/video/tegra/host/nvhost_channel.c
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "nvhost_acm.h"
+#include "nvhost_job.h"
+#include "chip_support.h"
+
+#include <trace/events/nvhost.h>
+#include <linux/nvhost_ioctl.h>
+#include <linux/slab.h>
+
+#define NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT 50
+
+int nvhost_channel_init(struct nvhost_channel *ch,
+		struct nvhost_master *dev, int index)
+{
+	int err;
+	struct nvhost_device *ndev;
+
+	/* Link nvhost_device to nvhost_channel */
+	err = channel_op().init(ch, dev, index);
+	if (err < 0) {
+		dev_err(&dev->dev->dev, "failed to init channel %d\n",
+				index);
+		return err;
+	}
+	ndev = ch->dev;
+	ndev->channel = ch;
+
+	return 0;
+}
+
+int nvhost_channel_submit(struct nvhost_job *job)
+{
+	/*
+	 * Check if queue has higher priority jobs running. If so, wait until
+	 * queue is empty. Ignores result from nvhost_cdma_flush, as we submit
+	 * either when push buffer is empty or when we reach the timeout.
+	 */
+	int higher_count = 0;
+
+	switch (job->priority) {
+	case NVHOST_PRIORITY_HIGH:
+		higher_count = 0;
+		break;
+	case NVHOST_PRIORITY_MEDIUM:
+		higher_count = job->ch->cdma.high_prio_count;
+		break;
+	case NVHOST_PRIORITY_LOW:
+		higher_count = job->ch->cdma.high_prio_count
+			+ job->ch->cdma.med_prio_count;
+		break;
+	}
+	if (higher_count > 0)
+		(void)nvhost_cdma_flush(&job->ch->cdma,
+				NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT);
+
+	return channel_op().submit(job);
+}
+
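+/*
+ * Example (illustrative): a LOW priority submit with one HIGH and one MEDIUM
+ * job in flight sees higher_count == 2, so it first flushes for up to
+ * NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT (50) ms before submitting.
+ */
+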
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch)
+{
+	int err = 0;
+	struct nvhost_driver *drv = to_nvhost_driver(ch->dev->dev.driver);
+
+	mutex_lock(&ch->reflock);
+	if (ch->refcount == 0) {
+		if (drv->init)
+			drv->init(ch->dev);
+		err = nvhost_cdma_init(&ch->cdma);
+	} else if (ch->dev->exclusive) {
+		err = -EBUSY;
+	}
+	if (!err)
+		ch->refcount++;
+
+	mutex_unlock(&ch->reflock);
+
+	/* Keep alive modules that need to stay powered while a channel is open */
+	if (!err && ch->dev->keepalive)
+		nvhost_module_busy(ch->dev);
+
+	return err ? NULL : ch;
+}
+
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx)
+{
+	BUG_ON(!channel_cdma_op().stop);
+
+	if (ctx) {
+		mutex_lock(&ch->submitlock);
+		if (ch->cur_ctx == ctx)
+			ch->cur_ctx = NULL;
+		mutex_unlock(&ch->submitlock);
+	}
+
+	/* Allow keep-alive'd module to be turned off */
+	if (ch->dev->keepalive)
+		nvhost_module_idle(ch->dev);
+
+	mutex_lock(&ch->reflock);
+	if (ch->refcount == 1) {
+		channel_cdma_op().stop(&ch->cdma);
+		nvhost_cdma_deinit(&ch->cdma);
+		nvhost_module_suspend(ch->dev);
+	}
+	ch->refcount--;
+	mutex_unlock(&ch->reflock);
+}
+
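+/*
+ * Open/close pairing sketch (illustrative): the first nvhost_getchannel()
+ * starts CDMA for the channel, the last nvhost_putchannel() stops it:
+ *
+ *	ch = nvhost_getchannel(ch);  // refcount 0 -> 1: cdma init
+ *	... submit jobs ...
+ *	nvhost_putchannel(ch, ctx);  // refcount 1 -> 0: cdma stop + suspend
+ */
+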
+int nvhost_channel_suspend(struct nvhost_channel *ch)
+{
+	int ret = 0;
+
+	mutex_lock(&ch->reflock);
+	BUG_ON(!channel_cdma_op().stop);
+
+	if (ch->refcount) {
+		ret = nvhost_module_suspend(ch->dev);
+		if (!ret)
+			channel_cdma_op().stop(&ch->cdma);
+	}
+	mutex_unlock(&ch->reflock);
+
+	return ret;
+}
+
+struct nvhost_channel *nvhost_alloc_channel_internal(int chindex,
+	int max_channels, int *current_channel_count)
+{
+	struct nvhost_channel *ch = NULL;
+
+	if (chindex > max_channels ||
+	    *current_channel_count + 1 > max_channels)
+		return NULL;
+
+	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+	if (ch == NULL)
+		return NULL;
+
+	(*current_channel_count)++;
+	return ch;
+}
+
+void nvhost_free_channel_internal(struct nvhost_channel *ch,
+	int *current_channel_count)
+{
+	kfree(ch);
+	(*current_channel_count)--;
+}
+
+int nvhost_channel_save_context(struct nvhost_channel *ch)
+{
+	struct nvhost_hwctx *cur_ctx = ch->cur_ctx;
+	int err = 0;
+	if (cur_ctx)
+		err = channel_op().save_context(ch);
+
+	return err;
+}
+
+int nvhost_channel_drain_read_fifo(struct nvhost_channel *ch,
+			u32 *ptr, unsigned int count, unsigned int *pending)
+{
+	return channel_op().drain_read_fifo(ch, ptr, count, pending);
+}
diff --git a/drivers/staging/tegra/video/host/nvhost_channel.h b/drivers/staging/tegra/video/host/nvhost_channel.h
new file mode 100644
index 000000000000..d7f096db1ffa
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_channel.h
@@ -0,0 +1,77 @@
+/*
+ * drivers/video/tegra/host/nvhost_channel.h
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_CHANNEL_H
+#define __NVHOST_CHANNEL_H
+
+#include <linux/cdev.h>
+#include <linux/io.h>
+#include "nvhost_cdma.h"
+
+#define NVHOST_MAX_WAIT_CHECKS		256
+#define NVHOST_MAX_GATHERS		512
+#define NVHOST_MAX_HANDLES		1280
+#define NVHOST_MAX_POWERGATE_IDS	2
+
+struct nvhost_master;
+struct nvhost_device;
+struct nvhost_channel;
+struct nvhost_hwctx;
+
+struct nvhost_channel {
+	int refcount;
+	int chid;
+	u32 syncpt_id;
+	struct mutex reflock;
+	struct mutex submitlock;
+	void __iomem *aperture;
+	struct nvhost_hwctx *cur_ctx;
+	struct device *node;
+	struct nvhost_device *dev;
+	struct cdev cdev;
+	struct nvhost_hwctx_handler *ctxhandler;
+	struct nvhost_cdma cdma;
+};
+
+int nvhost_channel_init(struct nvhost_channel *ch,
+	struct nvhost_master *dev, int index);
+
+int nvhost_channel_submit(struct nvhost_job *job);
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch);
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx);
+int nvhost_channel_suspend(struct nvhost_channel *ch);
+
+int nvhost_channel_drain_read_fifo(struct nvhost_channel *ch,
+			u32 *ptr, unsigned int count, unsigned int *pending);
+
+int nvhost_channel_read_3d_reg(struct nvhost_channel *channel,
+	struct nvhost_hwctx *hwctx,
+	u32 offset, u32 *value);
+
+struct nvhost_channel *nvhost_alloc_channel_internal(int chindex,
+	int max_channels, int *current_channel_count);
+
+void nvhost_free_channel_internal(struct nvhost_channel *ch,
+	int *current_channel_count);
+
+int nvhost_channel_save_context(struct nvhost_channel *ch);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/nvhost_hwctx.h b/drivers/staging/tegra/video/host/nvhost_hwctx.h
new file mode 100644
index 000000000000..47bc3d408fde
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_hwctx.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/video/tegra/host/nvhost_hwctx.h
+ *
+ * Tegra Graphics Host Hardware Context Interface
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_HWCTX_H
+#define __NVHOST_HWCTX_H
+
+#include <linux/string.h>
+#include <linux/kref.h>
+
+#include <linux/nvhost.h>
+
+struct nvhost_channel;
+struct nvhost_cdma;
+
+struct nvhost_hwctx {
+	struct kref ref;
+	struct nvhost_hwctx_handler *h;
+	struct nvhost_channel *channel;
+	bool valid;
+	bool has_timedout;
+};
+
+struct nvhost_hwctx_handler {
+	struct nvhost_hwctx * (*alloc) (struct nvhost_hwctx_handler *h,
+			struct nvhost_channel *ch);
+	void (*get) (struct nvhost_hwctx *ctx);
+	void (*put) (struct nvhost_hwctx *ctx);
+	void (*save_push) (struct nvhost_hwctx *ctx,
+			struct nvhost_cdma *cdma);
+	void (*save_service) (struct nvhost_hwctx *ctx);
+	void *priv;
+};
+
+
+struct hwctx_reginfo {
+	unsigned int offset:12;
+	unsigned int count:16;
+	unsigned int type:2;
+};
+
+enum {
+	HWCTX_REGINFO_DIRECT = 0,
+	HWCTX_REGINFO_INDIRECT,
+	HWCTX_REGINFO_INDIRECT_4X
+};
+
+#define HWCTX_REGINFO(offset, count, type) {offset, count, HWCTX_REGINFO_##type}
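+
+/*
+ * Illustrative expansion (hypothetical register table entry, not part of
+ * the original sources):
+ *
+ *	HWCTX_REGINFO(0x100, 2, DIRECT)
+ *
+ * expands to {0x100, 2, HWCTX_REGINFO_DIRECT}. Note that 'offset' is a
+ * 12-bit field, so it must fit in 0x000-0xfff.
+ */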
+
+#endif
diff --git a/drivers/staging/tegra/video/host/nvhost_intr.c b/drivers/staging/tegra/video/host/nvhost_intr.c
new file mode 100644
index 000000000000..2b6b5d0af6ce
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_intr.c
@@ -0,0 +1,406 @@
+/*
+ * drivers/video/tegra/host/nvhost_intr.c
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvhost_intr.h"
+#include "dev.h"
+#include "nvhost_acm.h"
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <trace/events/nvhost.h>
+#include "nvhost_channel.h"
+#include "nvhost_hwctx.h"
+#include "chip_support.h"
+
+/*** Wait list management ***/
+
+struct nvhost_waitlist {
+	struct list_head list;
+	struct kref refcount;
+	u32 thresh;
+	enum nvhost_intr_action action;
+	atomic_t state;
+	void *data;
+	int count;
+};
+
+enum waitlist_state {
+	WLS_PENDING,
+	WLS_REMOVED,
+	WLS_CANCELLED,
+	WLS_HANDLED
+};
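+
+/*
+ * Illustrative note (not from the original sources): the enum order is
+ * load-bearing. remove_completed_waiters() advances a waiter with a single
+ * atomic_inc_return(), giving WLS_PENDING -> WLS_REMOVED for live waiters
+ * and WLS_CANCELLED -> WLS_HANDLED for cancelled ones; e.g. incrementing
+ * from WLS_CANCELLED (2) yields WLS_HANDLED (3), which is exactly what the
+ * '== WLS_HANDLED' check below catches so the waiter can be freed at once.
+ */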
+
+static void waiter_release(struct kref *kref)
+{
+	kfree(container_of(kref, struct nvhost_waitlist, refcount));
+}
+
+/**
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ */
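+/*
+ * Illustrative ordering example (hypothetical thresholds, not part of the
+ * original sources): for a queue already holding thresholds [10, 20, 30],
+ * adding a waiter with thresh == 25 lands between 20 and 30 and returns
+ * false; adding thresh == 5 lands at the head and returns true, telling
+ * the caller to reprogram the hardware threshold. The (s32) subtraction
+ * below keeps this ordering correct across 32-bit syncpoint wraparound.
+ */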
+static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
+				struct list_head *queue)
+{
+	struct nvhost_waitlist *pos;
+	u32 thresh = waiter->thresh;
+
+	list_for_each_entry_reverse(pos, queue, list)
+		if ((s32)(pos->thresh - thresh) <= 0) {
+			list_add(&waiter->list, &pos->list);
+			return false;
+		}
+
+	list_add(&waiter->list, queue);
+	return true;
+}
+
+/**
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into lists by actions
+ */
+static void remove_completed_waiters(struct list_head *head, u32 sync,
+			struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+	struct list_head *dest;
+	struct nvhost_waitlist *waiter, *next, *prev;
+
+	list_for_each_entry_safe(waiter, next, head, list) {
+		if ((s32)(waiter->thresh - sync) > 0)
+			break;
+
+		dest = completed + waiter->action;
+
+		/* consolidate submit cleanups */
+		if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
+			&& !list_empty(dest)) {
+			prev = list_entry(dest->prev,
+					struct nvhost_waitlist, list);
+			if (prev->data == waiter->data) {
+				prev->count++;
+				dest = NULL;
+			}
+		}
+
+		/* PENDING->REMOVED or CANCELLED->HANDLED */
+		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
+			list_del(&waiter->list);
+			kref_put(&waiter->refcount, waiter_release);
+		} else {
+			list_move_tail(&waiter->list, dest);
+		}
+	}
+}
+
+static void reset_threshold_interrupt(struct nvhost_intr *intr,
+			       struct list_head *head,
+			       unsigned int id)
+{
+	u32 thresh = list_first_entry(head,
+				struct nvhost_waitlist, list)->thresh;
+
+	BUG_ON(!(intr_op().set_syncpt_threshold &&
+		 intr_op().enable_syncpt_intr));
+
+	intr_op().set_syncpt_threshold(intr, id, thresh);
+	intr_op().enable_syncpt_intr(intr, id);
+}
+
+
+static void action_submit_complete(struct nvhost_waitlist *waiter)
+{
+	struct nvhost_channel *channel = waiter->data;
+	int nr_completed = waiter->count;
+
+	nvhost_cdma_update(&channel->cdma);
+	nvhost_module_idle_mult(channel->dev, nr_completed);
+
+	/*  Add nr_completed to trace */
+	trace_nvhost_channel_submit_complete(channel->dev->name,
+			nr_completed, waiter->thresh,
+			channel->cdma.high_prio_count,
+			channel->cdma.med_prio_count,
+			channel->cdma.low_prio_count);
+}
+
+static void action_ctxsave(struct nvhost_waitlist *waiter)
+{
+	struct nvhost_hwctx *hwctx = waiter->data;
+	struct nvhost_channel *channel = hwctx->channel;
+
+	if (channel->ctxhandler->save_service)
+		channel->ctxhandler->save_service(hwctx);
+}
+
+static void action_wakeup(struct nvhost_waitlist *waiter)
+{
+	wait_queue_head_t *wq = waiter->data;
+
+	wake_up(wq);
+}
+
+static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
+{
+	wait_queue_head_t *wq = waiter->data;
+
+	wake_up_interruptible(wq);
+}
+
+typedef void (*action_handler)(struct nvhost_waitlist *waiter);
+
+static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
+	action_submit_complete,
+	action_ctxsave,
+	action_wakeup,
+	action_wakeup_interruptible,
+};
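+
+/*
+ * Note (illustrative): action_handlers[] is indexed by enum
+ * nvhost_intr_action, so its order must match the enum in nvhost_intr.h;
+ * e.g. action_handlers[NVHOST_INTR_ACTION_WAKEUP] must be action_wakeup.
+ */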
+
+static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+	struct list_head *head = completed;
+	int i;
+
+	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
+		action_handler handler = action_handlers[i];
+		struct nvhost_waitlist *waiter, *next;
+
+		list_for_each_entry_safe(waiter, next, head, list) {
+			list_del(&waiter->list);
+			handler(waiter);
+			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != WLS_REMOVED);
+			kref_put(&waiter->refcount, waiter_release);
+		}
+	}
+}
+
+/**
+ * Remove & handle all waiters that have completed for the given syncpt
+ */
+static int process_wait_list(struct nvhost_intr *intr,
+			     struct nvhost_intr_syncpt *syncpt,
+			     u32 threshold)
+{
+	struct list_head completed[NVHOST_INTR_ACTION_COUNT];
+	unsigned int i;
+	int empty;
+
+	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
+		INIT_LIST_HEAD(completed + i);
+
+	spin_lock(&syncpt->lock);
+
+	remove_completed_waiters(&syncpt->wait_head, threshold, completed);
+
+	empty = list_empty(&syncpt->wait_head);
+	if (empty)
+		intr_op().disable_syncpt_intr(intr, syncpt->id);
+	else
+		reset_threshold_interrupt(intr, &syncpt->wait_head,
+					  syncpt->id);
+
+	spin_unlock(&syncpt->lock);
+
+	run_handlers(completed);
+
+	return empty;
+}
+
+/*** host syncpt interrupt service functions ***/
+/**
+ * Sync point threshold interrupt service thread function
+ * Handles sync point threshold triggers, in thread context
+ */
+void nvhost_syncpt_thresh_fn(struct nvhost_intr_syncpt *syncpt)
+{
+	unsigned int id = syncpt->id;
+	struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
+	struct nvhost_master *dev = intr_to_dev(intr);
+
+	(void)process_wait_list(intr, syncpt,
+				nvhost_syncpt_update_min(&dev->syncpt, id));
+}
+
+
+/*** host general interrupt service functions ***/
+
+
+/*** Main API ***/
+
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+			enum nvhost_intr_action action, void *data,
+			void *_waiter,
+			void **ref)
+{
+	struct nvhost_waitlist *waiter = _waiter;
+	struct nvhost_intr_syncpt *syncpt;
+	int queue_was_empty;
+
+	BUG_ON(waiter == NULL);
+
+	BUG_ON(!(intr_op().set_syncpt_threshold &&
+		 intr_op().enable_syncpt_intr));
+
+	/* initialize a new waiter */
+	INIT_LIST_HEAD(&waiter->list);
+	kref_init(&waiter->refcount);
+	if (ref)
+		kref_get(&waiter->refcount);
+	waiter->thresh = thresh;
+	waiter->action = action;
+	atomic_set(&waiter->state, WLS_PENDING);
+	waiter->data = data;
+	waiter->count = 1;
+
+	syncpt = intr->syncpt + id;
+
+	spin_lock(&syncpt->lock);
+
+	queue_was_empty = list_empty(&syncpt->wait_head);
+
+	if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
+		/* added at head of list - new threshold value */
+		intr_op().set_syncpt_threshold(intr, id, thresh);
+
+		/* added as first waiter - enable interrupt */
+		if (queue_was_empty)
+			intr_op().enable_syncpt_intr(intr, id);
+	}
+
+	spin_unlock(&syncpt->lock);
+
+	if (ref)
+		*ref = waiter;
+	return 0;
+}
+
+void *nvhost_intr_alloc_waiter(void)
+{
+	return kzalloc(sizeof(struct nvhost_waitlist),
+			GFP_KERNEL|__GFP_REPEAT);
+}
+
+void nvhost_intr_put_ref(struct nvhost_intr *intr, u32 id, void *ref)
+{
+	struct nvhost_waitlist *waiter = ref;
+	struct nvhost_intr_syncpt *syncpt;
+	struct nvhost_master *host = intr_to_dev(intr);
+
+	while (atomic_cmpxchg(&waiter->state,
+				WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
+		schedule();
+
+	syncpt = intr->syncpt + id;
+	(void)process_wait_list(intr, syncpt,
+				nvhost_syncpt_update_min(&host->syncpt, id));
+
+	kref_put(&waiter->refcount, waiter_release);
+}
+
+
+/*** Init & shutdown ***/
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
+{
+	unsigned int id;
+	struct nvhost_intr_syncpt *syncpt;
+	struct nvhost_master *host = intr_to_dev(intr);
+	u32 nb_pts = nvhost_syncpt_nb_pts(&host->syncpt);
+
+	mutex_init(&intr->mutex);
+	intr->syncpt_irq = irq_sync;
+	intr->syncpt_irq_requested = false;
+	intr->wq = create_workqueue("host_syncpt");
+	if (!intr->wq)
+		return -ENOMEM;
+	intr->host_general_irq = irq_gen;
+	intr->host_general_irq_requested = false;
+
+	for (id = 0, syncpt = intr->syncpt;
+	     id < nb_pts;
+	     ++id, ++syncpt) {
+		syncpt->intr = &host->intr;
+		syncpt->id = id;
+		spin_lock_init(&syncpt->lock);
+		INIT_LIST_HEAD(&syncpt->wait_head);
+	}
+
+	return 0;
+}
+
+void nvhost_intr_deinit(struct nvhost_intr *intr)
+{
+	nvhost_intr_stop(intr);
+	destroy_workqueue(intr->wq);
+}
+
+void nvhost_intr_start(struct nvhost_intr *intr, u32 hz)
+{
+	BUG_ON(!(intr_op().request_syncpt_irq &&
+		 intr_op().set_host_clocks_per_usec &&
+		 intr_op().request_host_general_irq));
+
+	mutex_lock(&intr->mutex);
+
+	intr_op().request_syncpt_irq(intr);
+	intr_op().set_host_clocks_per_usec(intr,
+					       (hz + 1000000 - 1)/1000000);
+
+	intr_op().request_host_general_irq(intr);
+
+	mutex_unlock(&intr->mutex);
+}
+
+void nvhost_intr_stop(struct nvhost_intr *intr)
+{
+	unsigned int id;
+	struct nvhost_intr_syncpt *syncpt;
+	u32 nb_pts = nvhost_syncpt_nb_pts(&intr_to_dev(intr)->syncpt);
+
+	BUG_ON(!(intr_op().disable_all_syncpt_intrs &&
+		 intr_op().free_host_general_irq &&
+		 intr_op().free_syncpt_irq));
+
+	mutex_lock(&intr->mutex);
+
+	intr_op().disable_all_syncpt_intrs(intr);
+
+	for (id = 0, syncpt = intr->syncpt;
+	     id < nb_pts;
+	     ++id, ++syncpt) {
+		struct nvhost_waitlist *waiter, *next;
+		list_for_each_entry_safe(waiter, next, &syncpt->wait_head, list) {
+			if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED)
+				== WLS_CANCELLED) {
+				list_del(&waiter->list);
+				kref_put(&waiter->refcount, waiter_release);
+			}
+		}
+
+		if (!list_empty(&syncpt->wait_head)) {  /* output diagnostics */
+			printk(KERN_DEBUG "%s id=%d\n", __func__, id);
+			BUG();
+		}
+	}
+
+	intr_op().free_host_general_irq(intr);
+	intr_op().free_syncpt_irq(intr);
+
+	mutex_unlock(&intr->mutex);
+}
diff --git a/drivers/staging/tegra/video/host/nvhost_intr.h b/drivers/staging/tegra/video/host/nvhost_intr.h
new file mode 100644
index 000000000000..e401f642de54
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_intr.h
@@ -0,0 +1,115 @@
+/*
+ * drivers/video/tegra/host/nvhost_intr.h
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_INTR_H
+#define __NVHOST_INTR_H
+
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+
+struct nvhost_channel;
+
+enum nvhost_intr_action {
+	/**
+	 * Perform cleanup after a submit has completed.
+	 * 'data' points to a channel
+	 */
+	NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,
+
+	/**
+	 * Save a HW context.
+	 * 'data' points to a context
+	 */
+	NVHOST_INTR_ACTION_CTXSAVE,
+
+	/**
+	 * Wake up a task.
+	 * 'data' points to a wait_queue_head_t
+	 */
+	NVHOST_INTR_ACTION_WAKEUP,
+
+	/**
+	 * Wake up an interruptible task.
+	 * 'data' points to a wait_queue_head_t
+	 */
+	NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+
+	NVHOST_INTR_ACTION_COUNT
+};
+
+struct nvhost_intr;
+
+struct nvhost_intr_syncpt {
+	struct nvhost_intr *intr;
+	u8 id;
+	spinlock_t lock;
+	struct list_head wait_head;
+	struct work_struct work;
+};
+
+struct nvhost_intr {
+	struct nvhost_intr_syncpt *syncpt;
+	struct mutex mutex;
+	int host_general_irq;
+	bool host_general_irq_requested;
+	bool syncpt_irq_requested;
+	int syncpt_irq;
+	struct workqueue_struct *wq;
+};
+#define intr_to_dev(x) container_of(x, struct nvhost_master, intr)
+#define intr_syncpt_to_intr(is) ((is)->intr)
+
+/**
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @waiter waiter allocated with nvhost_intr_alloc_waiter - assumes ownership
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking api.
+ */
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+			enum nvhost_intr_action action, void *data,
+			void *waiter,
+			void **ref);
+
+/**
+ * Allocate a waiter.
+ */
+void *nvhost_intr_alloc_waiter(void);
+
+/**
+ * Unreference an action submitted to nvhost_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from nvhost_intr_add_action()
+ */
+void nvhost_intr_put_ref(struct nvhost_intr *intr, u32 id, void *ref);
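+
+/*
+ * Rough usage sketch (mirrors nvhost_syncpt_wait_timeout() in
+ * nvhost_syncpt.c; error handling omitted, so treat it as illustrative):
+ *
+ *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ *	void *ref;
+ *	void *waiter = nvhost_intr_alloc_waiter();
+ *
+ *	nvhost_intr_add_action(intr, id, thresh,
+ *			NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+ *			&wq, waiter, &ref);
+ *	wait_event_interruptible_timeout(wq, ...condition..., timeout);
+ *	nvhost_intr_put_ref(intr, id, ref);
+ */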
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync);
+void nvhost_intr_deinit(struct nvhost_intr *intr);
+void nvhost_intr_start(struct nvhost_intr *intr, u32 hz);
+void nvhost_intr_stop(struct nvhost_intr *intr);
+
+void nvhost_syncpt_thresh_fn(struct nvhost_intr_syncpt *syncpt);
+#endif
diff --git a/drivers/staging/tegra/video/host/nvhost_job.c b/drivers/staging/tegra/video/host/nvhost_job.c
new file mode 100644
index 000000000000..f0f7e64d4504
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_job.c
@@ -0,0 +1,358 @@
+/*
+ * drivers/video/tegra/host/nvhost_job.c
+ *
+ * Tegra Graphics Host Job
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <trace/events/nvhost.h>
+#include "nvhost_channel.h"
+#include "nvhost_job.h"
+#include "nvhost_hwctx.h"
+#include "nvhost_syncpt.h"
+#include "dev.h"
+#include "nvhost_memmgr.h"
+#include "chip_support.h"
+
+/* Magic to use to fill freed handle slots */
+#define BAD_MAGIC 0xdeadbeef
+
+static size_t job_size(struct nvhost_submit_hdr_ext *hdr)
+{
+	s64 num_relocs = hdr ? (int)hdr->num_relocs : 0;
+	s64 num_waitchks = hdr ? (int)hdr->num_waitchks : 0;
+	s64 num_cmdbufs = hdr ? (int)hdr->num_cmdbufs : 0;
+	s64 num_unpins = num_cmdbufs + num_relocs;
+	s64 total;
+
+	if (num_relocs < 0 || num_waitchks < 0 || num_cmdbufs < 0)
+		return 0;
+
+	total = sizeof(struct nvhost_job)
+			+ num_relocs * sizeof(struct nvhost_reloc)
+			+ num_relocs * sizeof(struct nvhost_reloc_shift)
+			+ num_unpins * sizeof(struct mem_handle *)
+			+ num_waitchks * sizeof(struct nvhost_waitchk)
+			+ num_cmdbufs * sizeof(struct nvhost_job_gather);
+
+	if (total > ULONG_MAX)
+		return 0;
+	return (size_t)total;
+}
+
+static void init_fields(struct nvhost_job *job,
+		struct nvhost_submit_hdr_ext *hdr,
+		int priority, int clientid)
+{
+	int num_relocs = hdr ? hdr->num_relocs : 0;
+	int num_waitchks = hdr ? hdr->num_waitchks : 0;
+	int num_cmdbufs = hdr ? hdr->num_cmdbufs : 0;
+	int num_unpins = num_cmdbufs + num_relocs;
+	void *mem = job;
+
+	/* First init state to zero */
+	job->priority = priority;
+	job->clientid = clientid;
+
+	/*
+	 * Redistribute memory to the structs.
+	 * Overflows and negative conditions have
+	 * already been checked in job_alloc().
+	 */
+	mem += sizeof(struct nvhost_job);
+	job->relocarray = num_relocs ? mem : NULL;
+	mem += num_relocs * sizeof(struct nvhost_reloc);
+	job->relocshiftarray = num_relocs ? mem : NULL;
+	mem += num_relocs * sizeof(struct nvhost_reloc_shift);
+	job->unpins = num_unpins ? mem : NULL;
+	mem += num_unpins * sizeof(struct mem_handle *);
+	job->waitchk = num_waitchks ? mem : NULL;
+	mem += num_waitchks * sizeof(struct nvhost_waitchk);
+	job->gathers = num_cmdbufs ? mem : NULL;
+
+	/* Copy information from header */
+	if (hdr) {
+		job->waitchk_mask = hdr->waitchk_mask;
+		job->syncpt_id = hdr->syncpt_id;
+		job->syncpt_incrs = hdr->syncpt_incrs;
+	}
+}
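+
+/*
+ * Illustrative layout of the single vzalloc()'d block carved up above
+ * (sizes depend on the submit header; sketch only):
+ *
+ *	[ struct nvhost_job                        ]
+ *	[ num_relocs   * struct nvhost_reloc       ]	job->relocarray
+ *	[ num_relocs   * struct nvhost_reloc_shift ]	job->relocshiftarray
+ *	[ num_unpins   * struct mem_handle *       ]	job->unpins
+ *	[ num_waitchks * struct nvhost_waitchk     ]	job->waitchk
+ *	[ num_cmdbufs  * struct nvhost_job_gather  ]	job->gathers
+ */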
+
+struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
+		struct nvhost_hwctx *hwctx,
+		struct nvhost_submit_hdr_ext *hdr,
+		struct mem_mgr *memmgr,
+		int priority,
+		int clientid)
+{
+	struct nvhost_job *job = NULL;
+	size_t size = job_size(hdr);
+
+	if (!size)
+		goto error;
+	job = vzalloc(size);
+	if (!job)
+		goto error;
+
+	kref_init(&job->ref);
+	job->ch = ch;
+	job->hwctx = hwctx;
+	if (hwctx)
+		hwctx->h->get(hwctx);
+	job->memmgr = memmgr ? mem_op().get_mgr(memmgr) : NULL;
+
+	init_fields(job, hdr, priority, clientid);
+
+	return job;
+
+error:
+	if (job)
+		nvhost_job_put(job);
+	return NULL;
+}
+
+void nvhost_job_get(struct nvhost_job *job)
+{
+	kref_get(&job->ref);
+}
+
+static void job_free(struct kref *ref)
+{
+	struct nvhost_job *job = container_of(ref, struct nvhost_job, ref);
+
+	if (job->hwctxref)
+		job->hwctxref->h->put(job->hwctxref);
+	if (job->hwctx)
+		job->hwctx->h->put(job->hwctx);
+	if (job->memmgr)
+		mem_op().put_mgr(job->memmgr);
+	vfree(job);
+}
+
+/* Acquire reference to a hardware context. Used for keeping saved contexts in
+ * memory. */
+void nvhost_job_get_hwctx(struct nvhost_job *job, struct nvhost_hwctx *hwctx)
+{
+	BUG_ON(job->hwctxref);
+
+	job->hwctxref = hwctx;
+	hwctx->h->get(hwctx);
+}
+
+void nvhost_job_put(struct nvhost_job *job)
+{
+	kref_put(&job->ref, job_free);
+}
+
+void nvhost_job_add_gather(struct nvhost_job *job,
+		u32 mem_id, u32 words, u32 offset)
+{
+	struct nvhost_job_gather *cur_gather =
+			&job->gathers[job->num_gathers];
+
+	cur_gather->words = words;
+	cur_gather->mem_id = mem_id;
+	cur_gather->offset = offset;
+	job->num_gathers += 1;
+}
+
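+/*
+ * Illustrative reloc patch (hypothetical values, not from the original
+ * sources): with target_phys == 0x10000000, target_offset == 0x40 and
+ * shift == 0, the word at cmdbuf_addr + cmdbuf_offset is rewritten to
+ * 0x10000040, turning a handle-relative reference in the command stream
+ * into a real device address.
+ */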
+static int do_relocs(struct nvhost_job *job, u32 cmdbuf_mem, void *cmdbuf_addr)
+{
+	phys_addr_t target_phys = -EINVAL;
+	int i;
+	u32 mem_id = 0;
+	struct mem_handle *target_ref = NULL;
+
+	/* pin & patch the relocs for one gather */
+	for (i = 0; i < job->num_relocs; i++) {
+		struct nvhost_reloc *reloc = &job->relocarray[i];
+		struct nvhost_reloc_shift *shift = &job->relocshiftarray[i];
+
+		/* skip all other gathers */
+		if (cmdbuf_mem != reloc->cmdbuf_mem)
+			continue;
+
+		/* check if pin-mem is same as previous */
+		if (reloc->target != mem_id) {
+			target_ref = mem_op().get(job->memmgr, reloc->target);
+			if (IS_ERR(target_ref))
+				return PTR_ERR(target_ref);
+
+			target_phys = mem_op().pin(job->memmgr, target_ref);
+			if (IS_ERR((void *)target_phys)) {
+				mem_op().put(job->memmgr, target_ref);
+				return target_phys;
+			}
+
+			mem_id = reloc->target;
+			job->unpins[job->num_unpins++] = target_ref;
+		}
+
+		__raw_writel(
+			(target_phys + reloc->target_offset) >> shift->shift,
+			(cmdbuf_addr + reloc->cmdbuf_offset));
+
+		/* Different gathers might have same mem_id. This ensures we
+		 * perform reloc only once per gather memid. */
+		reloc->cmdbuf_mem = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Check driver supplied waitchk structs for syncpt thresholds
+ * that have already been satisfied and NULL the comparison (to
+ * avoid a wrap condition in the HW).
+ */
+static int do_waitchks(struct nvhost_job *job, struct nvhost_syncpt *sp,
+		u32 patch_mem, void *patch_addr)
+{
+	int i;
+
+	/* compare syncpt vs wait threshold */
+	for (i = 0; i < job->num_waitchk; i++) {
+		struct nvhost_waitchk *wait = &job->waitchk[i];
+
+		/* skip all other gathers */
+		if (patch_mem != wait->mem)
+			continue;
+
+		trace_nvhost_syncpt_wait_check(wait->mem, wait->offset,
+				wait->syncpt_id, wait->thresh,
+				nvhost_syncpt_read(sp, wait->syncpt_id));
+		if (nvhost_syncpt_is_expired(sp,
+					wait->syncpt_id, wait->thresh)) {
+			/*
+			 * NULL an already satisfied WAIT_SYNCPT host method,
+			 * by patching its args in the command stream. The
+			 * method data is changed to reference a reserved
+			 * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
+			 * syncpt with a matching threshold value of 0, so
+			 * is guaranteed to be popped by the host HW.
+			 */
+			dev_dbg(&syncpt_to_dev(sp)->dev->dev,
+			    "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
+			    wait->syncpt_id,
+			    syncpt_op().name(sp, wait->syncpt_id),
+			    wait->thresh,
+			    nvhost_syncpt_read_min(sp, wait->syncpt_id));
+
+			/* patch the wait */
+			nvhost_syncpt_patch_wait(sp,
+					(patch_addr + wait->offset));
+		}
+
+		wait->mem = 0;
+	}
+	return 0;
+}
+
+int nvhost_job_pin(struct nvhost_job *job, struct nvhost_syncpt *sp)
+{
+	int err = 0, i = 0;
+	phys_addr_t gather_phys = 0;
+	void *gather_addr = NULL;
+	unsigned long waitchk_mask = job->waitchk_mask;
+
+	/* get current syncpt values for waitchk */
+	for_each_set_bit(i, &waitchk_mask, sizeof(job->waitchk_mask) * 8)
+		nvhost_syncpt_update_min(sp, i);
+
+	/* pin gathers */
+	for (i = 0; i < job->num_gathers; i++) {
+		struct nvhost_job_gather *g = &job->gathers[i];
+
+		/* process each gather mem only once */
+		if (!g->ref) {
+			g->ref = mem_op().get(job->memmgr,
+					job->gathers[i].mem_id);
+			if (IS_ERR(g->ref)) {
+				err = PTR_ERR(g->ref);
+				g->ref = NULL;
+				break;
+			}
+
+			gather_phys = mem_op().pin(job->memmgr, g->ref);
+			if (IS_ERR((void *)gather_phys)) {
+				mem_op().put(job->memmgr, g->ref);
+				err = gather_phys;
+				break;
+			}
+
+			/* store the gather ref into unpin array */
+			job->unpins[job->num_unpins++] = g->ref;
+
+			gather_addr = mem_op().mmap(g->ref);
+			if (!gather_addr) {
+				err = -ENOMEM;
+				break;
+			}
+
+			err = do_relocs(job, g->mem_id, gather_addr);
+			if (!err)
+				err = do_waitchks(job, sp,
+						g->mem_id, gather_addr);
+			mem_op().munmap(g->ref, gather_addr);
+
+			if (err)
+				break;
+		}
+		g->mem = gather_phys + g->offset;
+	}
+	wmb();
+
+	return err;
+}
+
+void nvhost_job_unpin(struct nvhost_job *job)
+{
+	int i;
+
+	for (i = 0; i < job->num_unpins; i++) {
+		mem_op().unpin(job->memmgr, job->unpins[i]);
+		mem_op().put(job->memmgr, job->unpins[i]);
+	}
+
+	memset(job->unpins, BAD_MAGIC,
+			job->num_unpins * sizeof(struct mem_handle *));
+	job->num_unpins = 0;
+}
+
+/**
+ * Debug routine used to dump job entries
+ */
+void nvhost_job_dump(struct device *dev, struct nvhost_job *job)
+{
+	dev_dbg(dev, "    SYNCPT_ID   %d\n",
+		job->syncpt_id);
+	dev_dbg(dev, "    SYNCPT_VAL  %d\n",
+		job->syncpt_end);
+	dev_dbg(dev, "    FIRST_GET   0x%x\n",
+		job->first_get);
+	dev_dbg(dev, "    TIMEOUT     %d\n",
+		job->timeout);
+	dev_dbg(dev, "    CTX 0x%p\n",
+		job->hwctx);
+	dev_dbg(dev, "    NUM_SLOTS   %d\n",
+		job->num_slots);
+	dev_dbg(dev, "    NUM_HANDLES %d\n",
+		job->num_unpins);
+}
diff --git a/drivers/staging/tegra/video/host/nvhost_job.h b/drivers/staging/tegra/video/host/nvhost_job.h
new file mode 100644
index 000000000000..3b444579c543
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_job.h
@@ -0,0 +1,148 @@
+/*
+ * drivers/video/tegra/host/nvhost_job.h
+ *
+ * Tegra Graphics Host Job
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_JOB_H
+#define __NVHOST_JOB_H
+
+#include <linux/nvhost_ioctl.h>
+
+struct nvhost_channel;
+struct nvhost_hwctx;
+struct nvhost_waitchk;
+struct nvhost_syncpt;
+
+struct nvhost_job_gather {
+	u32 words;
+	phys_addr_t mem;
+	u32 mem_id;
+	int offset;
+	struct mem_handle *ref;
+};
+
+/*
+ * Each submit is tracked as an nvhost_job.
+ */
+struct nvhost_job {
+	/* When refcount goes to zero, job can be freed */
+	struct kref ref;
+
+	/* List entry */
+	struct list_head list;
+
+	/* Channel where job is submitted to */
+	struct nvhost_channel *ch;
+
+	/* Hardware context valid for this client */
+	struct nvhost_hwctx *hwctx;
+	int clientid;
+
+	/* Memory manager used for pinning & unpinning memory */
+	struct mem_mgr *memmgr;
+
+	/* Gathers and their memory */
+	struct nvhost_job_gather *gathers;
+	int num_gathers;
+
+	/* Wait checks to be processed at submit time */
+	struct nvhost_waitchk *waitchk;
+	int num_waitchk;
+	u32 waitchk_mask;
+
+	/* Array of handles to be pinned & unpinned */
+	struct nvhost_reloc *relocarray;
+	struct nvhost_reloc_shift *relocshiftarray;
+	int num_relocs;
+	struct mem_handle **unpins;
+	int num_unpins;
+
+	/* Sync point id, number of increments and end related to the submit */
+	u32 syncpt_id;
+	u32 syncpt_incrs;
+	u32 syncpt_end;
+
+	/* Priority of this submit. */
+	int priority;
+
+	/* Maximum time to wait for this job */
+	int timeout;
+
+	/* Null kickoff prevents submit from being sent to hardware */
+	bool null_kickoff;
+
+	/* Index and number of slots used in the push buffer */
+	int first_get;
+	int num_slots;
+
+	/* Context to be freed */
+	struct nvhost_hwctx *hwctxref;
+};
+
+/*
+ * Allocate memory for a job. Just enough memory will be allocated to
+ * accommodate the submit announced in the submit header.
+ */
+struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
+		struct nvhost_hwctx *hwctx,
+		struct nvhost_submit_hdr_ext *hdr,
+		struct mem_mgr *memmgr,
+		int priority, int clientid);
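+
+/*
+ * Rough lifecycle sketch (illustrative; error handling and channel
+ * locking omitted):
+ *
+ *	job = nvhost_job_alloc(ch, hwctx, &hdr, memmgr, prio, clientid);
+ *	nvhost_job_add_gather(job, mem_id, words, offset);
+ *	nvhost_job_pin(job, &host->syncpt);
+ *	... submit to the channel ...
+ *	nvhost_job_unpin(job);
+ *	nvhost_job_put(job);
+ */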
+
+/*
+ * Add a gather to a job.
+ */
+void nvhost_job_add_gather(struct nvhost_job *job,
+		u32 mem_id, u32 words, u32 offset);
+
+/*
+ * Increment reference going to nvhost_job.
+ */
+void nvhost_job_get(struct nvhost_job *job);
+
+/*
+ * Increment reference for a hardware context.
+ */
+void nvhost_job_get_hwctx(struct nvhost_job *job, struct nvhost_hwctx *hwctx);
+
+/*
+ * Decrement reference job, free if goes to zero.
+ */
+void nvhost_job_put(struct nvhost_job *job);
+
+/*
+ * Pin memory related to job. This handles relocation of addresses to the
+ * host1x address space. Handles both the gather memory and any other memory
+ * referred to from the gather buffers.
+ *
+ * Handles also patching out host waits that would wait for an expired sync
+ * point value.
+ */
+int nvhost_job_pin(struct nvhost_job *job, struct nvhost_syncpt *sp);
+
+/*
+ * Unpin memory related to job.
+ */
+void nvhost_job_unpin(struct nvhost_job *job);
+
+/*
+ * Dump contents of job to debug output.
+ */
+void nvhost_job_dump(struct device *dev, struct nvhost_job *job);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/nvhost_memmgr.c b/drivers/staging/tegra/video/host/nvhost_memmgr.c
new file mode 100644
index 000000000000..6baef9a77dfb
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_memmgr.c
@@ -0,0 +1,35 @@
+/*
+ * drivers/video/tegra/host/nvhost_memmgr.c
+ *
+ * Tegra Graphics Host Memory Management Abstraction
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+
+#include "nvhost_memmgr.h"
+#include "nvmap.h"
+
+int nvhost_memmgr_init(struct nvhost_chip_support *chip)
+{
+#ifdef CONFIG_TEGRA_GRHOST_USE_NVMAP
+	return nvhost_init_nvmap_support(chip);
+#endif
+	BUG_ON(!"No memory manager selected");
+	return -ENODEV;
+}
diff --git a/drivers/staging/tegra/video/host/nvhost_memmgr.h b/drivers/staging/tegra/video/host/nvhost_memmgr.h
new file mode 100644
index 000000000000..d61379b6ff55
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_memmgr.h
@@ -0,0 +1,38 @@
+/*
+ * drivers/video/tegra/host/nvhost_memmgr.h
+ *
+ * Tegra Graphics Host Memory Management Abstraction header
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _NVHOST_MEM_MGR_H_
+#define _NVHOST_MEM_MGR_H_
+
+struct nvhost_chip_support;
+
+enum mem_mgr_flag {
+	mem_mgr_flag_uncacheable = 0,
+	mem_mgr_flag_write_combine = 1,
+};
+
+struct mem_mgr_handle {
+	struct mem_mgr *client;
+	struct mem_handle *handle;
+};
+
+int nvhost_memmgr_init(struct nvhost_chip_support *chip);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/nvhost_syncpt.c b/drivers/staging/tegra/video/host/nvhost_syncpt.c
new file mode 100644
index 000000000000..c4ecb79fa076
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_syncpt.c
@@ -0,0 +1,510 @@
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.c
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/nvhost_ioctl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <trace/events/nvhost.h>
+#include "nvhost_syncpt.h"
+#include "nvhost_acm.h"
+#include "dev.h"
+#include "chip_support.h"
+
+#define MAX_SYNCPT_LENGTH 5
+/* Names of the sysfs nodes for the min and max values */
+static const char *min_name = "min";
+static const char *max_name = "max";
+
+/**
+ * Resets syncpoint and waitbase values to sw shadows
+ */
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
+{
+	u32 i;
+	BUG_ON(!(syncpt_op().reset && syncpt_op().reset_wait_base));
+
+	for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++)
+		syncpt_op().reset(sp, i);
+	for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
+		syncpt_op().reset_wait_base(sp, i);
+	wmb();
+}
+
+/**
+ * Updates sw shadow state for client managed registers
+ */
+void nvhost_syncpt_save(struct nvhost_syncpt *sp)
+{
+	u32 i;
+	BUG_ON(!(syncpt_op().update_min && syncpt_op().read_wait_base));
+
+	for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
+		if (nvhost_syncpt_client_managed(sp, i))
+			syncpt_op().update_min(sp, i);
+		else
+			BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
+	}
+
+	for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
+		syncpt_op().read_wait_base(sp, i);
+}
+
+/**
+ * Updates the last value read from hardware.
+ */
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
+{
+	u32 val;
+
+	BUG_ON(!syncpt_op().update_min);
+
+	val = syncpt_op().update_min(sp, id);
+	trace_nvhost_syncpt_update_min(id, val);
+
+	return val;
+}
+
+/**
+ * Get the current syncpoint value
+ */
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
+{
+	u32 val;
+	BUG_ON(!syncpt_op().update_min);
+	nvhost_module_busy(syncpt_to_dev(sp)->dev);
+	val = syncpt_op().update_min(sp, id);
+	nvhost_module_idle(syncpt_to_dev(sp)->dev);
+	return val;
+}
+
+/**
+ * Get the current syncpoint base
+ */
+u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+	u32 val;
+	BUG_ON(!syncpt_op().read_wait_base);
+	nvhost_module_busy(syncpt_to_dev(sp)->dev);
+	syncpt_op().read_wait_base(sp, id);
+	val = sp->base_val[id];
+	nvhost_module_idle(syncpt_to_dev(sp)->dev);
+	return val;
+}
+
+/**
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
+{
+	BUG_ON(!syncpt_op().cpu_incr);
+	syncpt_op().cpu_incr(sp, id);
+}
+
+/**
+ * Increment syncpoint value from cpu, updating cache
+ */
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
+{
+	if (nvhost_syncpt_client_managed(sp, id))
+		nvhost_syncpt_incr_max(sp, id, 1);
+	nvhost_module_busy(syncpt_to_dev(sp)->dev);
+	nvhost_syncpt_cpu_incr(sp, id);
+	nvhost_module_idle(syncpt_to_dev(sp)->dev);
+}
+
+/**
+ * Updates the sync point from hardware, and returns true if the syncpoint is
+ * expired, false if we may need to wait
+ */
+static bool syncpt_update_min_is_expired(
+	struct nvhost_syncpt *sp,
+	u32 id,
+	u32 thresh)
+{
+	syncpt_op().update_min(sp, id);
+	return nvhost_syncpt_is_expired(sp, id, thresh);
+}
+
+/**
+ * Main entrypoint for syncpoint value waits.
+ */
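+/*
+ * Timing note (illustrative, not from the original sources): the wait is
+ * chopped into SYNCPT_CHECK_PERIOD (2*HZ) chunks, so e.g. a 10*HZ timeout
+ * becomes up to five shorter waits; each chunk that elapses without the
+ * threshold being reached logs a "stuck" warning, up to
+ * MAX_STUCK_CHECK_COUNT times.
+ */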
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
+			u32 thresh, u32 timeout, u32 *value)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+	void *ref;
+	void *waiter;
+	int err = 0, check_count = 0, low_timeout = 0;
+	u32 val;
+
+	if (value)
+		*value = 0;
+
+	/* first check cache */
+	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
+		if (value)
+			*value = nvhost_syncpt_read_min(sp, id);
+		return 0;
+	}
+
+	/* keep host alive */
+	nvhost_module_busy(syncpt_to_dev(sp)->dev);
+
+	/* try to read from register */
+	val = syncpt_op().update_min(sp, id);
+	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
+		if (value)
+			*value = val;
+		goto done;
+	}
+
+	if (!timeout) {
+		err = -EAGAIN;
+		goto done;
+	}
+
+	/* schedule a wakeup when the syncpoint value is reached */
+	waiter = nvhost_intr_alloc_waiter();
+	if (!waiter) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
+				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
+				waiter,
+				&ref);
+	if (err)
+		goto done;
+
+	err = -EAGAIN;
+	/* Caller-specified timeout may be impractically low */
+	if (timeout < SYNCPT_CHECK_PERIOD)
+		low_timeout = timeout;
+
+	/* wait for the syncpoint, or timeout, or signal */
+	while (timeout) {
+		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
+		int remain = wait_event_interruptible_timeout(wq,
+				syncpt_update_min_is_expired(sp, id, thresh),
+				check);
+		if (remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)) {
+			if (value)
+				*value = nvhost_syncpt_read_min(sp, id);
+			err = 0;
+			break;
+		}
+		if (remain < 0) {
+			err = remain;
+			break;
+		}
+		if (timeout != NVHOST_NO_TIMEOUT)
+			timeout -= check;
+		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
+			dev_warn(&syncpt_to_dev(sp)->dev->dev,
+				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
+				 current->comm, id, syncpt_op().name(sp, id),
+				 thresh, timeout);
+			syncpt_op().debug(sp);
+			if (check_count == MAX_STUCK_CHECK_COUNT) {
+				if (low_timeout) {
+					dev_warn(&syncpt_to_dev(sp)->dev->dev,
+						"is timeout %d too low?\n",
+						low_timeout);
+				}
+				nvhost_debug_dump(syncpt_to_dev(sp));
+			}
+			check_count++;
+		}
+	}
+	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), id, ref);
+
+done:
+	nvhost_module_idle(syncpt_to_dev(sp)->dev);
+	return err;
+}
+
+/**
+ * Returns true if syncpoint is expired, false if we may need to wait
+ */
+bool nvhost_syncpt_is_expired(
+	struct nvhost_syncpt *sp,
+	u32 id,
+	u32 thresh)
+{
+	u32 current_val;
+	u32 future_val;
+	smp_rmb();
+	current_val = (u32)atomic_read(&sp->min_val[id]);
+	future_val = (u32)atomic_read(&sp->max_val[id]);
+
+	/* Note the use of unsigned arithmetic here (mod 1<<32).
+	 *
+	 * c = current_val = min_val	= the current value of the syncpoint.
+	 * t = thresh			= the value we are checking
+	 * f = future_val  = max_val	= the value c will reach when all
+	 *			   	  outstanding increments have completed.
+	 *
+	 * Note that c always chases f until it reaches f.
+	 *
+	 * Dtf = (f - t)
+	 * Dtc = (c - t)
+	 *
+	 *  Consider all cases:
+	 *
+	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
+	 *	B) .....c.....f..t..	Dtf > Dtc	expired
+	 *	C) ..t..c.....f.....	Dtf > Dtc	expired	   (Dct very large)
+	 *
+	 *  Any case where f==c: always expired (for any t).  	Dtf == Dtc
+	 *  Any case where t==c: always expired (for any f).  	Dtf >= Dtc (because Dtc==0)
+	 *  Any case where t==f!=c: always wait.	 	Dtf <  Dtc (because Dtf==0,
+	 *							Dtc!=0)
+	 *
+	 *  Other cases:
+	 *
+	 *	D) .....t..f..c.....	Dtf < Dtc	need to wait
+	 *	E) .....f..c..t.....	Dtf < Dtc	need to wait
+	 *	F) .....f..t..c.....	Dtf > Dtc	expired
+	 *
+	 *   So:
+	 *	   Dtf >= Dtc implies EXPIRED	(return true)
+	 *	   Dtf <  Dtc implies WAIT	(return false)
+	 *
+	 * Note: If t is expired then we *cannot* wait on it. We would wait
+	 * forever (hang the system).
+	 *
+	 * Note: do NOT get clever and remove the -thresh from both sides. It
+	 * is NOT the same.
+	 *
+	 * If the future value is zero, we have a client managed sync point. In that
+	 * case we do a direct comparison.
+	 */
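+	/*
+	 * Worked example (hypothetical values): with t == 0xffffffff,
+	 * f == 0x00000001 and c == 0xfffffffe, Dtf == 2 and
+	 * Dtc == 0xffffffff, so Dtf < Dtc and we must wait; once c wraps
+	 * to 0x00000001 (== f), Dtf == Dtc and the wait has expired.
+	 */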
+	if (!nvhost_syncpt_client_managed(sp, id))
+		return future_val - thresh >= current_val - thresh;
+	else
+		return (s32)(current_val - thresh) >= 0;
+}
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
+{
+	syncpt_op().debug(sp);
+}
+
+int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
+{
+	struct nvhost_master *host = syncpt_to_dev(sp);
+	u32 reg;
+
+	nvhost_module_busy(host->dev);
+	reg = syncpt_op().mutex_try_lock(sp, idx);
+	if (reg) {
+		nvhost_module_idle(host->dev);
+		return -EBUSY;
+	}
+	atomic_inc(&sp->lock_counts[idx]);
+	return 0;
+}
+
+void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx)
+{
+	syncpt_op().mutex_unlock(sp, idx);
+	nvhost_module_idle(syncpt_to_dev(sp)->dev);
+	atomic_dec(&sp->lock_counts[idx]);
+}
+
+/* remove a wait pointed to by patch_addr */
+int nvhost_syncpt_patch_wait(struct nvhost_syncpt *sp, void *patch_addr)
+{
+	return syncpt_op().patch_wait(sp, patch_addr);
+}
+
+/* Displays the current value of the sync point via sysfs */
+static ssize_t syncpt_min_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct nvhost_syncpt_attr *syncpt_attr =
+		container_of(attr, struct nvhost_syncpt_attr, attr);
+
+	return snprintf(buf, PAGE_SIZE, "%u",
+			nvhost_syncpt_read(&syncpt_attr->host->syncpt,
+				syncpt_attr->id));
+}
+
+static ssize_t syncpt_max_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct nvhost_syncpt_attr *syncpt_attr =
+		container_of(attr, struct nvhost_syncpt_attr, attr);
+
+	return snprintf(buf, PAGE_SIZE, "%u",
+			nvhost_syncpt_read_max(&syncpt_attr->host->syncpt,
+				syncpt_attr->id));
+}
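+
+/*
+ * Illustrative resulting layout (the exact path depends on the parent
+ * device):
+ *
+ *	/sys/.../host1x/syncpt/<id>/min
+ *	/sys/.../host1x/syncpt/<id>/max
+ *
+ * where min is the last value read back from hardware and max is the last
+ * value software has queued.
+ */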
+
+int nvhost_syncpt_init(struct nvhost_device *dev,
+		struct nvhost_syncpt *sp)
+{
+	int i;
+	struct nvhost_master *host = syncpt_to_dev(sp);
+	int err = 0;
+
+	/* Allocate structs for min, max and base values */
+	sp->min_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
+			GFP_KERNEL);
+	sp->max_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
+			GFP_KERNEL);
+	sp->base_val = kzalloc(sizeof(u32) * nvhost_syncpt_nb_bases(sp),
+			GFP_KERNEL);
+	sp->lock_counts =
+		kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_mlocks(sp),
+			GFP_KERNEL);
+
+	if (!(sp->min_val && sp->max_val && sp->base_val && sp->lock_counts)) {
+		/* frees happen in the deinit */
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	sp->kobj = kobject_create_and_add("syncpt", &dev->dev.kobj);
+	if (!sp->kobj) {
+		err = -EIO;
+		goto fail;
+	}
+
+	/* Allocate two attributes for each sync point: min and max */
+	sp->syncpt_attrs = kzalloc(sizeof(*sp->syncpt_attrs)
+			* nvhost_syncpt_nb_pts(sp) * 2, GFP_KERNEL);
+	if (!sp->syncpt_attrs) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	/* Fill in the attributes */
+	for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
+		char name[MAX_SYNCPT_LENGTH];
+		struct kobject *kobj;
+		struct nvhost_syncpt_attr *min = &sp->syncpt_attrs[i*2];
+		struct nvhost_syncpt_attr *max = &sp->syncpt_attrs[i*2+1];
+
+		/* Create one directory per sync point */
+		snprintf(name, sizeof(name), "%d", i);
+		kobj = kobject_create_and_add(name, sp->kobj);
+		if (!kobj) {
+			err = -EIO;
+			goto fail;
+		}
+
+		sysfs_attr_init(&min->attr.attr);
+		min->id = i;
+		min->host = host;
+		min->attr.attr.name = min_name;
+		min->attr.attr.mode = S_IRUGO;
+		min->attr.show = syncpt_min_show;
+		if (sysfs_create_file(kobj, &min->attr.attr)) {
+			err = -EIO;
+			goto fail;
+		}
+
+		sysfs_attr_init(&max->attr.attr);
+		max->id = i;
+		max->host = host;
+		max->attr.attr.name = max_name;
+		max->attr.attr.mode = S_IRUGO;
+		max->attr.show = syncpt_max_show;
+		if (sysfs_create_file(kobj, &max->attr.attr)) {
+			err = -EIO;
+			goto fail;
+		}
+	}
+
+	return err;
+
+fail:
+	nvhost_syncpt_deinit(sp);
+	return err;
+}
+
+void nvhost_syncpt_deinit(struct nvhost_syncpt *sp)
+{
+	kobject_put(sp->kobj);
+
+	kfree(sp->min_val);
+	sp->min_val = NULL;
+
+	kfree(sp->max_val);
+	sp->max_val = NULL;
+
+	kfree(sp->base_val);
+	sp->base_val = NULL;
+
+	kfree(sp->lock_counts);
+	sp->lock_counts = NULL;
+
+	kfree(sp->syncpt_attrs);
+	sp->syncpt_attrs = NULL;
+}
+
+int nvhost_syncpt_client_managed(struct nvhost_syncpt *sp, u32 id)
+{
+	return BIT(id) & syncpt_to_dev(sp)->info.client_managed;
+}
+
+int nvhost_syncpt_nb_pts(struct nvhost_syncpt *sp)
+{
+	return syncpt_to_dev(sp)->info.nb_pts;
+}
+
+int nvhost_syncpt_nb_bases(struct nvhost_syncpt *sp)
+{
+	return syncpt_to_dev(sp)->info.nb_bases;
+}
+
+int nvhost_syncpt_nb_mlocks(struct nvhost_syncpt *sp)
+{
+	return syncpt_to_dev(sp)->info.nb_mlocks;
+}
+
+/* public sync point API */
+u32 nvhost_syncpt_incr_max_ext(struct nvhost_device *dev, u32 id, u32 incrs)
+{
+	struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+	return nvhost_syncpt_incr_max(sp, id, incrs);
+}
+
+void nvhost_syncpt_cpu_incr_ext(struct nvhost_device *dev, u32 id)
+{
+	struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+	nvhost_syncpt_cpu_incr(sp, id);
+}
+
+u32 nvhost_syncpt_read_ext(struct nvhost_device *dev, u32 id)
+{
+	struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+	return nvhost_syncpt_read(sp, id);
+}
+
+int nvhost_syncpt_wait_timeout_ext(struct nvhost_device *dev, u32 id, u32 thresh,
+	u32 timeout, u32 *value)
+{
+	struct nvhost_syncpt *sp = &(nvhost_get_host(dev)->syncpt);
+	return nvhost_syncpt_wait_timeout(sp, id, thresh, timeout, value);
+}
diff --git a/drivers/staging/tegra/video/host/nvhost_syncpt.h b/drivers/staging/tegra/video/host/nvhost_syncpt.h
new file mode 100644
index 000000000000..9ee4f3a8d49d
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvhost_syncpt.h
@@ -0,0 +1,151 @@
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.h
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_SYNCPT_H
+#define __NVHOST_SYNCPT_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/nvhost.h>
+#include <linux/atomic.h>
+
+/* host managed and invalid syncpt id */
+#define NVSYNCPT_GRAPHICS_HOST		     (0)
+
+/* Attribute struct for sysfs min and max attributes */
+struct nvhost_syncpt_attr {
+	struct kobj_attribute attr;
+	struct nvhost_master *host;
+	int id;
+};
+
+struct nvhost_syncpt {
+	struct kobject *kobj;
+	atomic_t *min_val;
+	atomic_t *max_val;
+	u32 *base_val;
+	atomic_t *lock_counts;
+	const char **syncpt_names;
+	struct nvhost_syncpt_attr *syncpt_attrs;
+};
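+
+/*
+ * Shadow-value note (illustrative summary): min_val[id] mirrors the last
+ * syncpoint value read back from hardware, while max_val[id] is the
+ * highest value software has promised to reach via queued increments; a
+ * syncpoint is quiescent when min == max (see nvhost_syncpt_min_eq_max()
+ * below).
+ */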
+
+int nvhost_syncpt_init(struct nvhost_device *, struct nvhost_syncpt *);
+void nvhost_syncpt_deinit(struct nvhost_syncpt *);
+
+#define syncpt_to_dev(sp) container_of(sp, struct nvhost_master, syncpt)
+#define SYNCPT_CHECK_PERIOD (2 * HZ)
+#define MAX_STUCK_CHECK_COUNT 15
+
+/**
+ * Updates the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_incr_max(struct nvhost_syncpt *sp,
+					u32 id, u32 incrs)
+{
+	return (u32)atomic_add_return(incrs, &sp->max_val[id]);
+}
+
+/**
+ * Sets the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_set_max(struct nvhost_syncpt *sp,
+					u32 id, u32 val)
+{
+	atomic_set(&sp->max_val[id], val);
+	smp_wmb();
+	return val;
+}
+
+static inline u32 nvhost_syncpt_read_max(struct nvhost_syncpt *sp, u32 id)
+{
+	smp_rmb();
+	return (u32)atomic_read(&sp->max_val[id]);
+}
+
+static inline u32 nvhost_syncpt_read_min(struct nvhost_syncpt *sp, u32 id)
+{
+	smp_rmb();
+	return (u32)atomic_read(&sp->min_val[id]);
+}
+
+int nvhost_syncpt_client_managed(struct nvhost_syncpt *sp, u32 id);
+int nvhost_syncpt_nb_pts(struct nvhost_syncpt *sp);
+int nvhost_syncpt_nb_bases(struct nvhost_syncpt *sp);
+int nvhost_syncpt_nb_mlocks(struct nvhost_syncpt *sp);
+
+static inline bool nvhost_syncpt_check_max(struct nvhost_syncpt *sp,
+		u32 id, u32 real)
+{
+	u32 max;
+	if (nvhost_syncpt_client_managed(sp, id))
+		return true;
+	max = nvhost_syncpt_read_max(sp, id);
+	return (s32)(max - real) >= 0;
+}
+
+/**
+ * Returns true if syncpoint min == max
+ */
+static inline bool nvhost_syncpt_min_eq_max(struct nvhost_syncpt *sp, u32 id)
+{
+	int min, max;
+	smp_rmb();
+	min = atomic_read(&sp->min_val[id]);
+	max = atomic_read(&sp->max_val[id]);
+	return (min == max);
+}
+
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id);
+
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id);
+bool nvhost_syncpt_is_expired(struct nvhost_syncpt *sp, u32 id, u32 thresh);
+
+void nvhost_syncpt_save(struct nvhost_syncpt *sp);
+
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp);
+
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id);
+u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id);
+
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh,
+			u32 timeout, u32 *value);
+
+static inline int nvhost_syncpt_wait(struct nvhost_syncpt *sp, u32 id, u32 thresh)
+{
+	return nvhost_syncpt_wait_timeout(sp, id, thresh,
+					  MAX_SCHEDULE_TIMEOUT, NULL);
+}
+
+int nvhost_syncpt_patch_wait(struct nvhost_syncpt *sp, void *patch_addr);
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp);
+
+static inline int nvhost_syncpt_is_valid(struct nvhost_syncpt *sp, u32 id)
+{
+	return id != NVSYNCPT_INVALID && id < nvhost_syncpt_nb_pts(sp);
+}
+
+int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx);
+
+void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/nvmap.c b/drivers/staging/tegra/video/host/nvmap.c
new file mode 100644
index 000000000000..fd82f40c59ff
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvmap.c
@@ -0,0 +1,100 @@
+/*
+ * drivers/video/tegra/host/nvmap.c
+ *
+ * Tegra Graphics Host Nvmap support
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "chip_support.h"
+#include <linux/nvmap.h>
+
+struct mem_mgr *nvhost_nvmap_alloc_mgr(void)
+{
+	return (struct mem_mgr *)nvmap_create_client(nvmap_dev, "nvhost");
+}
+
+void nvhost_nvmap_put_mgr(struct mem_mgr *mgr)
+{
+	nvmap_client_put((struct nvmap_client *)mgr);
+}
+
+struct mem_mgr *nvhost_nvmap_get_mgr(struct mem_mgr *mgr)
+{
+	return (struct mem_mgr *)nvmap_client_get((struct nvmap_client *)mgr);
+}
+
+struct mem_mgr *nvhost_nvmap_get_mgr_file(int fd)
+{
+	return (struct mem_mgr *)nvmap_client_get_file(fd);
+}
+
+struct mem_handle *nvhost_nvmap_alloc(struct mem_mgr *mgr,
+		size_t size, size_t align, int flags)
+{
+	return (struct mem_handle *)nvmap_alloc((struct nvmap_client *)mgr,
+			size, align, flags, 0);
+}
+
+void nvhost_nvmap_put(struct mem_mgr *mgr, struct mem_handle *handle)
+{
+	return nvmap_free((struct nvmap_client *)mgr,
+			(struct nvmap_handle_ref *)handle);
+}
+
+phys_addr_t nvhost_nvmap_pin(struct mem_mgr *mgr, struct mem_handle *handle)
+{
+	return nvmap_pin((struct nvmap_client *)mgr,
+			(struct nvmap_handle_ref *)handle);
+}
+
+void nvhost_nvmap_unpin(struct mem_mgr *mgr, struct mem_handle *handle)
+{
+	return nvmap_unpin((struct nvmap_client *)mgr,
+			(struct nvmap_handle_ref *)handle);
+}
+
+void *nvhost_nvmap_mmap(struct mem_handle *handle)
+{
+	return nvmap_mmap((struct nvmap_handle_ref *)handle);
+}
+
+void nvhost_nvmap_munmap(struct mem_handle *handle, void *addr)
+{
+	nvmap_munmap((struct nvmap_handle_ref *)handle, addr);
+}
+
+struct mem_handle *nvhost_nvmap_get(struct mem_mgr *mgr, u32 id)
+{
+	return (struct mem_handle *)
+		nvmap_duplicate_handle_id((struct nvmap_client *)mgr, id);
+}
+
+int nvhost_init_nvmap_support(struct nvhost_chip_support *chip)
+{
+	chip->mem.alloc_mgr = nvhost_nvmap_alloc_mgr;
+	chip->mem.put_mgr = nvhost_nvmap_put_mgr;
+	chip->mem.get_mgr = nvhost_nvmap_get_mgr;
+	chip->mem.get_mgr_file = nvhost_nvmap_get_mgr_file;
+	chip->mem.alloc = nvhost_nvmap_alloc;
+	chip->mem.put = nvhost_nvmap_put;
+	chip->mem.get = nvhost_nvmap_get;
+	chip->mem.pin = nvhost_nvmap_pin;
+	chip->mem.unpin = nvhost_nvmap_unpin;
+	chip->mem.mmap = nvhost_nvmap_mmap;
+	chip->mem.munmap = nvhost_nvmap_munmap;
+
+	return 0;
+}
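+
+/*
+ * Dispatch note (illustrative): once the ops above are installed, generic
+ * code reaches nvmap through chip_support, e.g. mem_op().pin(mgr, handle)
+ * in nvhost_job.c resolves to nvhost_nvmap_pin() and thus nvmap_pin().
+ */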
diff --git a/drivers/staging/tegra/video/host/nvmap.h b/drivers/staging/tegra/video/host/nvmap.h
new file mode 100644
index 000000000000..90f64d44f434
--- /dev/null
+++ b/drivers/staging/tegra/video/host/nvmap.h
@@ -0,0 +1,27 @@
+/*
+ * drivers/video/tegra/host/nvmap.h
+ *
+ * Tegra Graphics Host nvmap memory manager
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVHOST_NVMAP_H
+#define __NVHOST_NVMAP_H
+
+struct nvhost_chip_support;
+int nvhost_init_nvmap_support(struct nvhost_chip_support *op);
+
+#endif
diff --git a/drivers/staging/tegra/video/host/t20/Makefile b/drivers/staging/tegra/video/host/t20/Makefile
new file mode 100644
index 000000000000..d0c5c23b4da2
--- /dev/null
+++ b/drivers/staging/tegra/video/host/t20/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+nvhost-t20-objs  = \
+	t20.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-t20.o
diff --git a/drivers/staging/tegra/video/host/t20/t20.c b/drivers/staging/tegra/video/host/t20/t20.c
new file mode 100644
index 000000000000..77ab760236f7
--- /dev/null
+++ b/drivers/staging/tegra/video/host/t20/t20.c
@@ -0,0 +1,256 @@
+/*
+ * drivers/video/tegra/host/t20/t20.c
+ *
+ * Tegra Graphics Init for T20 Architecture Chips
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/nvhost_ioctl.h>
+#include <linux/of.h>
+#include "t20.h"
+#include "gr3d/gr3d_t20.h"
+#include "mpe/mpe.h"
+#include "host1x/host1x.h"
+#include "nvhost_channel.h"
+#include "nvhost_memmgr.h"
+#include "host1x/host1x01_hardware.h"
+#include "host1x/host1x_syncpt.h"
+#include "chip_support.h"
+
+#include <mach/powergate.h>
+
+#define NVMODMUTEX_2D_FULL	(1)
+#define NVMODMUTEX_2D_SIMPLE	(2)
+#define NVMODMUTEX_2D_SB_A	(3)
+#define NVMODMUTEX_2D_SB_B	(4)
+#define NVMODMUTEX_3D		(5)
+#define NVMODMUTEX_DISPLAYA	(6)
+#define NVMODMUTEX_DISPLAYB	(7)
+#define NVMODMUTEX_VI		(8)
+#define NVMODMUTEX_DSI		(9)
+
+static int t20_num_alloc_channels;
+
+static const char *s_syncpt_names[32] = {
+	"gfx_host",
+	"", "", "", "", "", "", "",
+	"disp0_a", "disp1_a", "avp_0",
+	"csi_vi_0", "csi_vi_1",
+	"vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4",
+	"2d_0", "2d_1",
+	"disp0_b", "disp1_b",
+	"3d",
+	"mpe",
+	"disp0_c", "disp1_c",
+	"vblank0", "vblank1",
+	"mpe_ebm_eof", "mpe_wr_safe",
+	"2d_tinyblt",
+	"dsi"
+};
+
+static struct host1x_device_info host1x01_info = {
+	.nb_channels	= 8,
+	.nb_pts		= 32,
+	.nb_mlocks	= 16,
+	.nb_bases	= 8,
+	.syncpt_names	= s_syncpt_names,
+	.client_managed	= NVSYNCPTS_CLIENT_MANAGED,
+};
+
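+/*
+ * Static device descriptors for the T20 host1x clients. Each .clocks entry
+ * is a {name, rate} pair (rate presumably in Hz, so 166000000 is 166 MHz),
+ * terminated by an empty initializer; the NVHOST_* macros used in the
+ * initializers below expand to designated-initializer fragments for the
+ * powergate/clockgate fields.
+ */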
+struct nvhost_device tegra_host1x01_t20_device = {
+	.dev		= {.platform_data = &host1x01_info},
+	.name		= "host1x",
+	.id		= -1,
+	.clocks		= {{"host1x", 166000000}, {} },
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+};
+
+struct nvhost_device tegra_display01_t20_device = {
+	.name		= "display",
+	.id		= -1,
+	.index		= 0,
+	.syncpts	= BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) |
+			  BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) |
+			  BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) |
+			  BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
+	.modulemutexes	= BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.moduleid	= NVHOST_MODULE_NONE,
+};
+
+struct nvhost_device tegra_gr3d01_t20_device = {
+	.name		= "gr3d",
+	.version	= 1,
+	.id		= -1,
+	.index		= 1,
+	.syncpts	= BIT(NVSYNCPT_3D),
+	.waitbases	= BIT(NVWAITBASE_3D),
+	.modulemutexes	= BIT(NVMODMUTEX_3D),
+	.class		= NV_GRAPHICS_3D_CLASS_ID,
+	.clocks		= {{"gr3d", 300000000}, {} },
+	.powergate_ids	= {TEGRA_POWERGATE_3D, -1},
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.moduleid	= NVHOST_MODULE_NONE,
+};
+
+struct nvhost_device tegra_gr2d01_t20_device = {
+	.name		= "gr2d",
+	.id		= -1,
+	.index		= 2,
+	.syncpts	= BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
+	.waitbases	= BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
+	.modulemutexes	= BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
+			  BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
+	.clocks		= { {"gr2d", 300000000},
+			    {"epp", 300000000}, {} },
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+	.clockgate_delay = 0,
+	.moduleid	= NVHOST_MODULE_NONE,
+	.serialize	= true,
+};
+
+struct nvhost_device tegra_isp01_t20_device = {
+	.name		= "isp",
+	.id		= -1,
+	.index		= 3,
+	.syncpts	= 0,
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.moduleid	= NVHOST_MODULE_ISP,
+};
+
+struct nvhost_device tegra_vi01_t20_device = {
+	.name		= "vi",
+	.id		= -1,
+	.index		= 4,
+	.syncpts	= BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) |
+			  BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
+			  BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+			  BIT(NVSYNCPT_VI_ISP_4),
+	.modulemutexes	= BIT(NVMODMUTEX_VI),
+	.exclusive	= true,
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.moduleid	= NVHOST_MODULE_VI,
+};
+
+struct nvhost_device tegra_mpe01_t20_device = {
+	.name		= "mpe",
+	.version	= 1,
+	.id		= -1,
+	.index		= 5,
+	.syncpts	= BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
+			  BIT(NVSYNCPT_MPE_WR_SAFE),
+	.waitbases	= BIT(NVWAITBASE_MPE),
+	.class		= NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+	.waitbasesync	= true,
+	.keepalive	= true,
+	.clocks		= { {"mpe", 250000000}, {} },
+	.powergate_ids	= {TEGRA_POWERGATE_MPE, -1},
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.moduleid	= NVHOST_MODULE_MPE,
+};
+
+struct nvhost_device tegra_dsi01_t20_device = {
+	.name		= "dsi",
+	.id		= -1,
+	.index		= 6,
+	.syncpts	= BIT(NVSYNCPT_DSI),
+	.modulemutexes	= BIT(NVMODMUTEX_DSI),
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.moduleid	= NVHOST_MODULE_NONE,
+};
+
+static struct nvhost_device *t20_devices[] = {
+	&tegra_host1x01_t20_device,
+	&tegra_display01_t20_device,
+	&tegra_gr3d01_t20_device,
+	&tegra_gr2d01_t20_device,
+	&tegra_isp01_t20_device,
+	&tegra_vi01_t20_device,
+	&tegra_mpe01_t20_device,
+	&tegra_dsi01_t20_device,
+};
+
+struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = {
+	NVHOST_T20_OF_DEV_AUXDATA,
+	{}
+};
+
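+/*
+ * Match an auxdata device name against the static device table above. Note
+ * that this is a prefix match (strncmp over the table entry's length), so a
+ * dev_name that merely begins with an entry's name still matches; a NULL
+ * name selects the host1x device itself.
+ */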
+struct nvhost_device *tegra2_match_aux_dev(const char *dev_name)
+{
+	int i;
+
+	if (!dev_name)
+		return &tegra_host1x01_t20_device;
+
+	for (i = 0; i < ARRAY_SIZE(t20_devices); i++) {
+		const char *aux_dev_name = t20_devices[i]->name;
+
+		if (!strncmp(dev_name, aux_dev_name, strlen(aux_dev_name)))
+			return t20_devices[i];
+	}
+
+	return NULL;
+}
+
+int tegra2_register_host1x_devices(void)
+{
+	return nvhost_add_devices(t20_devices, ARRAY_SIZE(t20_devices));
+}
+
+static void t20_free_nvhost_channel(struct nvhost_channel *ch)
+{
+	nvhost_free_channel_internal(ch, &t20_num_alloc_channels);
+}
+
+static struct nvhost_channel *t20_alloc_nvhost_channel(
+		struct nvhost_device *dev)
+{
+	return nvhost_alloc_channel_internal(dev->index,
+		nvhost_get_host(dev)->info.nb_channels,
+		&t20_num_alloc_channels);
+}
+
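+/*
+ * The shared host1x backend sources are included directly so that their
+ * static ops tables end up compiled into this chip-support object.
+ */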
+#include "host1x/host1x_channel.c"
+#include "host1x/host1x_cdma.c"
+#include "host1x/host1x_debug.c"
+#include "host1x/host1x_syncpt.c"
+#include "host1x/host1x_intr.c"
+
+int nvhost_init_t20_support(struct nvhost_master *host,
+	struct nvhost_chip_support *op)
+{
+	int err;
+
+	op->channel = host1x_channel_ops;
+	op->cdma = host1x_cdma_ops;
+	op->push_buffer = host1x_pushbuffer_ops;
+	op->debug = host1x_debug_ops;
+	host->sync_aperture = host->aperture + HOST1X_CHANNEL_SYNC_REG_BASE;
+	op->syncpt = host1x_syncpt_ops;
+	op->intr = host1x_intr_ops;
+	err = nvhost_memmgr_init(op);
+	if (err)
+		return err;
+
+	op->nvhost_dev.alloc_nvhost_channel = t20_alloc_nvhost_channel;
+	op->nvhost_dev.free_nvhost_channel = t20_free_nvhost_channel;
+
+	return 0;
+}
diff --git a/drivers/staging/tegra/video/host/t20/t20.h b/drivers/staging/tegra/video/host/t20/t20.h
new file mode 100644
index 000000000000..729f9d8e85e4
--- /dev/null
+++ b/drivers/staging/tegra/video/host/t20/t20.h
@@ -0,0 +1,29 @@
+/*
+ * drivers/video/tegra/host/t20/t20.h
+ *
+ * Tegra Graphics Chip support for T20
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _NVHOST_T20_H_
+#define _NVHOST_T20_H_
+
+struct nvhost_master;
+struct nvhost_chip_support;
+
+int nvhost_init_t20_support(struct nvhost_master *,
+	struct nvhost_chip_support *);
+
+#endif /* _NVHOST_T20_H_ */
diff --git a/drivers/staging/tegra/video/host/t30/Makefile b/drivers/staging/tegra/video/host/t30/Makefile
new file mode 100644
index 000000000000..9f6cd7a0bc31
--- /dev/null
+++ b/drivers/staging/tegra/video/host/t30/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+nvhost-t30-objs  = \
+	t30.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-t30.o
diff --git a/drivers/staging/tegra/video/host/t30/t30.c b/drivers/staging/tegra/video/host/t30/t30.c
new file mode 100644
index 000000000000..b336a456fc08
--- /dev/null
+++ b/drivers/staging/tegra/video/host/t30/t30.c
@@ -0,0 +1,283 @@
+/*
+ * drivers/video/tegra/host/t30/t30.c
+ *
+ * Tegra Graphics Init for T30 Architecture Chips
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/nvhost_ioctl.h>
+#include "t20/t20.h"
+#include "t30.h"
+#include "gr3d/gr3d_t30.h"
+#include "gr3d/scale3d.h"
+#include "mpe/mpe.h"
+#include "host1x/host1x.h"
+#include "host1x/host1x01_hardware.h"
+#include "chip_support.h"
+#include "nvhost_channel.h"
+#include "nvhost_memmgr.h"
+#include "host1x/host1x_syncpt.h"
+
+#include <mach/powergate.h>
+
+#define NVMODMUTEX_2D_FULL	(1)
+#define NVMODMUTEX_2D_SIMPLE	(2)
+#define NVMODMUTEX_2D_SB_A	(3)
+#define NVMODMUTEX_2D_SB_B	(4)
+#define NVMODMUTEX_3D		(5)
+#define NVMODMUTEX_DISPLAYA	(6)
+#define NVMODMUTEX_DISPLAYB	(7)
+#define NVMODMUTEX_VI		(8)
+#define NVMODMUTEX_DSI		(9)
+
+static int t30_num_alloc_channels;
+
+static const char *s_syncpt_names[32] = {
+	"gfx_host",
+	"", "", "", "", "", "", "",
+	"disp0_a", "disp1_a", "avp_0",
+	"csi_vi_0", "csi_vi_1",
+	"vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4",
+	"2d_0", "2d_1",
+	"disp0_b", "disp1_b",
+	"3d",
+	"mpe",
+	"disp0_c", "disp1_c",
+	"vblank0", "vblank1",
+	"mpe_ebm_eof", "mpe_wr_safe",
+	"2d_tinyblt",
+	"dsi"
+};
+
+static struct host1x_device_info host1x01_info = {
+	.nb_channels	= 8,
+	.nb_pts		= 32,
+	.nb_mlocks	= 16,
+	.nb_bases	= 8,
+	.syncpt_names	= s_syncpt_names,
+	.client_managed	= NVSYNCPTS_CLIENT_MANAGED,
+};
+
+static struct nvhost_device tegra_host1x01_device = {
+	.dev		= {.platform_data = &host1x01_info},
+	.name		= "host1x",
+	.id		= -1,
+	.clocks		= {{"host1x", UINT_MAX}, {} },
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+};
+
+static struct nvhost_device tegra_display01_device = {
+	.name		= "display",
+	.id		= -1,
+	.index		= 0,
+	.syncpts	= BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) |
+			  BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) |
+			  BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) |
+			  BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
+	.modulemutexes	= BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.moduleid	= NVHOST_MODULE_NONE,
+};
+
+static struct nvhost_device tegra_gr3d02_device = {
+	.name		= "gr3d",
+	.version	= 2,
+	.id		= -1,
+	.index		= 1,
+	.syncpts	= BIT(NVSYNCPT_3D),
+	.waitbases	= BIT(NVWAITBASE_3D),
+	.modulemutexes	= BIT(NVMODMUTEX_3D),
+	.class		= NV_GRAPHICS_3D_CLASS_ID,
+	.clocks		= { {"gr3d", UINT_MAX},
+			    {"gr3d2", UINT_MAX},
+			    {"emc", UINT_MAX} },
+	.powergate_ids = { TEGRA_POWERGATE_3D,
+			   TEGRA_POWERGATE_3D1 },
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.can_powergate = true,
+	.powerup_reset = true,
+	.powergate_delay = 250,
+	.moduleid	= NVHOST_MODULE_NONE,
+};
+
+static struct nvhost_device tegra_gr2d02_device = {
+	.name		= "gr2d",
+	.id		= -1,
+	.index		= 2,
+	.syncpts	= BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
+	.waitbases	= BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
+	.modulemutexes	= BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
+			  BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
+	.clocks		= { {"gr2d", 0},
+			  {"epp", 0},
+			  {"emc", 300000000} },
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+	.clockgate_delay = 0,
+	.moduleid	= NVHOST_MODULE_NONE,
+	.serialize	= true,
+};
+
+static struct resource isp_resources_t20[] = {
+	{
+		.name = "regs",
+		.start = TEGRA_ISP_BASE,
+		.end = TEGRA_ISP_BASE + TEGRA_ISP_SIZE - 1,
+		.flags = IORESOURCE_MEM,
+	}
+};
+
+static struct nvhost_device tegra_isp01_device = {
+	.name		= "isp",
+	.id		= -1,
+	.resource = isp_resources_t20,
+	.num_resources = ARRAY_SIZE(isp_resources_t20),
+	.index		= 3,
+	.syncpts	= BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+			  BIT(NVSYNCPT_VI_ISP_4),
+	.clocks		= { {"epp", 0}
+			  },
+	.keepalive	= true,
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.moduleid	= NVHOST_MODULE_ISP,
+};
+
+static struct resource vi_resources[] = {
+	{
+		.name = "regs",
+		.start = TEGRA_VI_BASE,
+		.end = TEGRA_VI_BASE + TEGRA_VI_SIZE - 1,
+		.flags = IORESOURCE_MEM,
+	},
+};
+
+static struct nvhost_device tegra_vi01_device = {
+	.name		= "vi",
+	.resource = vi_resources,
+	.num_resources = ARRAY_SIZE(vi_resources),
+	.id		= -1,
+	.index		= 4,
+	.syncpts	= BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) |
+			  BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
+			  BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+			  BIT(NVSYNCPT_VI_ISP_4),
+	.modulemutexes	= BIT(NVMODMUTEX_VI),
+	.exclusive	= true,
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.moduleid	= NVHOST_MODULE_VI,
+};
+
+static struct resource tegra_mpe01_resources[] = {
+	{
+		.name = "regs",
+		.start = TEGRA_MPE_BASE,
+		.end = TEGRA_MPE_BASE + TEGRA_MPE_SIZE - 1,
+		.flags = IORESOURCE_MEM,
+	},
+};
+
+static struct nvhost_device tegra_mpe02_device = {
+	.name		= "mpe",
+	.version	= 2,
+	.id		= -1,
+	.resource	= tegra_mpe01_resources,
+	.num_resources	= ARRAY_SIZE(tegra_mpe01_resources),
+	.index		= 5,
+	.syncpts	= BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
+			  BIT(NVSYNCPT_MPE_WR_SAFE),
+	.waitbases	= BIT(NVWAITBASE_MPE),
+	.class		= NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+	.waitbasesync	= true,
+	.keepalive	= true,
+	.clocks		= { {"mpe", UINT_MAX},
+			    {"emc", UINT_MAX} },
+	.powergate_ids	= {TEGRA_POWERGATE_MPE, -1},
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.can_powergate	= true,
+	.powergate_delay = 100,
+	.moduleid	= NVHOST_MODULE_MPE,
+};
+
+static struct nvhost_device tegra_dsi01_device = {
+	.name		= "dsi",
+	.id		= -1,
+	.index		= 6,
+	.syncpts	= BIT(NVSYNCPT_DSI),
+	.modulemutexes	= BIT(NVMODMUTEX_DSI),
+	NVHOST_MODULE_NO_POWERGATE_IDS,
+	NVHOST_DEFAULT_CLOCKGATE_DELAY,
+	.moduleid	= NVHOST_MODULE_NONE,
+};
+
+static struct nvhost_device *t30_devices[] = {
+	&tegra_host1x01_device,
+	&tegra_display01_device,
+	&tegra_gr3d02_device,
+	&tegra_gr2d02_device,
+	&tegra_isp01_device,
+	&tegra_vi01_device,
+	&tegra_mpe02_device,
+	&tegra_dsi01_device,
+};
+
+int tegra3_register_host1x_devices(void)
+{
+	return nvhost_add_devices(t30_devices, ARRAY_SIZE(t30_devices));
+}
+
+static void t30_free_nvhost_channel(struct nvhost_channel *ch)
+{
+	nvhost_free_channel_internal(ch, &t30_num_alloc_channels);
+}
+
+static struct nvhost_channel *t30_alloc_nvhost_channel(
+		struct nvhost_device *dev)
+{
+	return nvhost_alloc_channel_internal(dev->index,
+		nvhost_get_host(dev)->info.nb_channels,
+		&t30_num_alloc_channels);
+}
+
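+/* As in t20.c, pull the shared host1x backend sources into this object. */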
+#include "host1x/host1x_channel.c"
+#include "host1x/host1x_cdma.c"
+#include "host1x/host1x_debug.c"
+#include "host1x/host1x_syncpt.c"
+#include "host1x/host1x_intr.c"
+
+int nvhost_init_t30_support(struct nvhost_master *host,
+	struct nvhost_chip_support *op)
+{
+	int err;
+
+	op->channel = host1x_channel_ops;
+	op->cdma = host1x_cdma_ops;
+	op->push_buffer = host1x_pushbuffer_ops;
+	op->debug = host1x_debug_ops;
+	op->debug.debug_init = nvhost_scale3d_debug_init;
+	host->sync_aperture = host->aperture + HOST1X_CHANNEL_SYNC_REG_BASE;
+	op->syncpt = host1x_syncpt_ops;
+	op->intr = host1x_intr_ops;
+	err = nvhost_memmgr_init(op);
+	if (err)
+		return err;
+
+	op->nvhost_dev.alloc_nvhost_channel = t30_alloc_nvhost_channel;
+	op->nvhost_dev.free_nvhost_channel = t30_free_nvhost_channel;
+
+	return 0;
+}
diff --git a/drivers/staging/tegra/video/host/t30/t30.h b/drivers/staging/tegra/video/host/t30/t30.h
new file mode 100644
index 000000000000..80838a5e287c
--- /dev/null
+++ b/drivers/staging/tegra/video/host/t30/t30.h
@@ -0,0 +1,29 @@
+/*
+ * drivers/video/tegra/host/t30/t30.h
+ *
+ * Tegra Graphics Chip support for Tegra3
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _NVHOST_T30_H_
+#define _NVHOST_T30_H_
+
+struct nvhost_master;
+struct nvhost_chip_support;
+
+int nvhost_init_t30_support(struct nvhost_master *host,
+	struct nvhost_chip_support *);
+
+#endif /* _NVHOST_T30_H_ */
diff --git a/drivers/staging/tegra/video/host/vi/Makefile b/drivers/staging/tegra/video/host/vi/Makefile
new file mode 100644
index 000000000000..1e5c58be1192
--- /dev/null
+++ b/drivers/staging/tegra/video/host/vi/Makefile
@@ -0,0 +1,8 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/video/host
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+nvhost-vi-objs  = \
+		vi.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost-vi.o
diff --git a/drivers/staging/tegra/video/host/vi/vi.c b/drivers/staging/tegra/video/host/vi/vi.c
new file mode 100644
index 000000000000..d26a6b7b2d4a
--- /dev/null
+++ b/drivers/staging/tegra/video/host/vi/vi.c
@@ -0,0 +1,86 @@
+/*
+ * drivers/video/tegra/host/vi/vi.c
+ *
+ * Tegra Graphics Host VI
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "dev.h"
+#include "bus_client.h"
+
+static int vi_probe(struct nvhost_device *dev,
+	struct nvhost_device_id *id_table)
+{
+	int err = 0;
+
+	err = nvhost_client_device_get_resources(dev);
+	if (err)
+		return err;
+
+	return nvhost_client_device_init(dev);
+}
+
+static int __exit vi_remove(struct nvhost_device *dev)
+{
+	/* Add clean-up */
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int vi_suspend(struct nvhost_device *dev, pm_message_t state)
+{
+	return nvhost_client_device_suspend(dev);
+}
+
+static int vi_resume(struct nvhost_device *dev)
+{
+	dev_info(&dev->dev, "resuming\n");
+	return 0;
+}
+#endif
+
+static const struct of_device_id vi_of_match[] = {
+	{ .compatible = "nvidia,tegra20-vi", },
+	{ .compatible = "nvidia,tegra30-vi", },
+	{ },
+};
+
+static struct nvhost_driver vi_driver = {
+	.probe = vi_probe,
+	.remove = __exit_p(vi_remove),
+#ifdef CONFIG_PM
+	.suspend = vi_suspend,
+	.resume = vi_resume,
+#endif
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "vi",
+		.of_match_table = of_match_ptr(vi_of_match),
+	}
+};
+
+static int __init vi_init(void)
+{
+	return nvhost_driver_register(&vi_driver);
+}
+
+static void __exit vi_exit(void)
+{
+	nvhost_driver_unregister(&vi_driver);
+}
+
+module_init(vi_init);
+module_exit(vi_exit);
diff --git a/drivers/staging/tegra/video/nvmap/Makefile b/drivers/staging/tegra/video/nvmap/Makefile
new file mode 100644
index 000000000000..a4ac3e5c87f3
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/Makefile
@@ -0,0 +1,10 @@
+GCOV_PROFILE := y
+EXTRA_CFLAGS += -Idrivers/staging/tegra/include
+
+obj-y += nvmap.o
+obj-y += nvmap_dev.o
+obj-y += nvmap_handle.o
+obj-y += nvmap_heap.o
+obj-y += nvmap_ioctl.o
+# obj-${CONFIG_IOMMU_API}	+= nvmap_iommu.o
+obj-${CONFIG_NVMAP_RECLAIM_UNPINNED_VM} += nvmap_mru.o
\ No newline at end of file
diff --git a/drivers/staging/tegra/video/nvmap/nvmap.c b/drivers/staging/tegra/video/nvmap/nvmap.c
new file mode 100644
index 000000000000..2ccf021c70f4
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap.c
@@ -0,0 +1,619 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap.c
+ *
+ * Memory manager for Tegra GPU
+ *
+ * Copyright (c) 2009-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/rbtree.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <linux/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* private nvmap_handle flag for pinning duplicate detection */
+#define NVMAP_HANDLE_VISITED (0x1ul << 31)
+
+/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
+static void map_iovmm_area(struct nvmap_handle *h)
+{
+	tegra_iovmm_addr_t va;
+	unsigned long i;
+
+	BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
+	BUG_ON(h->size & ~PAGE_MASK);
+	WARN_ON(!h->pgalloc.dirty);
+
+	for (va = h->pgalloc.area->iovm_start, i = 0;
+	     va < (h->pgalloc.area->iovm_start + h->size);
+	     i++, va += PAGE_SIZE) {
+		unsigned long pfn;
+
+		pfn = page_to_pfn(h->pgalloc.pages[i]);
+		BUG_ON(!pfn_valid(pfn));
+		tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va, pfn);
+	}
+	h->pgalloc.dirty = false;
+}
+
+/* must be called inside nvmap_pin_lock, to ensure that an entire stream
+ * of pins will complete without racing with a second stream. handle should
+ * have nvmap_handle_get (or nvmap_validate_get) called before calling
+ * this function. */
+static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
+{
+	struct tegra_iovmm_area *area;
+	BUG_ON(!h->alloc);
+
+	nvmap_mru_lock(client->share);
+	if (atomic_inc_return(&h->pin) == 1) {
+		if (h->heap_pgalloc && !h->pgalloc.contig) {
+			area = nvmap_handle_iovmm_locked(client, h);
+			if (!area) {
+				/* no race here, inside the pin mutex */
+				atomic_dec(&h->pin);
+				nvmap_mru_unlock(client->share);
+				return -ENOMEM;
+			}
+			if (area != h->pgalloc.area)
+				h->pgalloc.dirty = true;
+			h->pgalloc.area = area;
+		}
+	}
+	nvmap_mru_unlock(client->share);
+	return 0;
+}
+
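+/*
+ * Sketch of the documented pin_locked() calling convention (see
+ * nvmap_alloc_iovm() below for the locking in practice): the caller holds a
+ * reference (nvmap_handle_get() or the creation reference) and the pin
+ * mutex:
+ *
+ *	mutex_lock(&client->share->pin_lock);
+ *	err = pin_locked(client, h);
+ *	mutex_unlock(&client->share->pin_lock);
+ */
+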
+/* doesn't need to be called inside nvmap_pin_lock, since this will only
+ * expand the available VM area */
+static int handle_unpin(struct nvmap_client *client,
+		struct nvmap_handle *h, int free_vm)
+{
+	int ret = 0;
+	nvmap_mru_lock(client->share);
+
+	if (atomic_read(&h->pin) == 0) {
+		nvmap_err(client, "%s unpinning unpinned handle %p\n",
+			  current->group_leader->comm, h);
+		nvmap_mru_unlock(client->share);
+		return 0;
+	}
+
+	BUG_ON(!h->alloc);
+
+	if (!atomic_dec_return(&h->pin)) {
+		if (h->heap_pgalloc && h->pgalloc.area) {
+			/* if a secure handle is clean (i.e., mapped into
+			 * IOVMM), it needs to be zapped on unpin */
+			if (h->secure && !h->pgalloc.dirty) {
+				tegra_iovmm_zap_vm(h->pgalloc.area);
+				h->pgalloc.dirty = true;
+			}
+			if (free_vm) {
+				tegra_iovmm_free_vm(h->pgalloc.area);
+				h->pgalloc.area = NULL;
+			} else
+				nvmap_mru_insert_locked(client->share, h);
+			ret = 1;
+		}
+	}
+
+	nvmap_mru_unlock(client->share);
+	nvmap_handle_put(h);
+	return ret;
+}
+
+static int pin_array_locked(struct nvmap_client *client,
+		struct nvmap_handle **h, int count)
+{
+	int pinned;
+	int i;
+	int err = 0;
+
+	for (pinned = 0; pinned < count; pinned++) {
+		err = pin_locked(client, h[pinned]);
+		if (err)
+			break;
+	}
+
+	if (err) {
+		/* unpin pinned handles */
+		for (i = 0; i < pinned; i++) {
+			/* inc ref counter, because
+			 * handle_unpin decrements it */
+			nvmap_handle_get(h[i]);
+			/* unpin handles and free vm */
+			handle_unpin(client, h[i], true);
+		}
+	}
+
+	if (err && tegra_iovmm_get_max_free(client->share->iovmm) >=
+							client->iovm_limit) {
+		/* The first attempt to pin in an empty iovmm may still fail
+		 * because of fragmentation caused by placing handles in MRU
+		 * areas. After such a failure, the whole MRU gets cleaned and
+		 * the iovm space is freed.
+		 *
+		 * We have to retry the pinning here, since there may be no
+		 * more incoming pin_wait wakeups from unpin operations */
+		for (pinned = 0; pinned < count; pinned++) {
+			err = pin_locked(client, h[pinned]);
+			if (err)
+				break;
+		}
+		if (err) {
+			pr_err("Pinning in empty iovmm failed!!!\n");
+			BUG();
+		}
+	}
+	return err;
+}
+
+static int wait_pin_array_locked(struct nvmap_client *client,
+		struct nvmap_handle **h, int count)
+{
+	int ret = 0;
+
+	ret = pin_array_locked(client, h, count);
+
+	if (ret) {
+		ret = wait_event_interruptible(client->share->pin_wait,
+				!pin_array_locked(client, h, count));
+	}
+	return ret ? -EINTR : 0;
+}
+
+static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
+{
+	struct nvmap_handle *h;
+	int w;
+
+	h = nvmap_validate_get(client, id);
+	if (unlikely(!h)) {
+		nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
+			  current->group_leader->comm, (void *)id);
+		return 0;
+	}
+
+	nvmap_err(client, "%s unpinning unreferenced handle %p\n",
+		  current->group_leader->comm, h);
+	WARN_ON(1);
+
+	w = handle_unpin(client, h, false);
+	nvmap_handle_put(h);
+	return w;
+}
+
+void nvmap_unpin_ids(struct nvmap_client *client,
+		     unsigned int nr, const unsigned long *ids)
+{
+	unsigned int i;
+	int do_wake = 0;
+
+	for (i = 0; i < nr; i++) {
+		struct nvmap_handle_ref *ref;
+
+		if (!ids[i])
+			continue;
+
+		nvmap_ref_lock(client);
+		ref = _nvmap_validate_id_locked(client, ids[i]);
+		if (ref) {
+			struct nvmap_handle *h = ref->handle;
+			int e = atomic_add_unless(&ref->pin, -1, 0);
+
+			nvmap_ref_unlock(client);
+
+			if (!e) {
+				nvmap_err(client, "%s unpinning unpinned "
+					  "handle %08lx\n",
+					  current->group_leader->comm, ids[i]);
+			} else {
+				do_wake |= handle_unpin(client, h, false);
+			}
+		} else {
+			nvmap_ref_unlock(client);
+			if (client->super)
+				do_wake |= handle_unpin_noref(client, ids[i]);
+			else
+				nvmap_err(client, "%s unpinning invalid "
+					  "handle %08lx\n",
+					  current->group_leader->comm, ids[i]);
+		}
+	}
+
+	if (do_wake)
+		wake_up(&client->share->pin_wait);
+}
+
+/* pins a list of handle_ref objects; same conditions apply as to
+ * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
+int nvmap_pin_ids(struct nvmap_client *client,
+		  unsigned int nr, const unsigned long *ids)
+{
+	int ret = 0;
+	unsigned int i;
+	struct nvmap_handle **h = (struct nvmap_handle **)ids;
+	struct nvmap_handle_ref *ref;
+
+	/* to optimize for the common case (client provided valid handle
+	 * references and the pin succeeds), increment the handle_ref pin
+	 * count during validation. in error cases, the tree will need to
+	 * be re-walked, since the handle_ref is discarded so that an
+	 * allocation isn't required. if a handle_ref is not found,
+	 * locally validate that the caller has permission to pin the handle;
+	 * handle_refs are not created in this case, so it is possible that
+	 * if the caller crashes after pinning a global handle, the handle
+	 * will be permanently leaked. */
+	nvmap_ref_lock(client);
+	for (i = 0; i < nr; i++) {
+		ref = _nvmap_validate_id_locked(client, ids[i]);
+		if (ref) {
+			atomic_inc(&ref->pin);
+			nvmap_handle_get(h[i]);
+		} else {
+			struct nvmap_handle *verify;
+			nvmap_ref_unlock(client);
+			verify = nvmap_validate_get(client, ids[i]);
+			if (verify) {
+				nvmap_warn(client, "%s pinning unreferenced "
+					   "handle %p\n",
+					   current->group_leader->comm, h[i]);
+			} else {
+				ret = -EPERM;
+				nr = i;
+				break;
+			}
+			nvmap_ref_lock(client);
+		}
+		if (!h[i]->alloc) {
+			ret = -EFAULT;
+			nr = i + 1;
+			break;
+		}
+	}
+	nvmap_ref_unlock(client);
+
+	if (ret)
+		goto out;
+
+	ret = mutex_lock_interruptible(&client->share->pin_lock);
+	if (WARN_ON(ret))
+		goto out;
+
+	ret = wait_pin_array_locked(client, h, nr);
+
+	mutex_unlock(&client->share->pin_lock);
+
+	if (ret) {
+		ret = -EINTR;
+	} else {
+		for (i = 0; i < nr; i++) {
+			if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
+				map_iovmm_area(h[i]);
+		}
+	}
+
+out:
+	if (ret) {
+		nvmap_ref_lock(client);
+		for (i = 0; i < nr; i++) {
+			if (!ids[i])
+				continue;
+
+			ref = _nvmap_validate_id_locked(client, ids[i]);
+			if (!ref) {
+				nvmap_warn(client, "%s freed handle %p "
+					   "during pinning\n",
+					   current->group_leader->comm,
+					   (void *)ids[i]);
+				continue;
+			}
+			atomic_dec(&ref->pin);
+		}
+		nvmap_ref_unlock(client);
+
+		for (i = 0; i < nr; i++)
+			if (h[i])
+				nvmap_handle_put(h[i]);
+	}
+
+	return ret;
+}
+
+static phys_addr_t handle_phys(struct nvmap_handle *h)
+{
+	phys_addr_t addr;
+
+	if (h->heap_pgalloc && h->pgalloc.contig) {
+		addr = page_to_phys(h->pgalloc.pages[0]);
+	} else if (h->heap_pgalloc) {
+		BUG_ON(!h->pgalloc.area);
+		addr = h->pgalloc.area->iovm_start;
+	} else {
+		addr = h->carveout->base;
+	}
+
+	return addr;
+}
+
+phys_addr_t nvmap_pin(struct nvmap_client *client,
+			struct nvmap_handle_ref *ref)
+{
+	struct nvmap_handle *h;
+	phys_addr_t phys;
+	int ret = 0;
+	unsigned long ref_id;
+
+	if (!ref)
+		return -EINVAL;
+	ref_id = nvmap_ref_to_id(ref);
+	h = nvmap_get_handle_id(client, ref_id);
+	if (WARN_ON(!h))
+		return -EINVAL;
+
+	atomic_inc(&ref->pin);
+
+	if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
+		ret = -EINTR;
+	} else {
+		ret = wait_pin_array_locked(client, &h, 1);
+		mutex_unlock(&client->share->pin_lock);
+	}
+
+	if (ret) {
+		atomic_dec(&ref->pin);
+		nvmap_handle_put(h);
+	} else {
+		if (h->heap_pgalloc && h->pgalloc.dirty)
+			map_iovmm_area(h);
+		phys = handle_phys(h);
+	}
+
+	return ret ?: phys;
+}
+
+phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
+{
+	struct nvmap_handle *h;
+	phys_addr_t phys;
+
+	h = nvmap_get_handle_id(c, id);
+	if (!h)
+		return -EPERM;
+	mutex_lock(&h->lock);
+	phys = handle_phys(h);
+	mutex_unlock(&h->lock);
+	nvmap_handle_put(h);
+
+	return phys;
+}
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
+{
+	if (!ref)
+		return;
+
+	atomic_dec(&ref->pin);
+	if (handle_unpin(client, ref->handle, false))
+		wake_up(&client->share->pin_wait);
+}
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+			 struct nvmap_handle **h, int nr)
+{
+	int i;
+	int do_wake = 0;
+
+	for (i = 0; i < nr; i++) {
+		if (WARN_ON(!h[i]))
+			continue;
+		do_wake |= handle_unpin(client, h[i], false);
+	}
+
+	if (do_wake)
+		wake_up(&client->share->pin_wait);
+}
+
+void *nvmap_mmap(struct nvmap_handle_ref *ref)
+{
+	struct nvmap_handle *h;
+	pgprot_t prot;
+	unsigned long adj_size;
+	unsigned long offs;
+	struct vm_struct *v;
+	void *p;
+
+	h = nvmap_handle_get(ref->handle);
+	if (!h)
+		return NULL;
+
+	prot = nvmap_pgprot(h, pgprot_kernel);
+
+	if (h->heap_pgalloc)
+		return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
+				  -1, prot);
+
+	/* carveout - explicitly map the pfns into a vmalloc area */
+
+	nvmap_usecount_inc(h);
+
+	adj_size = h->carveout->base & ~PAGE_MASK;
+	adj_size += h->size;
+	adj_size = PAGE_ALIGN(adj_size);
+
+	v = alloc_vm_area(adj_size, NULL);
+	if (!v) {
+		nvmap_usecount_dec(h);
+		nvmap_handle_put(h);
+		return NULL;
+	}
+
+	p = v->addr + (h->carveout->base & ~PAGE_MASK);
+
+	for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
+		unsigned long addr = (unsigned long) v->addr + offs;
+		unsigned int pfn;
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pfn = __phys_to_pfn(h->carveout->base + offs);
+		pgd = pgd_offset_k(addr);
+		pud = pud_alloc(&init_mm, pgd, addr);
+		if (!pud)
+			break;
+		pmd = pmd_alloc(&init_mm, pud, addr);
+		if (!pmd)
+			break;
+		pte = pte_alloc_kernel(pmd, addr);
+		if (!pte)
+			break;
+		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
+		flush_tlb_kernel_page(addr);
+	}
+
+	if (offs != adj_size) {
+		free_vm_area(v);
+		nvmap_usecount_dec(h);
+		nvmap_handle_put(h);
+		return NULL;
+	}
+
+	/* leave the handle ref count incremented by 1, so that
+	 * the handle will not be freed while the kernel mapping exists.
+	 * nvmap_handle_put will be called by unmapping this address */
+	return p;
+}
+
+void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
+{
+	struct nvmap_handle *h;
+
+	if (!ref)
+		return;
+
+	h = ref->handle;
+
+	if (h->heap_pgalloc) {
+		vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
+	} else {
+		struct vm_struct *vm;
+		addr -= (h->carveout->base & ~PAGE_MASK);
+		vm = remove_vm_area(addr);
+		BUG_ON(!vm);
+		kfree(vm);
+		nvmap_usecount_dec(h);
+	}
+	nvmap_handle_put(h);
+}
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+				     size_t align, unsigned int flags,
+				     unsigned int heap_mask)
+{
+	const unsigned int default_heap = (NVMAP_HEAP_SYSMEM |
+					   NVMAP_HEAP_CARVEOUT_GENERIC);
+	struct nvmap_handle_ref *r = NULL;
+	int err;
+
+	if (heap_mask == 0)
+		heap_mask = default_heap;
+
+	r = nvmap_create_handle(client, size);
+	if (IS_ERR(r))
+		return r;
+
+	err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
+				    heap_mask, align, flags);
+
+	if (err) {
+		nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+		return ERR_PTR(err);
+	}
+
+	return r;
+}
+
+/* allocates memory at the specified iovm_start address. */
+struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
+	size_t size, size_t align, unsigned int flags, unsigned int iovm_start)
+{
+	int err;
+	struct nvmap_handle *h;
+	struct nvmap_handle_ref *r;
+	const unsigned int default_heap = NVMAP_HEAP_IOVMM;
+
+	/* size needs to be more than one page, otherwise the heap
+	 * preference would change to the system heap.
+	 */
+	if (size <= PAGE_SIZE)
+		size = PAGE_SIZE << 1;
+	r = nvmap_create_handle(client, size);
+	if (IS_ERR(r))
+		return r;
+
+	h = r->handle;
+	h->pgalloc.iovm_addr = iovm_start;
+	err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
+			default_heap, align, flags);
+	if (err)
+		goto fail;
+
+	err = mutex_lock_interruptible(&client->share->pin_lock);
+	if (WARN_ON(err))
+		goto fail;
+	err = pin_locked(client, h);
+	mutex_unlock(&client->share->pin_lock);
+	if (err)
+		goto fail;
+	return r;
+
+fail:
+	nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+	return ERR_PTR(err);
+}
+
+void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+	unsigned long ref_id = nvmap_ref_to_id(r);
+
+	nvmap_unpin_ids(client, 1, &ref_id);
+	nvmap_free_handle_id(client, ref_id);
+}
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+	if (!r)
+		return;
+
+	nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+}
diff --git a/drivers/staging/tegra/video/nvmap/nvmap.h b/drivers/staging/tegra/video/nvmap/nvmap.h
new file mode 100644
index 000000000000..091b1f4df23b
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap.h
@@ -0,0 +1,321 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap.h
+ *
+ * GPU memory management driver for Tegra
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
+#define __VIDEO_TEGRA_NVMAP_NVMAP_H
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+#include <linux/nvmap.h>
+#include "nvmap_heap.h"
+
+struct nvmap_device;
+struct page;
+struct tegra_iovmm_area;
+
+void _nvmap_handle_free(struct nvmap_handle *h);
+
+#if defined(CONFIG_TEGRA_NVMAP)
+#define nvmap_err(_client, _fmt, ...)				\
+	dev_err(nvmap_client_to_device(_client),		\
+		"%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_warn(_client, _fmt, ...)				\
+	dev_warn(nvmap_client_to_device(_client),		\
+		 "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_debug(_client, _fmt, ...)				\
+	dev_dbg(nvmap_client_to_device(_client),		\
+		"%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_ref_to_id(_ref)		((unsigned long)(_ref)->handle)
+
+/* handles allocated using shared system memory (either IOVMM or high-order
+ * page allocations) */
+struct nvmap_pgalloc {
+	struct page **pages;
+	struct tegra_iovmm_area *area;
+	struct list_head mru_list;	/* MRU entry for IOVMM reclamation */
+	bool contig;			/* contiguous system memory */
+	bool dirty;			/* area is invalid and needs mapping */
+	u32 iovm_addr;	/* non-zero if the client needs a specific iova mapping */
+};
+
+struct nvmap_handle {
+	struct rb_node node;	/* entry on global handle tree */
+	atomic_t ref;		/* reference count (i.e., # of duplications) */
+	atomic_t pin;		/* pin count */
+	unsigned int usecount;	/* how often it is used */
+	unsigned long flags;
+	size_t size;		/* padded (as-allocated) size */
+	size_t orig_size;	/* original (as-requested) size */
+	size_t align;
+	struct nvmap_client *owner;
+	struct nvmap_device *dev;
+	union {
+		struct nvmap_pgalloc pgalloc;
+		struct nvmap_heap_block *carveout;
+	};
+	bool global;		/* handle may be duplicated by other clients */
+	bool secure;		/* zap IOVMM area on unpin */
+	bool heap_pgalloc;	/* handle is page allocated (sysmem / iovmm) */
+	bool alloc;		/* handle has memory allocated */
+	unsigned int userflags;	/* flags passed from userspace */
+	struct mutex lock;
+};
+
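+/*
+ * Rough handle lifecycle (a sketch; the implementations live in nvmap.c and
+ * friends): nvmap_create_handle() creates the handle,
+ * nvmap_alloc_handle_id() backs it with memory, nvmap_pin_ids() /
+ * nvmap_unpin_ids() manage the device mappings, and nvmap_free_handle_id()
+ * drops the reference.
+ */
+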
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+#define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE
+#define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE
+#define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE
+#define NVMAP_WB_POOL NVMAP_HANDLE_CACHEABLE
+#define NVMAP_NUM_POOLS (NVMAP_HANDLE_CACHEABLE + 1)
+
+struct nvmap_page_pool {
+	struct mutex lock;
+	int npages;
+	struct page **page_array;
+	struct page **shrink_array;
+	int max_pages;
+	int flags;
+};
+
+int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags);
+#endif
+
+struct nvmap_share {
+	struct tegra_iovmm_client *iovmm;
+	wait_queue_head_t pin_wait;
+	struct mutex pin_lock;
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+	union {
+		struct nvmap_page_pool pools[NVMAP_NUM_POOLS];
+		struct {
+			struct nvmap_page_pool uc_pool;
+			struct nvmap_page_pool wc_pool;
+			struct nvmap_page_pool iwb_pool;
+			struct nvmap_page_pool wb_pool;
+		};
+	};
+#endif
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+	struct mutex mru_lock;
+	struct list_head *mru_lists;
+	int nr_mru;
+#endif
+};
+
+struct nvmap_carveout_commit {
+	size_t commit;
+	struct list_head list;
+};
+
+struct nvmap_client {
+	const char			*name;
+	struct nvmap_device		*dev;
+	struct nvmap_share		*share;
+	struct rb_root			handle_refs;
+	atomic_t			iovm_commit;
+	size_t				iovm_limit;
+	struct mutex			ref_lock;
+	bool				super;
+	atomic_t			count;
+	struct task_struct		*task;
+	struct list_head		list;
+	struct nvmap_carveout_commit	carveout_commit[0];
+};
+
+struct nvmap_vma_priv {
+	struct nvmap_handle *handle;
+	size_t		offs;
+	atomic_t	count;	/* number of processes cloning the VMA */
+};
+
+static inline void nvmap_ref_lock(struct nvmap_client *priv)
+{
+	mutex_lock(&priv->ref_lock);
+}
+
+static inline void nvmap_ref_unlock(struct nvmap_client *priv)
+{
+	mutex_unlock(&priv->ref_lock);
+}
+
+static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
+{
+	if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
+		pr_err("%s: %s getting a freed handle\n",
+			__func__, current->group_leader->comm);
+		if (atomic_read(&h->ref) <= 0)
+			return NULL;
+	}
+	return h;
+}
+
+static inline void nvmap_handle_put(struct nvmap_handle *h)
+{
+	int cnt = atomic_dec_return(&h->ref);
+
+	if (WARN_ON(cnt < 0)) {
+		pr_err("%s: %s put to negative references\n",
+			__func__, current->comm);
+	} else if (cnt == 0)
+		_nvmap_handle_free(h);
+}
+
+#define L_PTE_MT_INNER_WB	(_AT(pteval_t, 0x05) << 2)	/* 0101 (armv6, armv7) */
+static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
+{
+	if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
+		return pgprot_noncached(prot);
+	else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
+		return pgprot_writecombine(prot);
+	else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
+		return __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_INNER_WB);
+	return prot;
+}
+
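+/*
+ * Example (as used by nvmap_mmap() in nvmap.c): derive the protection bits
+ * for a kernel mapping of a handle from its allocation flags:
+ *
+ *	pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
+ */
+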
+#else /* CONFIG_TEGRA_NVMAP */
+struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
+void nvmap_handle_put(struct nvmap_handle *h);
+pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);
+
+#endif /* !CONFIG_TEGRA_NVMAP */
+
+struct device *nvmap_client_to_device(struct nvmap_client *client);
+
+pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);
+
+pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);
+
+void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);
+
+void nvmap_usecount_inc(struct nvmap_handle *h);
+void nvmap_usecount_dec(struct nvmap_handle *h);
+
+struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
+					      struct nvmap_handle *handle,
+					      unsigned long type);
+
+unsigned long nvmap_carveout_usage(struct nvmap_client *c,
+				   struct nvmap_heap_block *b);
+
+struct nvmap_carveout_node;
+void nvmap_carveout_commit_add(struct nvmap_client *client,
+			       struct nvmap_carveout_node *node, size_t len);
+
+void nvmap_carveout_commit_subtract(struct nvmap_client *client,
+				    struct nvmap_carveout_node *node,
+				    size_t len);
+
+struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);
+
+struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
+					unsigned long handle);
+
+struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv,
+						   unsigned long id);
+
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+					 unsigned long id);
+
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+					     size_t size);
+
+int nvmap_alloc_handle_id(struct nvmap_client *client,
+			  unsigned long id, unsigned int heap_mask,
+			  size_t align, unsigned int flags);
+
+void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id);
+
+int nvmap_pin_ids(struct nvmap_client *client,
+		  unsigned int nr, const unsigned long *ids);
+
+void nvmap_unpin_ids(struct nvmap_client *priv,
+		     unsigned int nr, const unsigned long *ids);
+
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);
+
+void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);
+
+int is_nvmap_vma(struct vm_area_struct *vma);
+
+struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
+	size_t size, size_t align, unsigned int flags, unsigned int iova_start);
+
+void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r);
+
+/*
+ * The set_memory_* API can be used to change various attributes of a virtual
+ * address range. The attributes include:
+ * Cacheability  : UnCached, WriteCombining, WriteBack
+ * Executability : eXecutable, NoteXecutable
+ * Read/Write    : ReadOnly, ReadWrite
+ * Presence      : NotPresent
+ *
+ * Within a category, the attributes are mutually exclusive.
+ *
+ * The implementation of this API will take care of various aspects that
+ * are associated with changing such attributes, such as:
+ * - Flushing TLBs
+ * - Flushing CPU caches
+ * - Making sure aliases of the memory behind the mapping don't violate
+ *   coherency rules as defined by the CPU in the system.
+ *
+ * What this API does not do:
+ * - Provide exclusion between various callers - including callers that
+ *   operate on other mappings of the same physical page
+ * - Restore default attributes when a page is freed
+ * - Guarantee anything about mappings other than the requested one, beyond
+ *   the fact that they do not violate the rules for the CPU you have. Do
+ *   not depend on any effects on other mappings; CPUs other than the one
+ *   you have may have more relaxed rules.
+ * The caller is required to take care of these.
+ */
+
+int set_memory_uc(unsigned long addr, int numpages);
+int set_memory_wc(unsigned long addr, int numpages);
+int set_memory_wb(unsigned long addr, int numpages);
+int set_memory_iwb(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_np(unsigned long addr, int numpages);
+int set_memory_4k(unsigned long addr, int numpages);
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
+int set_memory_array_wb(unsigned long *addr, int addrinarray);
+int set_memory_array_iwb(unsigned long *addr, int addrinarray);
+
+int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
+int set_pages_array_wb(struct page **pages, int addrinarray);
+int set_pages_array_iwb(struct page **pages, int addrinarray);
+
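+/*
+ * Hypothetical usage sketch: switch an array of allocated pages to
+ * write-combined before handing them to hardware, and restore the default
+ * write-back attribute before freeing them:
+ *
+ *	err = set_pages_array_wc(pages, nr_pages);
+ *	...hand the pages to the engine and use them...
+ *	set_pages_array_wb(pages, nr_pages);
+ */
+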
+#endif
diff --git a/drivers/staging/tegra/video/nvmap/nvmap_common.h b/drivers/staging/tegra/video/nvmap/nvmap_common.h
new file mode 100644
index 000000000000..93ba1685a207
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap_common.h
@@ -0,0 +1,29 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_common.h
+ *
+ * GPU memory management driver for Tegra
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
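+/*
+ * Above these sizes a full set/way cache flush is assumed to be cheaper
+ * than walking the buffer line by line; see the use of the INNER threshold
+ * in nvmap_flush_heap_block() (nvmap_dev.c).
+ */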
+#define FLUSH_CLEAN_BY_SET_WAY_THRESHOLD_INNER (8 * PAGE_SIZE)
+#define FLUSH_CLEAN_BY_SET_WAY_THRESHOLD_OUTER (1024 * 1024)
+
+extern void inner_flush_cache_all(void);
+extern void inner_clean_cache_all(void);
+
+extern void __flush_dcache_page(struct address_space *, struct page *);
diff --git a/drivers/staging/tegra/video/nvmap/nvmap_dev.c b/drivers/staging/tegra/video/nvmap/nvmap_dev.c
new file mode 100644
index 000000000000..668facfcf620
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap_dev.c
@@ -0,0 +1,1499 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_dev.c
+ *
+ * User-space interface to nvmap
+ *
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/backing-dev.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/nvmap.h>
+#include <linux/memblock.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+
+#include "nvmap.h"
+#include "nvmap_ioctl.h"
+#include "nvmap_mru.h"
+#include "nvmap_common.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/nvmap.h>
+
+#define NVMAP_NUM_PTES		64
+#define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
+
+#ifdef CONFIG_NVMAP_CARVEOUT_KILLER
+static bool carveout_killer = true;
+#else
+static bool carveout_killer;
+#endif
+module_param(carveout_killer, bool, 0640);
+
+struct nvmap_carveout_node {
+	unsigned int		heap_bit;
+	struct nvmap_heap	*carveout;
+	int			index;
+	struct list_head	clients;
+	spinlock_t		clients_lock;
+};
+
+struct nvmap_device {
+	struct vm_struct *vm_rgn;
+	pte_t		*ptes[NVMAP_NUM_PTES];
+	unsigned long	ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
+	unsigned int	lastpte;
+	spinlock_t	ptelock;
+
+	struct rb_root	handles;
+	spinlock_t	handle_lock;
+	wait_queue_head_t pte_wait;
+	struct miscdevice dev_super;
+	struct miscdevice dev_user;
+	struct nvmap_carveout_node *heaps;
+	int nr_carveouts;
+	struct nvmap_share iovmm_master;
+	struct list_head clients;
+	spinlock_t	clients_lock;
+};
+
+struct nvmap_device *nvmap_dev;
+
+static int nvmap_open(struct inode *inode, struct file *filp);
+static int nvmap_release(struct inode *inode, struct file *filp);
+static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
+static void nvmap_vma_open(struct vm_area_struct *vma);
+static void nvmap_vma_close(struct vm_area_struct *vma);
+static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+static const struct file_operations nvmap_user_fops = {
+	.owner		= THIS_MODULE,
+	.open		= nvmap_open,
+	.release	= nvmap_release,
+	.unlocked_ioctl	= nvmap_ioctl,
+	.mmap		= nvmap_map,
+};
+
+static const struct file_operations nvmap_super_fops = {
+	.owner		= THIS_MODULE,
+	.open		= nvmap_open,
+	.release	= nvmap_release,
+	.unlocked_ioctl	= nvmap_ioctl,
+	.mmap		= nvmap_map,
+};
+
+static struct vm_operations_struct nvmap_vma_ops = {
+	.open		= nvmap_vma_open,
+	.close		= nvmap_vma_close,
+	.fault		= nvmap_vma_fault,
+};
+
+int is_nvmap_vma(struct vm_area_struct *vma)
+{
+	return vma->vm_ops == &nvmap_vma_ops;
+}
+
+struct device *nvmap_client_to_device(struct nvmap_client *client)
+{
+	if (client->super)
+		return client->dev->dev_super.this_device;
+	else
+		return client->dev->dev_user.this_device;
+}
+
+struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
+{
+	return &dev->iovmm_master;
+}
+
+/* allocates a PTE for the caller's use; returns the PTE pointer or
+ * a negative errno. may be called from IRQs */
+pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
+{
+	unsigned long flags;
+	unsigned long bit;
+
+	spin_lock_irqsave(&dev->ptelock, flags);
+	bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
+	if (bit == NVMAP_NUM_PTES) {
+		bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
+		if (bit == dev->lastpte)
+			bit = NVMAP_NUM_PTES;
+	}
+
+	if (bit == NVMAP_NUM_PTES) {
+		spin_unlock_irqrestore(&dev->ptelock, flags);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dev->lastpte = bit;
+	set_bit(bit, dev->ptebits);
+	spin_unlock_irqrestore(&dev->ptelock, flags);
+
+	*vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
+	return &(dev->ptes[bit]);
+}
+
+/* allocates a PTE for the caller's use; returns the PTE pointer or
+ * a negative errno. must be called from sleepable contexts */
+pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
+{
+	int ret;
+	pte_t **pte;
+	ret = wait_event_interruptible(dev->pte_wait,
+			!IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));
+
+	if (ret == -ERESTARTSYS)
+		return ERR_PTR(-EINTR);
+
+	return pte;
+}
+
+/* frees a PTE */
+void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
+{
+	unsigned long addr;
+	unsigned int bit = pte - dev->ptes;
+	unsigned long flags;
+
+	if (WARN_ON(bit >= NVMAP_NUM_PTES))
+		return;
+
+	addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
+	set_pte_at(&init_mm, addr, *pte, 0);
+
+	spin_lock_irqsave(&dev->ptelock, flags);
+	clear_bit(bit, dev->ptebits);
+	spin_unlock_irqrestore(&dev->ptelock, flags);
+	wake_up(&dev->pte_wait);
+}
+
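+/*
+ * Example user of the scratch-PTE helpers: nvmap_flush_heap_block() below
+ * takes a PTE slot with nvmap_alloc_pte(), points it at each physical page
+ * in turn via set_pte_at(), flushes the mapping, and finally releases the
+ * slot with nvmap_free_pte().
+ */
+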
+/* verifies that the handle ref value "ref" is a valid handle ref for the
+ * file. caller must hold the file's ref_lock prior to calling this function */
+struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c,
+						   unsigned long id)
+{
+	struct rb_node *n = c->handle_refs.rb_node;
+
+	while (n) {
+		struct nvmap_handle_ref *ref;
+		ref = rb_entry(n, struct nvmap_handle_ref, node);
+		if ((unsigned long)ref->handle == id)
+			return ref;
+		else if (id > (unsigned long)ref->handle)
+			n = n->rb_right;
+		else
+			n = n->rb_left;
+	}
+
+	return NULL;
+}
+
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+					 unsigned long id)
+{
+	struct nvmap_handle_ref *ref;
+	struct nvmap_handle *h = NULL;
+
+	nvmap_ref_lock(client);
+	ref = _nvmap_validate_id_locked(client, id);
+	if (ref)
+		h = ref->handle;
+	if (h)
+		h = nvmap_handle_get(h);
+	nvmap_ref_unlock(client);
+	return h;
+}
+
+unsigned long nvmap_carveout_usage(struct nvmap_client *c,
+				   struct nvmap_heap_block *b)
+{
+	struct nvmap_heap *h = nvmap_block_to_heap(b);
+	struct nvmap_carveout_node *n;
+	int i;
+
+	for (i = 0; i < c->dev->nr_carveouts; i++) {
+		n = &c->dev->heaps[i];
+		if (n->carveout == h)
+			return n->heap_bit;
+	}
+	return 0;
+}
+
+/*
+ * This routine flushes carveout memory from the cache.
+ *
+ * Why is a cache flush needed for carveout? Consider the case where a piece
+ * of carveout is allocated as cached and then released. If the same memory
+ * is later allocated for an uncached request without having been flushed
+ * from the cache, the client might pass it to a H/W engine, which could
+ * start modifying the memory. Since the memory was cached earlier, some
+ * portion of it may still reside in the cache, and when the CPU later reads
+ * or writes other memory, that cached portion might get written back to
+ * main memory. If that happens after the H/W has written its data, the
+ * buffer is corrupted.
+ *
+ * But flushing the memory blindly on each carveout allocation is redundant.
+ *
+ * To optimize the carveout buffer cache flushes, the following strategy is
+ * used:
+ *
+ * The whole carveout is flushed from the cache during its initialization.
+ * During allocation, carveout buffers are not flushed from the cache.
+ * During deallocation, carveout buffers are flushed if they were allocated
+ * as cached; if they were allocated as uncached/writecombined, no cache
+ * flush is needed and just draining the store buffers is enough.
+ */
+int nvmap_flush_heap_block(struct nvmap_client *client,
+	struct nvmap_heap_block *block, size_t len, unsigned int prot)
+{
+	pte_t **pte;
+	void *addr;
+	phys_addr_t kaddr;
+	phys_addr_t phys = block->base;
+	phys_addr_t end = block->base + len;
+
+	if (prot == NVMAP_HANDLE_UNCACHEABLE ||
+	    prot == NVMAP_HANDLE_WRITE_COMBINE)
+		goto out;
+
+	if (len >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD_INNER) {
+		inner_flush_cache_all();
+		if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
+			outer_flush_range(block->base, block->base + len);
+		goto out;
+	}
+
+	pte = nvmap_alloc_pte((client ? client->dev : nvmap_dev), &addr);
+	if (IS_ERR(pte))
+		return PTR_ERR(pte);
+
+	kaddr = (phys_addr_t)addr;
+
+	while (phys < end) {
+		phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
+		unsigned long pfn = __phys_to_pfn(phys);
+		void *base = (void *)kaddr + (phys & ~PAGE_MASK);
+
+		next = min(next, end);
+		set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, pgprot_kernel));
+		flush_tlb_kernel_page(kaddr);
+		__cpuc_flush_dcache_area(base, next - phys);
+		phys = next;
+	}
+
+	if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
+		outer_flush_range(block->base, block->base + len);
+
+	nvmap_free_pte((client ? client->dev : nvmap_dev), pte);
+out:
+	wmb();
+	return 0;
+}
+
+void nvmap_carveout_commit_add(struct nvmap_client *client,
+			       struct nvmap_carveout_node *node,
+			       size_t len)
+{
+	unsigned long flags;
+
+	nvmap_ref_lock(client);
+	spin_lock_irqsave(&node->clients_lock, flags);
+	BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
+	       client->carveout_commit[node->index].commit != 0);
+
+	client->carveout_commit[node->index].commit += len;
+	/* if this client isn't already on the list of nodes for this heap,
+	   add it */
+	if (list_empty(&client->carveout_commit[node->index].list)) {
+		list_add(&client->carveout_commit[node->index].list,
+			 &node->clients);
+	}
+	spin_unlock_irqrestore(&node->clients_lock, flags);
+	nvmap_ref_unlock(client);
+}
+
+void nvmap_carveout_commit_subtract(struct nvmap_client *client,
+				    struct nvmap_carveout_node *node,
+				    size_t len)
+{
+	unsigned long flags;
+
+	if (!client)
+		return;
+
+	spin_lock_irqsave(&node->clients_lock, flags);
+	BUG_ON(client->carveout_commit[node->index].commit < len);
+	client->carveout_commit[node->index].commit -= len;
+	/* if no more allocation in this carveout for this node, delete it */
+	if (!client->carveout_commit[node->index].commit)
+		list_del_init(&client->carveout_commit[node->index].list);
+	spin_unlock_irqrestore(&node->clients_lock, flags);
+}
+
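+/*
+ * The per-heap commit array is embedded at the tail of struct
+ * nvmap_client, so stepping back node->index entries and subtracting
+ * offsetof(struct nvmap_client, carveout_commit) recovers the owning
+ * client -- effectively an open-coded container_of() over that array.
+ */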
+static struct nvmap_client *get_client_from_carveout_commit(
+	struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
+{
+	struct nvmap_carveout_commit *first_commit = commit - node->index;
+	return (void *)first_commit - offsetof(struct nvmap_client,
+					       carveout_commit);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_reclaim);
+static int wait_count;
+bool nvmap_shrink_carveout(struct nvmap_carveout_node *node)
+{
+	struct nvmap_carveout_commit *commit;
+	size_t selected_size = 0;
+	int selected_oom_adj = OOM_SCORE_ADJ_MIN;
+	struct task_struct *selected_task = NULL;
+	unsigned long flags;
+	bool wait = false;
+	int current_oom_adj = OOM_SCORE_ADJ_MIN;
+
+	task_lock(current);
+	if (current->signal)
+		current_oom_adj = current->signal->oom_score_adj;
+	task_unlock(current);
+
+	spin_lock_irqsave(&node->clients_lock, flags);
+	/* find the task with the smallest oom_score_adj (lowest priority)
+	 * and largest carveout allocation -- ignore kernel allocations,
+	 * there's no way to handle them */
+	list_for_each_entry(commit, &node->clients, list) {
+		struct nvmap_client *client =
+			get_client_from_carveout_commit(node, commit);
+		size_t size = commit->commit;
+		struct task_struct *task = client->task;
+		struct signal_struct *sig;
+
+		if (!task)
+			continue;
+
+		task_lock(task);
+		sig = task->signal;
+		if (!task->mm || !sig)
+			goto end;
+		/* don't try to kill current */
+		if (task == current->group_leader)
+			goto end;
+		/* don't try to kill higher priority tasks */
+		if (sig->oom_score_adj < current_oom_adj)
+			goto end;
+		if (sig->oom_score_adj < selected_oom_adj)
+			goto end;
+		if (sig->oom_score_adj == selected_oom_adj &&
+		    size <= selected_size)
+			goto end;
+		selected_oom_adj = sig->oom_score_adj;
+		selected_size = size;
+		selected_task = task;
+end:
+		task_unlock(task);
+	}
+	if (selected_task) {
+		wait = true;
+		if (fatal_signal_pending(selected_task)) {
+			pr_warn("carveout_killer: process %d dying slowly\n",
+				selected_task->pid);
+			goto out;
+		}
+		pr_info("carveout_killer: killing process %d with oom_score_adj %d to reclaim %zu (for process with oom_score_adj %d)\n",
+			selected_task->pid, selected_oom_adj,
+			selected_size, current_oom_adj);
+		force_sig(SIGKILL, selected_task);
+	}
+out:
+	spin_unlock_irqrestore(&node->clients_lock, flags);
+	return wait;
+}
+
+static
+struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
+					      struct nvmap_handle *handle,
+					      unsigned long type)
+{
+	struct nvmap_carveout_node *co_heap;
+	struct nvmap_device *dev = client->dev;
+	int i;
+
+	for (i = 0; i < dev->nr_carveouts; i++) {
+		struct nvmap_heap_block *block;
+		co_heap = &dev->heaps[i];
+
+		if (!(co_heap->heap_bit & type))
+			continue;
+
+		block = nvmap_heap_alloc(co_heap->carveout, handle);
+		if (block)
+			return block;
+	}
+	return NULL;
+}
+
+static bool nvmap_carveout_freed(int count)
+{
+	smp_rmb();
+	return count != wait_count;
+}
+
+struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
+					      struct nvmap_handle *handle,
+					      unsigned long type)
+{
+	struct nvmap_heap_block *block;
+	struct nvmap_carveout_node *co_heap;
+	struct nvmap_device *dev = client->dev;
+	int i;
+	unsigned long end = jiffies +
+		msecs_to_jiffies(NVMAP_CARVEOUT_KILLER_RETRY_TIME);
+	int count = 0;
+
+	do {
+		block = do_nvmap_carveout_alloc(client, handle, type);
+		if (!carveout_killer)
+			return block;
+
+		if (block)
+			return block;
+
+		if (!count++) {
+			char task_comm[TASK_COMM_LEN];
+			if (client->task)
+				get_task_comm(task_comm, client->task);
+			else
+				task_comm[0] = 0;
+			pr_info("%s: failed to allocate %zu bytes for process %s, firing carveout killer!\n",
+				__func__, handle->size, task_comm);
+		} else {
+			pr_info("%s: still can't allocate %zu bytes, attempt %d!\n",
+				__func__, handle->size, count);
+		}
+
+		/* shrink carveouts that matter and try again */
+		for (i = 0; i < dev->nr_carveouts; i++) {
+			int freed_count; /* avoid shadowing the attempt counter */
+			co_heap = &dev->heaps[i];
+
+			if (!(co_heap->heap_bit & type))
+				continue;
+
+			freed_count = wait_count;
+			/* indicates we didn't find anything to kill,
+			 * might as well stop trying */
+			if (!nvmap_shrink_carveout(co_heap))
+				return NULL;
+
+			if (time_is_after_jiffies(end))
+				wait_event_interruptible_timeout(wait_reclaim,
+					 nvmap_carveout_freed(freed_count),
+					 end - jiffies);
+		}
+	} while (time_is_after_jiffies(end));
+
+	if (time_is_before_jiffies(end))
+		pr_info("carveout_killer: timeout expired without allocation succeeding.\n");
+
+	return NULL;
+}
+
+/* remove a handle from the device's tree of all handles; called
+ * when freeing handles. */
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
+{
+	spin_lock(&dev->handle_lock);
+
+	/* re-test inside the spinlock if the handle really has no clients;
+	 * only remove the handle if it is unreferenced */
+	if (atomic_add_return(0, &h->ref) > 0) {
+		spin_unlock(&dev->handle_lock);
+		return -EBUSY;
+	}
+	smp_rmb();
+	BUG_ON(atomic_read(&h->ref) < 0);
+	BUG_ON(atomic_read(&h->pin) != 0);
+
+	rb_erase(&h->node, &dev->handles);
+
+	spin_unlock(&dev->handle_lock);
+	return 0;
+}
+
+/* adds a newly-created handle to the device master tree */
+void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
+{
+	struct rb_node **p;
+	struct rb_node *parent = NULL;
+
+	spin_lock(&dev->handle_lock);
+	p = &dev->handles.rb_node;
+	while (*p) {
+		struct nvmap_handle *b;
+
+		parent = *p;
+		b = rb_entry(parent, struct nvmap_handle, node);
+		if (h > b)
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+	rb_link_node(&h->node, parent, p);
+	rb_insert_color(&h->node, &dev->handles);
+	spin_unlock(&dev->handle_lock);
+}
+
+/* validates that a handle is in the device master tree, and that the
+ * client has permission to access it */
+struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
+					unsigned long id)
+{
+	struct nvmap_handle *h = NULL;
+	struct rb_node *n;
+
+	spin_lock(&client->dev->handle_lock);
+
+	n = client->dev->handles.rb_node;
+
+	while (n) {
+		h = rb_entry(n, struct nvmap_handle, node);
+		if ((unsigned long)h == id) {
+			if (client->super || h->global || (h->owner == client))
+				h = nvmap_handle_get(h);
+			else
+				h = NULL;
+			spin_unlock(&client->dev->handle_lock);
+			return h;
+		}
+		if (id > (unsigned long)h)
+			n = n->rb_right;
+		else
+			n = n->rb_left;
+	}
+	spin_unlock(&client->dev->handle_lock);
+	return NULL;
+}
+
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+					 const char *name)
+{
+	struct nvmap_client *client;
+	struct task_struct *task;
+	int i;
+
+	if (WARN_ON(!dev))
+		return NULL;
+
+	client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
+			 * dev->nr_carveouts), GFP_KERNEL);
+	if (!client)
+		return NULL;
+
+	client->name = name;
+	client->super = true;
+	client->dev = dev;
+	/* TODO: allocate unique IOVMM client for each nvmap client */
+	client->share = &dev->iovmm_master;
+	client->handle_refs = RB_ROOT;
+
+	atomic_set(&client->iovm_commit, 0);
+
+	client->iovm_limit = nvmap_mru_vm_size(client->share->iovmm);
+
+	for (i = 0; i < dev->nr_carveouts; i++) {
+		INIT_LIST_HEAD(&client->carveout_commit[i].list);
+		client->carveout_commit[i].commit = 0;
+	}
+
+	get_task_struct(current->group_leader);
+	task_lock(current->group_leader);
+	/* don't bother to store task struct for kernel threads,
+	   they can't be killed anyway */
+	if (current->flags & PF_KTHREAD) {
+		put_task_struct(current->group_leader);
+		task = NULL;
+	} else {
+		task = current->group_leader;
+	}
+	task_unlock(current->group_leader);
+	client->task = task;
+
+	mutex_init(&client->ref_lock);
+	atomic_set(&client->count, 1);
+
+	spin_lock(&dev->clients_lock);
+	list_add(&client->list, &dev->clients);
+	spin_unlock(&dev->clients_lock);
+	return client;
+}
+
+static void destroy_client(struct nvmap_client *client)
+{
+	struct rb_node *n;
+	int i;
+
+	if (!client)
+		return;
+
+	while ((n = rb_first(&client->handle_refs))) {
+		struct nvmap_handle_ref *ref;
+		int pins, dupes;
+
+		ref = rb_entry(n, struct nvmap_handle_ref, node);
+		rb_erase(&ref->node, &client->handle_refs);
+
+		smp_rmb();
+		pins = atomic_read(&ref->pin);
+
+		if (ref->handle->owner == client)
+			ref->handle->owner = NULL;
+
+		while (pins--)
+			nvmap_unpin_handles(client, &ref->handle, 1);
+
+		dupes = atomic_read(&ref->dupes);
+		while (dupes--)
+			nvmap_handle_put(ref->handle);
+
+		kfree(ref);
+	}
+
+	if (carveout_killer) {
+		wait_count++;
+		smp_wmb();
+		wake_up_all(&wait_reclaim);
+	}
+
+	for (i = 0; i < client->dev->nr_carveouts; i++)
+		list_del(&client->carveout_commit[i].list);
+
+	if (client->task)
+		put_task_struct(client->task);
+
+	spin_lock(&client->dev->clients_lock);
+	list_del(&client->list);
+	spin_unlock(&client->dev->clients_lock);
+	kfree(client);
+}
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
+{
+	if (WARN_ON(!client))
+		return NULL;
+
+	if (WARN_ON(!atomic_add_unless(&client->count, 1, 0)))
+		return NULL;
+
+	return client;
+}
+
+struct nvmap_client *nvmap_client_get_file(int fd)
+{
+	struct nvmap_client *client = ERR_PTR(-EFAULT);
+	struct file *f = fget(fd);
+	if (!f)
+		return ERR_PTR(-EINVAL);
+
+	if ((f->f_op == &nvmap_user_fops) || (f->f_op == &nvmap_super_fops)) {
+		client = f->private_data;
+		atomic_inc(&client->count);
+	}
+
+	fput(f);
+	return client;
+}
+
+void nvmap_client_put(struct nvmap_client *client)
+{
+	if (!client)
+		return;
+
+	if (!atomic_dec_return(&client->count))
+		destroy_client(client);
+}
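+
+/*
+ * Illustrative pairing (editor's sketch, not part of the original
+ * patch): a reference taken with nvmap_client_get() or
+ * nvmap_client_get_file() must be dropped with nvmap_client_put():
+ *
+ *	struct nvmap_client *c = nvmap_client_get_file(fd);
+ *
+ *	if (IS_ERR(c))
+ *		return PTR_ERR(c);
+ *	... submit work that references c ...
+ *	nvmap_client_put(c);
+ */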
+
+static int nvmap_open(struct inode *inode, struct file *filp)
+{
+	struct miscdevice *miscdev = filp->private_data;
+	struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
+	struct nvmap_client *priv;
+	int ret;
+
+	ret = nonseekable_open(inode, filp);
+	if (unlikely(ret))
+		return ret;
+
+	BUG_ON(dev != nvmap_dev);
+	priv = nvmap_create_client(dev, "user");
+	if (!priv)
+		return -ENOMEM;
+	trace_nvmap_open(priv);
+
+	priv->super = (filp->f_op == &nvmap_super_fops);
+
+	filp->private_data = priv;
+	return 0;
+}
+
+static int nvmap_release(struct inode *inode, struct file *filp)
+{
+	trace_nvmap_release(filp->private_data);
+	nvmap_client_put(filp->private_data);
+	return 0;
+}
+
+static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
+{
+	struct nvmap_vma_priv *priv;
+
+	/* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
+	 * will be stored in vm_private_data and faulted in. until the
+	 * ioctl is made, the VMA is mapped no-access */
+	vma->vm_private_data = NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->offs = 0;
+	priv->handle = NULL;
+	atomic_set(&priv->count, 1);
+
+	vma->vm_flags |= VM_SHARED;
+	vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_DONTDUMP);
+	vma->vm_ops = &nvmap_vma_ops;
+	vma->vm_private_data = priv;
+
+	return 0;
+}
+
+static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	void __user *uarg = (void __user *)arg;
+
+	if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
+		return -ENOTTY;
+
+	if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
+		return -ENOTTY;
+
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+	if (_IOC_DIR(cmd) & _IOC_WRITE)
+		err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+	if (err)
+		return -EFAULT;
+
+	switch (cmd) {
+	case NVMAP_IOC_CLAIM:
+		nvmap_warn(filp->private_data, "preserved handles not"
+			   "supported\n");
+		err = -ENODEV;
+		break;
+	case NVMAP_IOC_CREATE:
+	case NVMAP_IOC_FROM_ID:
+		err = nvmap_ioctl_create(filp, cmd, uarg);
+		break;
+
+	case NVMAP_IOC_GET_ID:
+		err = nvmap_ioctl_getid(filp, uarg);
+		break;
+
+	case NVMAP_IOC_PARAM:
+		err = nvmap_ioctl_get_param(filp, uarg);
+		break;
+
+	case NVMAP_IOC_UNPIN_MULT:
+	case NVMAP_IOC_PIN_MULT:
+		err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT, uarg);
+		break;
+
+	case NVMAP_IOC_ALLOC:
+		err = nvmap_ioctl_alloc(filp, uarg);
+		break;
+
+	case NVMAP_IOC_FREE:
+		err = nvmap_ioctl_free(filp, arg);
+		break;
+
+	case NVMAP_IOC_MMAP:
+		err = nvmap_map_into_caller_ptr(filp, uarg);
+		break;
+
+	case NVMAP_IOC_WRITE:
+	case NVMAP_IOC_READ:
+		err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg);
+		break;
+
+	case NVMAP_IOC_CACHE:
+		err = nvmap_ioctl_cache_maint(filp, uarg);
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+	return err;
+}
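+
+/*
+ * Illustrative user-space sequence against /dev/nvmap (editor's sketch;
+ * the struct layouts are assumed -- see the copy of nvmap_ioctl.h added
+ * by this patch for the authoritative definitions):
+ *
+ *	int fd = open("/dev/nvmap", O_RDWR);
+ *	struct nvmap_create_handle create = { .size = len };
+ *	struct nvmap_alloc_handle alloc = { 0 };
+ *
+ *	ioctl(fd, NVMAP_IOC_CREATE, &create);	(sets create.handle)
+ *	alloc.handle    = create.handle;
+ *	alloc.heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
+ *	alloc.flags     = NVMAP_HANDLE_WRITE_COMBINE;
+ *	ioctl(fd, NVMAP_IOC_ALLOC, &alloc);
+ */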
+
+/* to ensure that the backing store for the VMA isn't freed while a fork'd
+ * reference still exists, nvmap_vma_open increments the reference count on
+ * the handle, and nvmap_vma_close decrements it. alternatively, we could
+ * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
+ */
+static void nvmap_vma_open(struct vm_area_struct *vma)
+{
+	struct nvmap_vma_priv *priv;
+
+	priv = vma->vm_private_data;
+	BUG_ON(!priv);
+
+	atomic_inc(&priv->count);
+	if (priv->handle)
+		nvmap_usecount_inc(priv->handle);
+}
+
+static void nvmap_vma_close(struct vm_area_struct *vma)
+{
+	struct nvmap_vma_priv *priv = vma->vm_private_data;
+
+	if (priv) {
+		if (priv->handle) {
+			if (!WARN_ON(priv->handle->usecount <= 0))
+				nvmap_usecount_dec(priv->handle);
+		}
+		if (!atomic_dec_return(&priv->count)) {
+			if (priv->handle)
+				nvmap_handle_put(priv->handle);
+			kfree(priv);
+		}
+	}
+	vma->vm_private_data = NULL;
+}
+
+static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct nvmap_vma_priv *priv;
+	unsigned long offs;
+
+	offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
+	priv = vma->vm_private_data;
+	if (!priv || !priv->handle || !priv->handle->alloc)
+		return VM_FAULT_SIGBUS;
+
+	offs += priv->offs;
+	/* if the VMA was split for some reason, vm_pgoff will be the VMA's
+	 * offset from the original VMA */
+	offs += (vma->vm_pgoff << PAGE_SHIFT);
+
+	if (offs >= priv->handle->size)
+		return VM_FAULT_SIGBUS;
+
+	if (!priv->handle->heap_pgalloc) {
+		unsigned long pfn;
+		BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
+		pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
+		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+		return VM_FAULT_NOPAGE;
+	} else {
+		struct page *page;
+		offs >>= PAGE_SHIFT;
+		page = priv->handle->pgalloc.pages[offs];
+		if (page)
+			get_page(page);
+		vmf->page = page;
+		return (page) ? 0 : VM_FAULT_SIGBUS;
+	}
+}
+
+static ssize_t attr_show_usage(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev);
+
+	return sprintf(buf, "%08x\n", node->heap_bit);
+}
+
+static struct device_attribute heap_attr_show_usage =
+	__ATTR(usage, S_IRUGO, attr_show_usage, NULL);
+
+static struct attribute *heap_extra_attrs[] = {
+	&heap_attr_show_usage.attr,
+	NULL,
+};
+
+static struct attribute_group heap_extra_attr_group = {
+	.attrs = heap_extra_attrs,
+};
+
+static void client_stringify(struct nvmap_client *client, struct seq_file *s)
+{
+	char task_comm[TASK_COMM_LEN];
+	if (!client->task) {
+		seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
+		return;
+	}
+	get_task_comm(task_comm, client->task);
+	seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
+		   client->task->pid);
+}
+
+static void allocations_stringify(struct nvmap_client *client,
+				  struct seq_file *s, bool iovmm)
+{
+	struct rb_node *n = rb_first(&client->handle_refs);
+
+	for (; n != NULL; n = rb_next(n)) {
+		struct nvmap_handle_ref *ref =
+			rb_entry(n, struct nvmap_handle_ref, node);
+		struct nvmap_handle *handle = ref->handle;
+		if (handle->alloc && handle->heap_pgalloc == iovmm) {
+			phys_addr_t base = iovmm ? 0 :
+					   (handle->carveout->base);
+			seq_printf(s, "%-18s %-18s %8llx %10u %8x\n", "", "",
+					(unsigned long long)base,
+					handle->size, handle->userflags);
+		}
+	}
+}
+
+static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
+{
+	struct nvmap_carveout_node *node = s->private;
+	struct nvmap_carveout_commit *commit;
+	unsigned long flags;
+	unsigned int total = 0;
+
+	spin_lock_irqsave(&node->clients_lock, flags);
+	seq_printf(s, "%-18s %18s %8s %10s %8s\n", "CLIENT", "PROCESS", "PID",
+		"SIZE", "FLAGS");
+	seq_printf(s, "%-18s %18s %8s %10s\n", "", "",
+					"BASE", "SIZE");
+	list_for_each_entry(commit, &node->clients, list) {
+		struct nvmap_client *client =
+			get_client_from_carveout_commit(node, commit);
+		client_stringify(client, s);
+		seq_printf(s, " %10u\n", commit->commit);
+		allocations_stringify(client, s, false);
+		seq_printf(s, "\n");
+		total += commit->commit;
+	}
+	seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total);
+	spin_unlock_irqrestore(&node->clients_lock, flags);
+
+	return 0;
+}
+
+static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nvmap_debug_allocations_show,
+			   inode->i_private);
+}
+
+static const struct file_operations debug_allocations_fops = {
+	.open = nvmap_debug_allocations_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
+{
+	struct nvmap_carveout_node *node = s->private;
+	struct nvmap_carveout_commit *commit;
+	unsigned long flags;
+	unsigned int total = 0;
+
+	spin_lock_irqsave(&node->clients_lock, flags);
+	seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
+		"SIZE");
+	list_for_each_entry(commit, &node->clients, list) {
+		struct nvmap_client *client =
+			get_client_from_carveout_commit(node, commit);
+		client_stringify(client, s);
+		seq_printf(s, " %10u\n", commit->commit);
+		total += commit->commit;
+	}
+	seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total);
+	spin_unlock_irqrestore(&node->clients_lock, flags);
+
+	return 0;
+}
+
+static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nvmap_debug_clients_show, inode->i_private);
+}
+
+static const struct file_operations debug_clients_fops = {
+	.open = nvmap_debug_clients_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int nvmap_debug_iovmm_clients_show(struct seq_file *s, void *unused)
+{
+	unsigned long flags;
+	unsigned int total = 0;
+	struct nvmap_client *client;
+	struct nvmap_device *dev = s->private;
+
+	spin_lock_irqsave(&dev->clients_lock, flags);
+	seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
+		"SIZE");
+	list_for_each_entry(client, &dev->clients, list) {
+		client_stringify(client, s);
+		seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit));
+		total += atomic_read(&client->iovm_commit);
+	}
+	seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total);
+	spin_unlock_irqrestore(&dev->clients_lock, flags);
+
+	return 0;
+}
+
+static int nvmap_debug_iovmm_clients_open(struct inode *inode,
+					    struct file *file)
+{
+	return single_open(file, nvmap_debug_iovmm_clients_show,
+			    inode->i_private);
+}
+
+static const struct file_operations debug_iovmm_clients_fops = {
+	.open = nvmap_debug_iovmm_clients_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int nvmap_debug_iovmm_allocations_show(struct seq_file *s, void *unused)
+{
+	unsigned long flags;
+	unsigned int total = 0;
+	struct nvmap_client *client;
+	struct nvmap_device *dev = s->private;
+
+	spin_lock_irqsave(&dev->clients_lock, flags);
+	seq_printf(s, "%-18s %18s %8s %10s %8s\n", "CLIENT", "PROCESS", "PID",
+		"SIZE", "FLAGS");
+	seq_printf(s, "%-18s %18s %8s %10s\n", "", "",
+					"BASE", "SIZE");
+	list_for_each_entry(client, &dev->clients, list) {
+		client_stringify(client, s);
+		seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit));
+		allocations_stringify(client, s, true);
+		seq_printf(s, "\n");
+		total += atomic_read(&client->iovm_commit);
+	}
+	seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total);
+	spin_unlock_irqrestore(&dev->clients_lock, flags);
+
+	return 0;
+}
+
+static int nvmap_debug_iovmm_allocations_open(struct inode *inode,
+						struct file *file)
+{
+	return single_open(file, nvmap_debug_iovmm_allocations_show,
+			    inode->i_private);
+}
+
+static const struct file_operations debug_iovmm_allocations_fops = {
+	.open = nvmap_debug_iovmm_allocations_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct nvmap_platform_data *nvmap_parse_dt(struct platform_device *pdev)
+{
+	struct nvmap_platform_data *pdata;
+	struct device_node *child, *np = pdev->dev.of_node;
+	struct nvmap_platform_carveout *carveout;
+	struct resource mem;
+	int i = 0, val;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	np = of_get_child_by_name(np, "carveouts");
+	if (!np)
+		return NULL;
+
+	pdata->nr_carveouts = of_get_child_count(np);
+
+	if (!pdata->nr_carveouts)
+		return NULL;
+
+	pdata->carveouts = devm_kzalloc(&pdev->dev,
+					sizeof(*carveout) * pdata->nr_carveouts,
+					GFP_KERNEL);
+	if (!pdata->carveouts)
+		return NULL;
+
+	for_each_child_of_node(np, child) {
+		if (of_address_to_resource(child, 0, &mem))
+			continue;
+
+		carveout = &pdata->carveouts[i++];
+
+		carveout->base = mem.start;
+		carveout->size = resource_size(&mem);
+		carveout->name = mem.name;
+
+		memblock_remove(carveout->base, carveout->size);
+
+		if (!of_property_read_u32(child, "buddy-size", &val))
+			carveout->buddy_size = val;
+
+		if (!of_property_read_u32(child, "usage-mask", &val))
+			carveout->usage_mask = val;
+	}
+
+	return pdata;
+}
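+
+/*
+ * Illustrative device-tree fragment matched by nvmap_parse_dt() above
+ * (editor's sketch: property names come from the code, addresses and
+ * sizes are placeholders, and the surrounding #address-cells /
+ * #size-cells context is omitted):
+ *
+ *	nvmap {
+ *		compatible = "nvidia,tegra-nvmap";
+ *		carveouts {
+ *			generic {
+ *				reg = <0x1e000000 0x02000000>;
+ *				buddy-size = <0x8000>;
+ *				usage-mask = <0x1>;
+ *			};
+ *		};
+ *	};
+ */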
+
+static int nvmap_probe(struct platform_device *pdev)
+{
+	struct nvmap_platform_data *plat = pdev->dev.platform_data;
+	struct nvmap_device *dev;
+	struct dentry *nvmap_debug_root;
+	unsigned int i;
+	int e;
+
+	if (!plat && pdev->dev.of_node)
+		plat = nvmap_parse_dt(pdev);
+
+	if (!plat) {
+		dev_err(&pdev->dev, "no platform data?\n");
+		return -ENODEV;
+	}
+
+	if (WARN_ON(nvmap_dev != NULL)) {
+		dev_err(&pdev->dev, "only one nvmap device may be present\n");
+		return -ENODEV;
+	}
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&pdev->dev, "out of memory for device\n");
+		return -ENOMEM;
+	}
+
+	dev->dev_user.minor = MISC_DYNAMIC_MINOR;
+	dev->dev_user.name = "nvmap";
+	dev->dev_user.fops = &nvmap_user_fops;
+	dev->dev_user.parent = &pdev->dev;
+
+	dev->dev_super.minor = MISC_DYNAMIC_MINOR;
+	dev->dev_super.name = "knvmap";
+	dev->dev_super.fops = &nvmap_super_fops;
+	dev->dev_super.parent = &pdev->dev;
+
+	dev->handles = RB_ROOT;
+
+	init_waitqueue_head(&dev->pte_wait);
+
+	init_waitqueue_head(&dev->iovmm_master.pin_wait);
+	mutex_init(&dev->iovmm_master.pin_lock);
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+	for (i = 0; i < NVMAP_NUM_POOLS; i++)
+		nvmap_page_pool_init(&dev->iovmm_master.pools[i], i);
+#endif
+
+	dev->iovmm_master.iovmm =
+		tegra_iovmm_alloc_client(&pdev->dev, NULL,
+			&(dev->dev_user));
+// #if defined(CONFIG_TEGRA_IOVMM) || defined(CONFIG_IOMMU_API)
+// 	if (!dev->iovmm_master.iovmm) {
+// 		e = -ENOMEM;
+// 		dev_err(&pdev->dev, "couldn't create iovmm client\n");
+// 		goto fail;
+// 	}
+// #endif
+	dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE, NULL);
+	if (!dev->vm_rgn) {
+		e = -ENOMEM;
+		dev_err(&pdev->dev, "couldn't allocate remapping region\n");
+		goto fail;
+	}
+	e = nvmap_mru_init(&dev->iovmm_master);
+	if (e) {
+		dev_err(&pdev->dev, "couldn't initialize MRU lists\n");
+		goto fail;
+	}
+
+	spin_lock_init(&dev->ptelock);
+	spin_lock_init(&dev->handle_lock);
+	INIT_LIST_HEAD(&dev->clients);
+	spin_lock_init(&dev->clients_lock);
+
+	for (i = 0; i < NVMAP_NUM_PTES; i++) {
+		unsigned long addr;
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+
+		addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
+		pgd = pgd_offset_k(addr);
+		pud = pud_alloc(&init_mm, pgd, addr);
+		if (!pud) {
+			e = -ENOMEM;
+			dev_err(&pdev->dev, "couldn't allocate page tables\n");
+			goto fail;
+		}
+		pmd = pmd_alloc(&init_mm, pud, addr);
+		if (!pmd) {
+			e = -ENOMEM;
+			dev_err(&pdev->dev, "couldn't allocate page tables\n");
+			goto fail;
+		}
+		dev->ptes[i] = pte_alloc_kernel(pmd, addr);
+		if (!dev->ptes[i]) {
+			e = -ENOMEM;
+			dev_err(&pdev->dev, "couldn't allocate page tables\n");
+			goto fail;
+		}
+	}
+
+	e = misc_register(&dev->dev_user);
+	if (e) {
+		dev_err(&pdev->dev, "unable to register miscdevice %s\n",
+			dev->dev_user.name);
+		goto fail;
+	}
+
+	e = misc_register(&dev->dev_super);
+	if (e) {
+		dev_err(&pdev->dev, "unable to register miscdevice %s\n",
+			dev->dev_super.name);
+		goto fail;
+	}
+
+	dev->nr_carveouts = 0;
+	dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
+			     plat->nr_carveouts, GFP_KERNEL);
+	if (!dev->heaps) {
+		e = -ENOMEM;
+		dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
+		goto fail;
+	}
+
+	nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
+	if (IS_ERR_OR_NULL(nvmap_debug_root))
+		dev_err(&pdev->dev, "couldn't create debug files\n");
+
+	for (i = 0; i < plat->nr_carveouts; i++) {
+		struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
+		const struct nvmap_platform_carveout *co = &plat->carveouts[i];
+		if (!co->size)
+			continue;
+		node->carveout = nvmap_heap_create(dev->dev_user.this_device,
+				   co->name, co->base, co->size,
+				   co->buddy_size, node);
+		if (!node->carveout) {
+			e = -ENOMEM;
+			dev_err(&pdev->dev, "couldn't create %s\n", co->name);
+			goto fail_heaps;
+		}
+		node->index = dev->nr_carveouts;
+		dev->nr_carveouts++;
+		spin_lock_init(&node->clients_lock);
+		INIT_LIST_HEAD(&node->clients);
+		node->heap_bit = co->usage_mask;
+		if (nvmap_heap_create_group(node->carveout,
+					    &heap_extra_attr_group))
+			dev_warn(&pdev->dev, "couldn't add extra attributes\n");
+
+		dev_info(&pdev->dev, "created carveout %s (%uKiB)\n",
+			 co->name, co->size / 1024);
+
+		if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
+			struct dentry *heap_root =
+				debugfs_create_dir(co->name, nvmap_debug_root);
+			if (!IS_ERR_OR_NULL(heap_root)) {
+				debugfs_create_file("clients", S_IRUGO,
+					heap_root, node, &debug_clients_fops);
+				debugfs_create_file("allocations", S_IRUGO,
+				    heap_root, node, &debug_allocations_fops);
+			}
+		}
+	}
+	if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
+		struct dentry *iovmm_root =
+			debugfs_create_dir("iovmm", nvmap_debug_root);
+		if (!IS_ERR_OR_NULL(iovmm_root)) {
+			debugfs_create_file("clients", S_IRUGO, iovmm_root,
+				dev, &debug_iovmm_clients_fops);
+			debugfs_create_file("allocations", S_IRUGO, iovmm_root,
+				dev, &debug_iovmm_allocations_fops);
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+			for (i = 0; i < NVMAP_NUM_POOLS; i++) {
+				char name[40];
+				char *memtype_string[] = {"uc", "wc",
+							  "iwb", "wb"};
+				sprintf(name, "%s_page_pool_available_pages",
+					memtype_string[i]);
+				debugfs_create_u32(name, S_IRUGO,
+					iovmm_root,
+					&dev->iovmm_master.pools[i].npages);
+			}
+#endif
+		}
+	}
+
+	platform_set_drvdata(pdev, dev);
+	nvmap_dev = dev;
+
+	return 0;
+fail_heaps:
+	for (i = 0; i < dev->nr_carveouts; i++) {
+		struct nvmap_carveout_node *node = &dev->heaps[i];
+		nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
+		nvmap_heap_destroy(node->carveout);
+	}
+fail:
+	kfree(dev->heaps);
+	nvmap_mru_destroy(&dev->iovmm_master);
+	if (dev->dev_super.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&dev->dev_super);
+	if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&dev->dev_user);
+	if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
+		tegra_iovmm_free_client(dev->iovmm_master.iovmm);
+	if (dev->vm_rgn)
+		free_vm_area(dev->vm_rgn);
+	kfree(dev);
+	nvmap_dev = NULL;
+	return e;
+}
+
+static int nvmap_remove(struct platform_device *pdev)
+{
+	struct nvmap_device *dev = platform_get_drvdata(pdev);
+	struct rb_node *n;
+	struct nvmap_handle *h;
+	int i;
+
+	misc_deregister(&dev->dev_super);
+	misc_deregister(&dev->dev_user);
+
+	while ((n = rb_first(&dev->handles))) {
+		h = rb_entry(n, struct nvmap_handle, node);
+		rb_erase(&h->node, &dev->handles);
+		kfree(h);
+	}
+
+	if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
+		tegra_iovmm_free_client(dev->iovmm_master.iovmm);
+
+	nvmap_mru_destroy(&dev->iovmm_master);
+
+	for (i = 0; i < dev->nr_carveouts; i++) {
+		struct nvmap_carveout_node *node = &dev->heaps[i];
+		nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
+		nvmap_heap_destroy(node->carveout);
+	}
+	kfree(dev->heaps);
+
+	free_vm_area(dev->vm_rgn);
+	kfree(dev);
+	nvmap_dev = NULL;
+	return 0;
+}
+
+static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	return 0;
+}
+
+static int nvmap_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id nvmap_of_match[] = {
+	{ .compatible = "nvidia,tegra-nvmap", },
+	{ },
+};
+
+static struct platform_driver nvmap_driver = {
+	.probe		= nvmap_probe,
+	.remove		= nvmap_remove,
+	.suspend	= nvmap_suspend,
+	.resume		= nvmap_resume,
+
+	.driver = {
+		.name	= "tegra-nvmap",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(nvmap_of_match),
+	},
+};
+
+static int __init nvmap_init_driver(void)
+{
+	int e;
+
+	nvmap_dev = NULL;
+
+	e = nvmap_heap_init();
+	if (e)
+		goto fail;
+
+	e = platform_driver_register(&nvmap_driver);
+	if (e) {
+		nvmap_heap_deinit();
+		goto fail;
+	}
+
+fail:
+	return e;
+}
+fs_initcall(nvmap_init_driver);
+
+static void __exit nvmap_exit_driver(void)
+{
+	platform_driver_unregister(&nvmap_driver);
+	nvmap_heap_deinit();
+	nvmap_dev = NULL;
+}
+module_exit(nvmap_exit_driver);
diff --git a/drivers/staging/tegra/video/nvmap/nvmap_handle.c b/drivers/staging/tegra/video/nvmap/nvmap_handle.c
new file mode 100644
index 000000000000..637cc79dcb11
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap_handle.c
@@ -0,0 +1,1069 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_handle.c
+ *
+ * Handle allocation and freeing routines for nvmap
+ *
+ * Copyright (c) 2009-2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/shrinker.h>
+#include <linux/moduleparam.h>
+#include <linux/nvmap.h>
+
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+
+#include <mach/iovmm.h>
+#include <trace/events/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+#include "nvmap_common.h"
+
+#define NVMAP_SECURE_HEAPS	(NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
+				 NVMAP_HEAP_CARVEOUT_VPR)
+#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
+#define GFP_NVMAP		(__GFP_HIGHMEM | __GFP_NOWARN)
+#else
+#define GFP_NVMAP		(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+#endif
+/* handles may be arbitrarily large (16+MiB), and any handle allocated from
+ * the kernel (i.e., not a carveout handle) includes its array of pages. to
+ * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
+ * the array is allocated using vmalloc. */
+#define PAGELIST_VMALLOC_MIN	(PAGE_SIZE)
+
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+
+#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
+static bool enable_pp = true;
+static int pool_size[NVMAP_NUM_POOLS];
+
+static char *s_memtype_str[] = {
+	"uc",
+	"wc",
+	"iwb",
+	"wb",
+};
+
+static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
+{
+	mutex_lock(&pool->lock);
+}
+
+static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
+{
+	mutex_unlock(&pool->lock);
+}
+
+static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool)
+{
+	struct page *page = NULL;
+
+	if (pool->npages > 0) {
+		page = pool->page_array[--pool->npages];
+		pool->page_array[pool->npages] = NULL;
+		atomic_dec(&page->_count);
+		BUG_ON(atomic_read(&page->_count) != 1);
+	}
+	return page;
+}
+
+static struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool)
+{
+	struct page *page = NULL;
+
+	if (pool) {
+		nvmap_page_pool_lock(pool);
+		page = nvmap_page_pool_alloc_locked(pool);
+		nvmap_page_pool_unlock(pool);
+	}
+	return page;
+}
+
+static bool nvmap_page_pool_release_locked(struct nvmap_page_pool *pool,
+					    struct page *page)
+{
+	int ret = false;
+
+	if (enable_pp && pool->npages < pool->max_pages) {
+		atomic_inc(&page->_count);
+		BUG_ON(atomic_read(&page->_count) != 2);
+		BUG_ON(pool->page_array[pool->npages] != NULL);
+		pool->page_array[pool->npages++] = page;
+		ret = true;
+	}
+	return ret;
+}
+
+static bool nvmap_page_pool_release(struct nvmap_page_pool *pool,
+					  struct page *page)
+{
+	int ret = false;
+
+	if (pool) {
+		nvmap_page_pool_lock(pool);
+		ret = nvmap_page_pool_release_locked(pool, page);
+		nvmap_page_pool_unlock(pool);
+	}
+	return ret;
+}
+
+static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
+{
+	return pool->npages;
+}
+
+static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free)
+{
+	int err;
+	int i = nr_free;
+	int idx = 0;
+	struct page *page;
+
+	if (!nr_free)
+		return nr_free;
+	nvmap_page_pool_lock(pool);
+	while (i) {
+		page = nvmap_page_pool_alloc_locked(pool);
+		if (!page)
+			break;
+		pool->shrink_array[idx++] = page;
+		i--;
+	}
+
+	if (idx) {
+		/* This op should never fail. */
+		err = set_pages_array_wb(pool->shrink_array, idx);
+		BUG_ON(err);
+	}
+
+	while (idx--)
+		__free_page(pool->shrink_array[idx]);
+	nvmap_page_pool_unlock(pool);
+	return i;
+}
+
+static int nvmap_page_pool_get_unused_pages(void)
+{
+	unsigned int i;
+	int total = 0;
+	struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);
+
+	for (i = 0; i < NVMAP_NUM_POOLS; i++)
+		total += nvmap_page_pool_get_available_count(&share->pools[i]);
+
+	return total;
+}
+
+static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
+{
+	int available_pages;
+	int pages_to_release = 0;
+	struct page **page_array = NULL;
+	struct page **shrink_array = NULL;
+
+	if (size == pool->max_pages)
+		return;
+repeat:
+	nvmap_page_pool_free(pool, pages_to_release);
+	nvmap_page_pool_lock(pool);
+	available_pages = nvmap_page_pool_get_available_count(pool);
+	if (available_pages > size) {
+		nvmap_page_pool_unlock(pool);
+		pages_to_release = available_pages - size;
+		goto repeat;
+	}
+
+	if (size == 0) {
+		vfree(pool->page_array);
+		vfree(pool->shrink_array);
+		pool->page_array = pool->shrink_array = NULL;
+		goto out;
+	}
+
+	page_array = vzalloc(sizeof(struct page *) * size);
+	shrink_array = vzalloc(sizeof(struct page *) * size);
+	if (!page_array || !shrink_array)
+		goto fail;
+
+	memcpy(page_array, pool->page_array,
+		pool->npages * sizeof(struct page *));
+	vfree(pool->page_array);
+	vfree(pool->shrink_array);
+	pool->page_array = page_array;
+	pool->shrink_array = shrink_array;
+out:
+	pr_debug("%s pool resized to %d from %d pages",
+		s_memtype_str[pool->flags], size, pool->max_pages);
+	pool->max_pages = size;
+	goto exit;
+fail:
+	vfree(page_array);
+	vfree(shrink_array);
+	pr_err("failed");
+exit:
+	nvmap_page_pool_unlock(pool);
+}
+
+static unsigned long nvmap_page_pool_shrink_scan(struct shrinker *shrinker,
+						 struct shrink_control *sc)
+{
+	unsigned int i;
+	unsigned int pool_offset;
+	struct nvmap_page_pool *pool;
+	int shrink_pages = sc->nr_to_scan;
+	static atomic_t start_pool = ATOMIC_INIT(-1);
+	struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);
+	unsigned long freed = 0;
+
+	pr_debug("sh_pages=%d", shrink_pages);
+
+	for (i = 0; i < NVMAP_NUM_POOLS && shrink_pages; i++) {
+		pool_offset = atomic_add_return(1, &start_pool) %
+				NVMAP_NUM_POOLS;
+		pool = &share->pools[pool_offset];
+		freed += shrink_pages;
+		shrink_pages = nvmap_page_pool_free(pool, shrink_pages);
+		freed -= shrink_pages;
+	}
+
+	return freed;
+}
+
+static unsigned long
+nvmap_page_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	return nvmap_page_pool_get_unused_pages();
+}
+
+static struct shrinker nvmap_page_pool_shrinker = {
+	.count_objects = nvmap_page_pool_shrink_count,
+	.scan_objects = nvmap_page_pool_shrink_scan,
+	.seeks = 1,
+};
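+
+/*
+ * Shrinker contract as used above: ->count_objects reports how many
+ * pool pages are currently reclaimable, and ->scan_objects walks the
+ * pools round-robin (starting after the last pool visited), freeing up
+ * to sc->nr_to_scan pages and returning the number actually freed.
+ */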
+
+static void shrink_page_pools(int *total_pages, int *available_pages)
+{
+	struct shrink_control sc;
+
+	sc.gfp_mask = GFP_KERNEL;
+	sc.nr_to_scan = 0;
+	*total_pages = nvmap_page_pool_shrink_count(NULL, &sc);
+	sc.nr_to_scan = *total_pages * 2;
+	nvmap_page_pool_shrink_scan(NULL, &sc);
+	*available_pages = nvmap_page_pool_shrink_count(NULL, &sc);
+}
+
+#if NVMAP_TEST_PAGE_POOL_SHRINKER
+static bool shrink_pp;
+static int shrink_set(const char *arg, const struct kernel_param *kp)
+{
+	int cpu = smp_processor_id();
+	unsigned long long t1, t2;
+	int total_pages, available_pages;
+
+	param_set_bool(arg, kp);
+
+	if (shrink_pp) {
+		t1 = cpu_clock(cpu);
+		shrink_page_pools(&total_pages, &available_pages);
+		t2 = cpu_clock(cpu);
+		pr_info("shrink page pools: time=%lldns, "
+			"total_pages_released=%d, free_pages_available=%d",
+			t2-t1, total_pages, available_pages);
+	}
+	return 0;
+}
+
+static int shrink_get(char *buff, const struct kernel_param *kp)
+{
+	return param_get_bool(buff, kp);
+}
+
+static struct kernel_param_ops shrink_ops = {
+	.get = shrink_get,
+	.set = shrink_set,
+};
+
+module_param_cb(shrink_page_pools, &shrink_ops, &shrink_pp, 0644);
+#endif
+
+static int enable_pp_set(const char *arg, const struct kernel_param *kp)
+{
+	int total_pages, available_pages;
+
+	param_set_bool(arg, kp);
+
+	if (!enable_pp) {
+		shrink_page_pools(&total_pages, &available_pages);
+		pr_info("disabled page pools and released pages, "
+			"total_pages_released=%d, free_pages_available=%d",
+			total_pages, available_pages);
+	}
+	return 0;
+}
+
+static int enable_pp_get(char *buff, const struct kernel_param *kp)
+{
+	return param_get_int(buff, kp);
+}
+
+static struct kernel_param_ops enable_pp_ops = {
+	.get = enable_pp_get,
+	.set = enable_pp_set,
+};
+
+module_param_cb(enable_page_pools, &enable_pp_ops, &enable_pp, 0644);
+
+#define POOL_SIZE_SET(m, i) \
+static int pool_size_##m##_set(const char *arg, const struct kernel_param *kp) \
+{ \
+	struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev); \
+	param_set_int(arg, kp); \
+	nvmap_page_pool_resize(&share->pools[i], pool_size[i]); \
+	return 0; \
+}
+
+#define POOL_SIZE_GET(m) \
+static int pool_size_##m##_get(char *buff, const struct kernel_param *kp) \
+{ \
+	return param_get_int(buff, kp); \
+}
+
+#define POOL_SIZE_OPS(m) \
+static struct kernel_param_ops pool_size_##m##_ops = { \
+	.get = pool_size_##m##_get, \
+	.set = pool_size_##m##_set, \
+};
+
+#define POOL_SIZE_MODULE_PARAM_CB(m, i) \
+module_param_cb(m##_pool_size, &pool_size_##m##_ops, &pool_size[i], 0644)
+
+POOL_SIZE_SET(uc, NVMAP_HANDLE_UNCACHEABLE);
+POOL_SIZE_GET(uc);
+POOL_SIZE_OPS(uc);
+POOL_SIZE_MODULE_PARAM_CB(uc, NVMAP_HANDLE_UNCACHEABLE);
+
+POOL_SIZE_SET(wc, NVMAP_HANDLE_WRITE_COMBINE);
+POOL_SIZE_GET(wc);
+POOL_SIZE_OPS(wc);
+POOL_SIZE_MODULE_PARAM_CB(wc, NVMAP_HANDLE_WRITE_COMBINE);
+
+POOL_SIZE_SET(iwb, NVMAP_HANDLE_INNER_CACHEABLE);
+POOL_SIZE_GET(iwb);
+POOL_SIZE_OPS(iwb);
+POOL_SIZE_MODULE_PARAM_CB(iwb, NVMAP_HANDLE_INNER_CACHEABLE);
+
+POOL_SIZE_SET(wb, NVMAP_HANDLE_CACHEABLE);
+POOL_SIZE_GET(wb);
+POOL_SIZE_OPS(wb);
+POOL_SIZE_MODULE_PARAM_CB(wb, NVMAP_HANDLE_CACHEABLE);
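+
+/*
+ * The four expansions above create one read/write module parameter per
+ * pool (uc_pool_size, wc_pool_size, iwb_pool_size, wb_pool_size) that
+ * resizes the pool on write. Illustrative use (editor's sketch; the
+ * exact sysfs path depends on the name the object is built under):
+ *
+ *	echo 4096 > /sys/module/<modname>/parameters/wc_pool_size
+ */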
+
+int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
+{
+	static int reg = 1;
+	struct sysinfo info;
+#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
+	int i;
+	int err;
+	struct page *page;
+	int highmem_pages = 0;
+	typedef int (*set_pages_array) (struct page **pages, int addrinarray);
+	set_pages_array s_cpa[] = {
+		set_pages_array_uc,
+		set_pages_array_wc,
+		set_pages_array_iwb,
+		set_pages_array_wb
+	};
+#endif
+
+	BUG_ON(flags >= NVMAP_NUM_POOLS);
+	memset(pool, 0x0, sizeof(*pool));
+	mutex_init(&pool->lock);
+	pool->flags = flags;
+
+	/* No default pool for cached memory. */
+	if (flags == NVMAP_HANDLE_CACHEABLE)
+		return 0;
+
+	si_meminfo(&info);
+	if (!pool_size[flags] && !CONFIG_NVMAP_PAGE_POOL_SIZE)
+		/* Use 3/8th of total ram for page pools.
+		 * 1/8th for uc, 1/8th for wc and 1/8th for iwb.
+		 */
+		pool->max_pages = info.totalram >> 3;
+	else
+		pool->max_pages = CONFIG_NVMAP_PAGE_POOL_SIZE;
+
+	if (pool->max_pages <= 0 || pool->max_pages >= info.totalram)
+		goto fail;
+	pool_size[flags] = pool->max_pages;
+	pr_info("nvmap %s page pool size=%d pages\n",
+		s_memtype_str[flags], pool->max_pages);
+	pool->page_array = vzalloc(sizeof(void *) * pool->max_pages);
+	pool->shrink_array = vzalloc(sizeof(struct page *) * pool->max_pages);
+	if (!pool->page_array || !pool->shrink_array)
+		goto fail;
+
+	if (reg) {
+		reg = 0;
+		register_shrinker(&nvmap_page_pool_shrinker);
+	}
+
+#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
+	nvmap_page_pool_lock(pool);
+	for (i = 0; i < pool->max_pages; i++) {
+		page = alloc_page(GFP_NVMAP);
+		if (!page)
+			goto do_cpa;
+		if (!nvmap_page_pool_release_locked(pool, page)) {
+			__free_page(page);
+			goto do_cpa;
+		}
+		if (PageHighMem(page))
+			highmem_pages++;
+	}
+	si_meminfo(&info);
+	pr_info("nvmap pool = %s, highmem=%d, pool_size=%d, totalram=%lu,\n"
+		"\t\t\tfreeram=%lu, totalhigh=%lu, freehigh=%lu\n",
+		s_memtype_str[flags], highmem_pages, pool->max_pages,
+		info.totalram, info.freeram, info.totalhigh, info.freehigh);
+do_cpa:
+	err = (*s_cpa[flags])(pool->page_array, pool->npages);
+	BUG_ON(err);
+	nvmap_page_pool_unlock(pool);
+#endif
+	return 0;
+fail:
+	pool->max_pages = 0;
+	vfree(pool->shrink_array);
+	vfree(pool->page_array);
+	return -ENOMEM;
+}
+#endif
+
+static inline void *altalloc(size_t len)
+{
+	if (len > PAGELIST_VMALLOC_MIN)
+		return vmalloc(len);
+	else
+		return kmalloc(len, GFP_KERNEL);
+}
+
+static inline void altfree(void *ptr, size_t len)
+{
+	if (!ptr)
+		return;
+
+	if (len > PAGELIST_VMALLOC_MIN)
+		vfree(ptr);
+	else
+		kfree(ptr);
+}
+
+void _nvmap_handle_free(struct nvmap_handle *h)
+{
+	int err;
+	struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
+	unsigned int i, nr_page, page_index = 0;
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+	struct nvmap_page_pool *pool = NULL;
+#endif
+
+	if (nvmap_handle_remove(h->dev, h) != 0)
+		return;
+
+	if (!h->alloc)
+		goto out;
+
+	if (!h->heap_pgalloc) {
+		nvmap_usecount_inc(h);
+		nvmap_heap_free(h->carveout);
+		goto out;
+	}
+
+	nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);
+
+	BUG_ON(h->size & ~PAGE_MASK);
+	BUG_ON(!h->pgalloc.pages);
+
+	nvmap_mru_remove(share, h);
+
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+	if (h->flags < NVMAP_NUM_POOLS)
+		pool = &share->pools[h->flags];
+
+	while (page_index < nr_page) {
+		if (!nvmap_page_pool_release(pool,
+		    h->pgalloc.pages[page_index]))
+			break;
+		page_index++;
+	}
+#endif
+
+	if (page_index == nr_page)
+		goto skip_attr_restore;
+
+	/* Restore page attributes. */
+	if (h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
+	    h->flags == NVMAP_HANDLE_UNCACHEABLE ||
+	    h->flags == NVMAP_HANDLE_INNER_CACHEABLE) {
+		/* This op should never fail. */
+		err = set_pages_array_wb(&h->pgalloc.pages[page_index],
+				nr_page - page_index);
+		BUG_ON(err);
+	}
+
+skip_attr_restore:
+	if (h->pgalloc.area)
+		tegra_iovmm_free_vm(h->pgalloc.area);
+
+	for (i = page_index; i < nr_page; i++)
+		__free_page(h->pgalloc.pages[i]);
+
+	altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));
+
+out:
+	kfree(h);
+}
+
+static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
+{
+	struct page *page, *p, *e;
+	unsigned int order;
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+	page = alloc_pages(gfp, order);
+
+	if (!page)
+		return NULL;
+
+	split_page(page, order);
+	e = page + (1 << order);
+	for (p = page + (size >> PAGE_SHIFT); p < e; p++)
+		__free_page(p);
+
+	return page;
+}
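+
+/*
+ * Example (editor's note): for size == 3 * PAGE_SIZE, get_order()
+ * yields 2, so an order-2 block of four pages is allocated, split into
+ * single pages, and the unused fourth page is returned to the
+ * allocator, leaving exactly three physically contiguous pages.
+ */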
+
+static int handle_page_alloc(struct nvmap_client *client,
+			     struct nvmap_handle *h, bool contiguous)
+{
+	int err = 0;
+	size_t size = PAGE_ALIGN(h->size);
+	unsigned int nr_page = size >> PAGE_SHIFT;
+	pgprot_t prot;
+	unsigned int i = 0, page_index = 0;
+	struct page **pages;
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+	struct nvmap_page_pool *pool = NULL;
+	struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
+	phys_addr_t paddr;
+#endif
+	gfp_t gfp = GFP_NVMAP;
+	unsigned long kaddr;
+	pte_t **pte = NULL;
+
+	if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES) {
+		gfp |= __GFP_ZERO;
+		prot = nvmap_pgprot(h, pgprot_kernel);
+		pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
+		if (IS_ERR(pte))
+			return -ENOMEM;
+	}
+
+	pages = altalloc(nr_page * sizeof(*pages));
+	if (!pages) {
+		/* don't leak the pte reserved for zeroing highmem pages */
+		if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES)
+			nvmap_free_pte(client->dev, pte);
+		return -ENOMEM;
+	}
+
+	prot = nvmap_pgprot(h, pgprot_kernel);
+
+	h->pgalloc.area = NULL;
+	if (contiguous) {
+		struct page *page;
+		page = nvmap_alloc_pages_exact(gfp, size);
+		if (!page)
+			goto fail;
+
+		for (i = 0; i < nr_page; i++)
+			pages[i] = nth_page(page, i);
+
+	} else {
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+		if (h->flags < NVMAP_NUM_POOLS)
+			pool = &share->pools[h->flags];
+
+		for (i = 0; i < nr_page; i++) {
+			/* Get pages from pool, if available. */
+			pages[i] = nvmap_page_pool_alloc(pool);
+			if (!pages[i])
+				break;
+			if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES) {
+				/*
+				 * Lowmem pages always have a kernel virtual
+				 * address and can be memset directly;
+				 * highmem pages are zeroed through the
+				 * temporary kernel mapping set up below.
+				 */
+				if (!PageHighMem(pages[i])) {
+					memset(page_address(pages[i]), 0,
+					       PAGE_SIZE);
+				} else {
+					paddr = page_to_phys(pages[i]);
+					set_pte_at(&init_mm, kaddr, *pte,
+						   pfn_pte(__phys_to_pfn(paddr),
+							   prot));
+					flush_tlb_kernel_page(kaddr);
+					memset((char *)kaddr, 0, PAGE_SIZE);
+				}
+			}
+			page_index++;
+		}
+#endif
+		for (; i < nr_page; i++) {
+			pages[i] = nvmap_alloc_pages_exact(gfp,	PAGE_SIZE);
+			if (!pages[i])
+				goto fail;
+		}
+
+#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+		h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
+					NULL, size, h->align, prot,
+					h->pgalloc.iovm_addr);
+		if (!h->pgalloc.area)
+			goto fail;
+
+		h->pgalloc.dirty = true;
+#endif
+	}
+
+	if (nr_page == page_index)
+		goto skip_attr_change;
+
+	/* Update the pages mapping in kernel page table. */
+	if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
+		err = set_pages_array_wc(&pages[page_index],
+					nr_page - page_index);
+	else if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
+		err = set_pages_array_uc(&pages[page_index],
+					nr_page - page_index);
+	else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
+		err = set_pages_array_iwb(&pages[page_index],
+					nr_page - page_index);
+
+	if (err)
+		goto fail;
+
+skip_attr_change:
+	if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES)
+		nvmap_free_pte(client->dev, pte);
+	h->size = size;
+	h->pgalloc.pages = pages;
+	h->pgalloc.contig = contiguous;
+	INIT_LIST_HEAD(&h->pgalloc.mru_list);
+	return 0;
+
+fail:
+	if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES)
+		nvmap_free_pte(client->dev, pte);
+	err = set_pages_array_wb(pages, i);
+	BUG_ON(err);
+	while (i--)
+		__free_page(pages[i]);
+	altfree(pages, nr_page * sizeof(*pages));
+	wmb();
+	return -ENOMEM;
+}
+
+static void alloc_handle(struct nvmap_client *client,
+			 struct nvmap_handle *h, unsigned int type)
+{
+	unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
+	unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;
+
+	BUG_ON(type & (type - 1));
+
+#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+	/* Convert generic carveout requests to iovmm requests. */
+	carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
+	iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
+#endif
+
+	if (type & carveout_mask) {
+		struct nvmap_heap_block *b;
+		/* Protect handle from relocation */
+		nvmap_usecount_inc(h);
+
+		b = nvmap_carveout_alloc(client, h, type);
+		if (b) {
+			h->heap_pgalloc = false;
+			h->alloc = true;
+			nvmap_carveout_commit_add(client,
+				nvmap_heap_to_arg(nvmap_block_to_heap(b)),
+				h->size);
+		}
+		nvmap_usecount_dec(h);
+
+	} else if (type & iovmm_mask) {
+		size_t reserved = PAGE_ALIGN(h->size);
+		int commit = 0;
+		int ret;
+
+		/* increment the committed IOVM space prior to allocation
+		 * to avoid race conditions with other threads simultaneously
+		 * allocating. */
+		commit = atomic_add_return(reserved,
+					    &client->iovm_commit);
+
+		if (commit < client->iovm_limit)
+			ret = handle_page_alloc(client, h, false);
+		else
+			ret = -ENOMEM;
+
+		if (!ret) {
+			h->heap_pgalloc = true;
+			h->alloc = true;
+		} else {
+			atomic_sub(reserved, &client->iovm_commit);
+		}
+
+	} else if (type & NVMAP_HEAP_SYSMEM) {
+		if (handle_page_alloc(client, h, true) == 0) {
+			BUG_ON(!h->pgalloc.contig);
+			h->heap_pgalloc = true;
+			h->alloc = true;
+		}
+	}
+}
+
+/* small allocations will try to allocate from generic OS memory before
+ * any of the limited heaps, to increase the effective memory for graphics
+ * allocations, and to reduce fragmentation of the graphics heaps with
+ * sub-page splinters */
+static const unsigned int heap_policy_small[] = {
+	NVMAP_HEAP_CARVEOUT_VPR,
+	NVMAP_HEAP_CARVEOUT_IRAM,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+	NVMAP_HEAP_SYSMEM,
+#endif
+	NVMAP_HEAP_CARVEOUT_MASK,
+	NVMAP_HEAP_IOVMM,
+	0,
+};
+
+static const unsigned int heap_policy_large[] = {
+	NVMAP_HEAP_CARVEOUT_VPR,
+	NVMAP_HEAP_CARVEOUT_IRAM,
+	NVMAP_HEAP_IOVMM,
+	NVMAP_HEAP_CARVEOUT_MASK,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+	NVMAP_HEAP_SYSMEM,
+#endif
+	0,
+};
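+
+/*
+ * Example walk (editor's note, assuming CONFIG_NVMAP_ALLOW_SYSMEM and
+ * no carveout-to-IOVMM conversion): a one-page request with heap_mask
+ * covering IOVMM and the generic carveout skips VPR and IRAM (masked
+ * out), tries SYSMEM first, then the generic carveout, and falls back
+ * to IOVMM only as a last resort.
+ */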
+
+int nvmap_alloc_handle_id(struct nvmap_client *client,
+			  unsigned long id, unsigned int heap_mask,
+			  size_t align, unsigned int flags)
+{
+	struct nvmap_handle *h = NULL;
+	const unsigned int *alloc_policy;
+	int nr_page;
+	int err = -ENOMEM;
+
+	h = nvmap_get_handle_id(client, id);
+
+	if (!h)
+		return -EINVAL;
+
+	if (h->alloc)
+		goto out;
+
+	trace_nvmap_alloc_handle_id(client, id, heap_mask, align, flags);
+	h->userflags = flags;
+	nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);
+	h->secure = !!(flags & NVMAP_HANDLE_SECURE);
+	h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
+	h->align = max_t(size_t, align, L1_CACHE_BYTES);
+
+#ifndef CONFIG_TEGRA_IOVMM
+	/* convert iovmm requests to generic carveout. */
+	if (heap_mask & NVMAP_HEAP_IOVMM) {
+		heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
+			    NVMAP_HEAP_CARVEOUT_GENERIC;
+	}
+#endif
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+	/* Allow single-page allocations in system memory to save
+	 * carveout space and avoid extra iovm mappings */
+	if (nr_page == 1) {
+		if (heap_mask &
+		    (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC))
+			heap_mask |= NVMAP_HEAP_SYSMEM;
+	}
+#endif
+#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
+	/* This restriction is deprecated as alignments greater than
+	   PAGE_SIZE are now correctly handled, but it is retained for
+	   AP20 compatibility. */
+	if (h->align > PAGE_SIZE)
+		heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
+#endif
+	/* secure allocations can only be served from secure heaps */
+	if (h->secure)
+		heap_mask &= NVMAP_SECURE_HEAPS;
+
+	if (!heap_mask) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;
+
+	while (!h->alloc && *alloc_policy) {
+		unsigned int heap_type;
+
+		heap_type = *alloc_policy++;
+		heap_type &= heap_mask;
+
+		if (!heap_type)
+			continue;
+
+		heap_mask &= ~heap_type;
+
+		while (heap_type && !h->alloc) {
+			unsigned int heap;
+
+			/* iterate possible heaps MSB-to-LSB, since higher-
+			 * priority carveouts will have higher usage masks */
+			heap = 1 << __fls(heap_type);
+			alloc_handle(client, h, heap);
+			heap_type &= ~heap;
+		}
+	}
+
+out:
+	err = (h->alloc) ? 0 : err;
+	nvmap_handle_put(h);
+	return err;
+}
+
+void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
+{
+	struct nvmap_handle_ref *ref;
+	struct nvmap_handle *h;
+	int pins;
+
+	nvmap_ref_lock(client);
+
+	ref = _nvmap_validate_id_locked(client, id);
+	if (!ref) {
+		nvmap_ref_unlock(client);
+		return;
+	}
+
+	trace_nvmap_free_handle_id(client, id);
+	BUG_ON(!ref->handle);
+	h = ref->handle;
+
+	if (atomic_dec_return(&ref->dupes)) {
+		nvmap_ref_unlock(client);
+		goto out;
+	}
+
+	smp_rmb();
+	pins = atomic_read(&ref->pin);
+	rb_erase(&ref->node, &client->handle_refs);
+
+	if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
+		atomic_sub(h->size, &client->iovm_commit);
+
+	if (h->alloc && !h->heap_pgalloc) {
+		mutex_lock(&h->lock);
+		nvmap_carveout_commit_subtract(client,
+			nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
+			h->size);
+		mutex_unlock(&h->lock);
+	}
+
+	nvmap_ref_unlock(client);
+
+	if (pins)
+		nvmap_err(client, "%s freeing pinned handle %p\n",
+			  current->group_leader->comm, h);
+
+	while (pins--)
+		nvmap_unpin_handles(client, &ref->handle, 1);
+
+	if (h->owner == client)
+		h->owner = NULL;
+
+	kfree(ref);
+
+out:
+	BUG_ON(!atomic_read(&h->ref));
+	nvmap_handle_put(h);
+}
+
+static void add_handle_ref(struct nvmap_client *client,
+			   struct nvmap_handle_ref *ref)
+{
+	struct rb_node **p, *parent = NULL;
+
+	nvmap_ref_lock(client);
+	p = &client->handle_refs.rb_node;
+	while (*p) {
+		struct nvmap_handle_ref *node;
+		parent = *p;
+		node = rb_entry(parent, struct nvmap_handle_ref, node);
+		if (ref->handle > node->handle)
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+	rb_link_node(&ref->node, parent, p);
+	rb_insert_color(&ref->node, &client->handle_refs);
+	nvmap_ref_unlock(client);
+}
+
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+					     size_t size)
+{
+	struct nvmap_handle *h;
+	struct nvmap_handle_ref *ref = NULL;
+
+	if (!client)
+		return ERR_PTR(-EINVAL);
+
+	if (!size)
+		return ERR_PTR(-EINVAL);
+
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
+		return ERR_PTR(-ENOMEM);
+
+	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (!ref) {
+		kfree(h);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	atomic_set(&h->ref, 1);
+	atomic_set(&h->pin, 0);
+	h->owner = client;
+	h->dev = client->dev;
+	BUG_ON(!h->owner);
+	h->size = h->orig_size = size;
+	h->flags = NVMAP_HANDLE_WRITE_COMBINE;
+	mutex_init(&h->lock);
+
+	nvmap_handle_add(client->dev, h);
+
+	atomic_set(&ref->dupes, 1);
+	ref->handle = h;
+	atomic_set(&ref->pin, 0);
+	add_handle_ref(client, ref);
+	trace_nvmap_create_handle(client, h, size, ref);
+	return ref;
+}
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+						   unsigned long id)
+{
+	struct nvmap_handle_ref *ref = NULL;
+	struct nvmap_handle *h = NULL;
+
+	BUG_ON(!client || client->dev != nvmap_dev);
+	/* on success, the reference count for the handle should be
+	 * incremented, so the success paths will not call nvmap_handle_put */
+	h = nvmap_validate_get(client, id);
+
+	if (!h) {
+		nvmap_debug(client, "%s duplicate handle failed\n",
+			    current->group_leader->comm);
+		return ERR_PTR(-EPERM);
+	}
+
+	if (!h->alloc) {
+		nvmap_err(client, "%s duplicating unallocated handle\n",
+			  current->group_leader->comm);
+		nvmap_handle_put(h);
+		return ERR_PTR(-EINVAL);
+	}
+
+	nvmap_ref_lock(client);
+	ref = _nvmap_validate_id_locked(client, (unsigned long)h);
+
+	if (ref) {
+		/* handle already duplicated in client; just increment
+		 * the reference count rather than re-duplicating it */
+		atomic_inc(&ref->dupes);
+		nvmap_ref_unlock(client);
+		return ref;
+	}
+
+	nvmap_ref_unlock(client);
+
+	/* verify that adding this handle to the process' access list
+	 * won't exceed the IOVM limit */
+	if (h->heap_pgalloc && !h->pgalloc.contig) {
+		int oc;
+		oc = atomic_add_return(h->size, &client->iovm_commit);
+		if (oc > client->iovm_limit && !client->super) {
+			atomic_sub(h->size, &client->iovm_commit);
+			nvmap_handle_put(h);
+			nvmap_err(client, "duplicating %p in %s over-commits"
+				  " IOVMM space\n", (void *)id,
+				  current->group_leader->comm);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (!ref) {
+		nvmap_handle_put(h);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (!h->heap_pgalloc) {
+		mutex_lock(&h->lock);
+		nvmap_carveout_commit_add(client,
+			nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
+			h->size);
+		mutex_unlock(&h->lock);
+	}
+
+	atomic_set(&ref->dupes, 1);
+	ref->handle = h;
+	atomic_set(&ref->pin, 0);
+	add_handle_ref(client, ref);
+	trace_nvmap_duplicate_handle_id(client, id, ref);
+	return ref;
+}
diff --git a/drivers/staging/tegra/video/nvmap/nvmap_heap.c b/drivers/staging/tegra/video/nvmap/nvmap_heap.c
new file mode 100644
index 000000000000..34cc78de55cc
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap_heap.c
@@ -0,0 +1,1129 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_heap.c
+ *
+ * GPU heap allocator.
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/nvmap.h>
+#include <linux/stat.h>
+
+#include "nvmap.h"
+#include "nvmap_heap.h"
+#include "nvmap_common.h"
+
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * "carveouts" are platform-defined regions of physically contiguous memory
+ * which are not managed by the OS. a platform may specify multiple carveouts,
+ * for either small special-purpose memory regions (like IRAM on Tegra SoCs)
+ * or reserved regions of main system memory.
+ *
+ * the carveout allocator returns allocations which are physically contiguous.
+ * to reduce external fragmentation, the allocation algorithm implemented in
+ * this file employs 3 strategies for keeping allocations of similar size
+ * grouped together inside the larger heap: the "small", "normal" and "huge"
+ * strategies. the size thresholds (in bytes) for determining which strategy
+ * to employ should be provided by the platform for each heap. it is possible
+ * for a platform to define a heap where only the "normal" strategy is used.
+ *
+ * o "normal" allocations use an address-order first-fit allocator (called
+ *   BOTTOM_UP in the code below). each allocation is rounded up to be
+ *   an integer multiple of the "small" allocation size.
+ *
+ * o "huge" allocations use an address-order last-fit allocator (called
+ *   TOP_DOWN in the code below). like "normal" allocations, each allocation
+ *   is rounded up to be an integer multiple of the "small" allocation size.
+ *
+ * o "small" allocations are treated differently: the heap manager maintains
+ *   a pool of "small"-sized blocks internally from which allocations less
+ *   than 1/2 of the "small" size are buddy-allocated. if a "small" allocation
+ *   is requested and none of the buddy sub-heaps is able to service it,
+ *   the heap manager will try to allocate a new buddy-heap.
+ *
+ * this allocator is intended to keep "splinters" colocated in the carveout,
+ * and to ensure that the minimum free block size in the carveout (i.e., the
+ * "small" threshold) is still a meaningful size.
+ *
+ */
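+
+/*
+ * Illustrative sketch (not part of the driver): for a hypothetical heap the
+ * strategy selection described above reduces to roughly the following,
+ * mirroring the checks in nvmap_heap_alloc() and do_heap_alloc() below.
+ */
+#if 0	/* example only */
+static const char *pick_strategy(size_t len, size_t small_alloc,
+				 size_t buddy_heap_size)
+{
+	if (buddy_heap_size && len <= buddy_heap_size / 2)
+		return "small";		/* buddy sub-heap allocation */
+	if (len <= small_alloc)
+		return "normal";	/* bottom-up address-order first-fit */
+	return "huge";			/* top-down address-order last-fit */
+}
+#endif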
+
+#define MAX_BUDDY_NR	128	/* maximum buddies in a buddy allocator */
+
+enum direction {
+	TOP_DOWN,
+	BOTTOM_UP
+};
+
+enum block_type {
+	BLOCK_FIRST_FIT,	/* block was allocated directly from the heap */
+	BLOCK_BUDDY,		/* block was allocated from a buddy sub-heap */
+	BLOCK_EMPTY,
+};
+
+struct heap_stat {
+	size_t free;		/* total free size */
+	size_t free_largest;	/* largest free block */
+	size_t free_count;	/* number of free blocks */
+	size_t total;		/* total size */
+	size_t largest;		/* largest single block */
+	size_t count;		/* total number of blocks */
+	/* fast compaction attempt counter */
+	unsigned int compaction_count_fast;
+	/* full compaction attempt counter */
+	unsigned int compaction_count_full;
+};
+
+struct buddy_heap;
+
+struct buddy_block {
+	struct nvmap_heap_block block;
+	struct buddy_heap *heap;
+};
+
+struct list_block {
+	struct nvmap_heap_block block;
+	struct list_head all_list;
+	unsigned int mem_prot;
+	phys_addr_t orig_addr;
+	size_t size;
+	size_t align;
+	struct nvmap_heap *heap;
+	struct list_head free_list;
+};
+
+struct combo_block {
+	union {
+		struct list_block lb;
+		struct buddy_block bb;
+	};
+};
+
+struct buddy_bits {
+	unsigned int alloc:1;
+	unsigned int order:7;	/* log2(MAX_BUDDY_NR); */
+};
+
+struct buddy_heap {
+	struct list_block *heap_base;
+	unsigned int nr_buddies;
+	struct list_head buddy_list;
+	struct buddy_bits bitmap[MAX_BUDDY_NR];
+};
+
+struct nvmap_heap {
+	struct list_head all_list;
+	struct list_head free_list;
+	struct mutex lock;
+	struct list_head buddy_list;
+	unsigned int min_buddy_shift;
+	unsigned int buddy_heap_size;
+	unsigned int small_alloc;
+	const char *name;
+	void *arg;
+	struct device dev;
+};
+
+static struct kmem_cache *buddy_heap_cache;
+static struct kmem_cache *block_cache;
+
+static inline struct nvmap_heap *parent_of(struct buddy_heap *heap)
+{
+	return heap->heap_base->heap;
+}
+
+static inline unsigned int order_of(size_t len, size_t min_shift)
+{
+	len = 2 * DIV_ROUND_UP(len, (1 << min_shift)) - 1;
+	return fls(len)-1;
+}
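+
+/* e.g. with min_shift == 12 (4KB minimum blocks), order_of(12KB, 12)
+ * computes 2 * DIV_ROUND_UP(12KB, 4KB) - 1 == 5, and fls(5) - 1 == 2,
+ * i.e. the request is rounded up to an order-2 (four-block, 16KB) buddy. */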
+
+/* accumulates the usage statistics of the buddy heap into *stat; must be
+ * called while holding the parent heap's lock. */
+static void buddy_stat(struct buddy_heap *heap, struct heap_stat *stat)
+{
+	unsigned int index;
+	unsigned int shift = parent_of(heap)->min_buddy_shift;
+
+	for (index = 0; index < heap->nr_buddies;
+	     index += (1 << heap->bitmap[index].order)) {
+		size_t curr = 1 << (heap->bitmap[index].order + shift);
+
+		stat->largest = max(stat->largest, curr);
+		stat->total += curr;
+		stat->count++;
+
+		if (!heap->bitmap[index].alloc) {
+			stat->free += curr;
+			stat->free_largest = max(stat->free_largest, curr);
+			stat->free_count++;
+		}
+	}
+}
+
+/* fills *stat with usage statistics for the heap (including any free blocks
+ * in any buddy-heap suballocators) and returns the base address of the heap.
+ * acquires the heap's lock internally, so it must not be called with the
+ * lock already held. */
+static phys_addr_t heap_stat(struct nvmap_heap *heap, struct heap_stat *stat)
+{
+	struct buddy_heap *bh;
+	struct list_block *l = NULL;
+	phys_addr_t base = -1ul;
+
+	memset(stat, 0, sizeof(*stat));
+	mutex_lock(&heap->lock);
+	list_for_each_entry(l, &heap->all_list, all_list) {
+		stat->total += l->size;
+		stat->largest = max(l->size, stat->largest);
+		stat->count++;
+		base = min(base, l->orig_addr);
+	}
+
+	list_for_each_entry(bh, &heap->buddy_list, buddy_list) {
+		buddy_stat(bh, stat);
+		/* the total counts are double-counted for buddy heaps
+		 * since the blocks allocated for buddy heaps exist in the
+		 * all_list; subtract out the doubly-added stats */
+		stat->total -= bh->heap_base->size;
+		stat->count--;
+	}
+
+	list_for_each_entry(l, &heap->free_list, free_list) {
+		stat->free += l->size;
+		stat->free_count++;
+		stat->free_largest = max(l->size, stat->free_largest);
+	}
+	mutex_unlock(&heap->lock);
+
+	return base;
+}
+
+static ssize_t heap_name_show(struct device *dev,
+			      struct device_attribute *attr, char *buf);
+
+static ssize_t heap_stat_show(struct device *dev,
+			      struct device_attribute *attr, char *buf);
+
+static struct device_attribute heap_stat_total_max =
+	__ATTR(total_max, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_total_count =
+	__ATTR(total_count, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_total_size =
+	__ATTR(total_size, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_max =
+	__ATTR(free_max, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_count =
+	__ATTR(free_count, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_size =
+	__ATTR(free_size, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_base =
+	__ATTR(base, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_attr_name =
+	__ATTR(name, S_IRUGO, heap_name_show, NULL);
+
+static struct attribute *heap_stat_attrs[] = {
+	&heap_stat_total_max.attr,
+	&heap_stat_total_count.attr,
+	&heap_stat_total_size.attr,
+	&heap_stat_free_max.attr,
+	&heap_stat_free_count.attr,
+	&heap_stat_free_size.attr,
+	&heap_stat_base.attr,
+	&heap_attr_name.attr,
+	NULL,
+};
+
+static struct attribute_group heap_stat_attr_group = {
+	.attrs	= heap_stat_attrs,
+};
+
+static ssize_t heap_name_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+
+	return sprintf(buf, "%s\n", heap->name);
+}
+
+static ssize_t heap_stat_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+	struct heap_stat stat;
+	phys_addr_t base;
+
+	base = heap_stat(heap, &stat);
+
+	if (attr == &heap_stat_total_max)
+		return sprintf(buf, "%zu\n", stat.largest);
+	else if (attr == &heap_stat_total_count)
+		return sprintf(buf, "%zu\n", stat.count);
+	else if (attr == &heap_stat_total_size)
+		return sprintf(buf, "%zu\n", stat.total);
+	else if (attr == &heap_stat_free_max)
+		return sprintf(buf, "%zu\n", stat.free_largest);
+	else if (attr == &heap_stat_free_count)
+		return sprintf(buf, "%zu\n", stat.free_count);
+	else if (attr == &heap_stat_free_size)
+		return sprintf(buf, "%zu\n", stat.free);
+	else if (attr == &heap_stat_base)
+		return sprintf(buf, "%08llx\n", (unsigned long long)base);
+	else
+		return -EINVAL;
+}
+
+#ifndef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
+					    size_t size, size_t align,
+					    unsigned int mem_prot)
+{
+	unsigned int index = 0;
+	unsigned int min_shift = parent_of(heap)->min_buddy_shift;
+	unsigned int order = order_of(size, min_shift);
+	unsigned int align_mask;
+	unsigned int best = heap->nr_buddies;
+	struct buddy_block *b;
+
+	if (heap->heap_base->mem_prot != mem_prot)
+		return NULL;
+
+	align = max(align, (size_t)(1 << min_shift));
+	align_mask = (align >> min_shift) - 1;
+
+	for (index = 0; index < heap->nr_buddies;
+	     index += (1 << heap->bitmap[index].order)) {
+
+		if (heap->bitmap[index].alloc || (index & align_mask) ||
+		    (heap->bitmap[index].order < order))
+			continue;
+
+		if (best == heap->nr_buddies ||
+		    heap->bitmap[index].order < heap->bitmap[best].order)
+			best = index;
+
+		if (heap->bitmap[best].order == order)
+			break;
+	}
+
+	if (best == heap->nr_buddies)
+		return NULL;
+
+	b = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+	if (!b)
+		return NULL;
+
+	while (heap->bitmap[best].order != order) {
+		unsigned int buddy;
+		heap->bitmap[best].order--;
+		buddy = best ^ (1 << heap->bitmap[best].order);
+		heap->bitmap[buddy].order = heap->bitmap[best].order;
+		heap->bitmap[buddy].alloc = 0;
+	}
+	heap->bitmap[best].alloc = 1;
+	b->block.base = heap->heap_base->block.base + (best << min_shift);
+	b->heap = heap;
+	b->block.type = BLOCK_BUDDY;
+	return &b->block;
+}
+#endif
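+
+/*
+ * Illustration of the split loop in buddy_alloc() above: carving an
+ * order-1 allocation out of a free order-3 block at index 0 peels off
+ * free buddies at index 0 ^ (1 << 2) == 4 (order 2) and
+ * index 0 ^ (1 << 1) == 2 (order 1), leaving index 0 itself as the
+ * order-1 allocation.
+ */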
+
+static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
+{
+	struct buddy_block *b = container_of(block, struct buddy_block, block);
+	struct buddy_heap *h = b->heap;
+	unsigned int min_shift = parent_of(h)->min_buddy_shift;
+	unsigned int index;
+
+	index = (block->base - h->heap_base->block.base) >> min_shift;
+	h->bitmap[index].alloc = 0;
+
+	for (;;) {
+		unsigned int buddy = index ^ (1 << h->bitmap[index].order);
+		if (buddy >= h->nr_buddies || h->bitmap[buddy].alloc ||
+		    h->bitmap[buddy].order != h->bitmap[index].order)
+			break;
+
+		h->bitmap[buddy].order++;
+		h->bitmap[index].order++;
+		index = min(buddy, index);
+	}
+
+	kmem_cache_free(block_cache, b);
+	if ((1 << h->bitmap[0].order) == h->nr_buddies)
+		return h;
+
+	return NULL;
+}
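+
+/* e.g. freeing index 5 (order 0) while index 4 is a free order-0 buddy
+ * merges the pair into a free order-1 block at index 4; the loop then
+ * retries with buddy 4 ^ (1 << 1) == 6, and so on until the buddy is
+ * allocated or of a different order. */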
+
+/*
+ * base_max limits position of allocated chunk in memory.
+ * if base_max is 0 then there is no such limitation.
+ */
+static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
+					      size_t len, size_t align,
+					      unsigned int mem_prot,
+					      phys_addr_t base_max)
+{
+	struct list_block *b = NULL;
+	struct list_block *i = NULL;
+	struct list_block *rem = NULL;
+	phys_addr_t fix_base;
+	enum direction dir;
+
+	/* since pages are only mappable with one cache attribute,
+	 * and most allocations from carveout heaps are DMA coherent
+	 * (i.e., non-cacheable), round cacheable allocations up to
+	 * a page boundary to ensure that the physical pages will
+	 * only be mapped one way. */
+	if (mem_prot == NVMAP_HANDLE_CACHEABLE ||
+	    mem_prot == NVMAP_HANDLE_INNER_CACHEABLE) {
+		align = max_t(size_t, align, PAGE_SIZE);
+		len = PAGE_ALIGN(len);
+	}
+
+#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+	dir = BOTTOM_UP;
+#else
+	dir = (len <= heap->small_alloc) ? BOTTOM_UP : TOP_DOWN;
+#endif
+
+	if (dir == BOTTOM_UP) {
+		list_for_each_entry(i, &heap->free_list, free_list) {
+			size_t fix_size;
+			fix_base = ALIGN(i->block.base, align);
+			if (!fix_base || fix_base >= i->block.base + i->size)
+				continue;
+
+			fix_size = i->size - (fix_base - i->block.base);
+
+			/* needed for compaction. relocated chunk
+			 * should never go up */
+			if (base_max && fix_base > base_max)
+				break;
+
+			if (fix_size >= len) {
+				b = i;
+				break;
+			}
+		}
+	} else {
+		list_for_each_entry_reverse(i, &heap->free_list, free_list) {
+			if (i->size >= len) {
+				fix_base = i->block.base + i->size - len;
+				fix_base &= ~(align-1);
+				if (fix_base >= i->block.base) {
+					b = i;
+					break;
+				}
+			}
+		}
+	}
+
+	if (!b)
+		return NULL;
+
+	if (dir == BOTTOM_UP)
+		b->block.type = BLOCK_FIRST_FIT;
+
+	/* split free block */
+	if (b->block.base != fix_base) {
+		/* insert a new free block before allocated */
+		rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+		if (!rem) {
+			b->orig_addr = b->block.base;
+			b->block.base = fix_base;
+			b->size -= (b->block.base - b->orig_addr);
+			goto out;
+		}
+
+		rem->block.type = BLOCK_EMPTY;
+		rem->block.base = b->block.base;
+		rem->orig_addr = rem->block.base;
+		rem->size = fix_base - rem->block.base;
+		b->block.base = fix_base;
+		b->orig_addr = fix_base;
+		b->size -= rem->size;
+		list_add_tail(&rem->all_list,  &b->all_list);
+		list_add_tail(&rem->free_list, &b->free_list);
+	}
+
+	b->orig_addr = b->block.base;
+
+	if (b->size > len) {
+		/* insert a new free block after allocated */
+		rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+		if (!rem)
+			goto out;
+
+		rem->block.type = BLOCK_EMPTY;
+		rem->block.base = b->block.base + len;
+		rem->size = b->size - len;
+		BUG_ON(rem->size > b->size);
+		rem->orig_addr = rem->block.base;
+		b->size = len;
+		list_add(&rem->all_list,  &b->all_list);
+		list_add(&rem->free_list, &b->free_list);
+	}
+
+out:
+	list_del(&b->free_list);
+	b->heap = heap;
+	b->mem_prot = mem_prot;
+	b->align = align;
+	return &b->block;
+}
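+
+/*
+ * Worked example of the split logic above: allocating len == 8KB with
+ * align == 8KB from a free block spanning [0x1000, 0x6000) first splits
+ * off a leading free remainder [0x1000, 0x2000), then the allocation
+ * [0x2000, 0x4000), and finally a trailing free remainder [0x4000, 0x6000).
+ */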
+
+#ifdef DEBUG_FREE_LIST
+static void freelist_debug(struct nvmap_heap *heap, const char *title,
+			   struct list_block *token)
+{
+	int i;
+	struct list_block *n;
+
+	dev_dbg(&heap->dev, "%s\n", title);
+	i = 0;
+	list_for_each_entry(n, &heap->free_list, free_list) {
+		dev_dbg(&heap->dev, "\t%d [%p..%p]%s\n", i, (void *)n->orig_addr,
+			  (void *)(n->orig_addr + n->size),
+			  (n == token) ? "<--" : "");
+		i++;
+	}
+}
+#else
+#define freelist_debug(_heap, _title, _token)	do { } while (0)
+#endif
+
+static struct list_block *do_heap_free(struct nvmap_heap_block *block)
+{
+	struct list_block *b = container_of(block, struct list_block, block);
+	struct list_block *n = NULL;
+	struct nvmap_heap *heap = b->heap;
+
+	BUG_ON(b->block.base > b->orig_addr);
+	b->size += (b->block.base - b->orig_addr);
+	b->block.base = b->orig_addr;
+
+	freelist_debug(heap, "free list before", b);
+
+	/* Find position of first free block to the right of freed one */
+	list_for_each_entry(n, &heap->free_list, free_list) {
+		if (n->block.base > b->block.base)
+			break;
+	}
+
+	/* Add freed block before found free one */
+	list_add_tail(&b->free_list, &n->free_list);
+	BUG_ON(list_empty(&b->all_list));
+
+	freelist_debug(heap, "free list pre-merge", b);
+
+	/* merge the freed block with the next one if they are adjacent:
+	 * the freed block grows, the next one is destroyed */
+	if (!list_is_last(&b->free_list, &heap->free_list)) {
+		n = list_first_entry(&b->free_list, struct list_block, free_list);
+		if (n->block.base == b->block.base + b->size) {
+			list_del(&n->all_list);
+			list_del(&n->free_list);
+			BUG_ON(b->orig_addr >= n->orig_addr);
+			b->size += n->size;
+			kmem_cache_free(block_cache, n);
+		}
+	}
+
+	/* merge the freed block with the previous one if they are adjacent:
+	 * the previous free block grows, the freed one is destroyed */
+	if (b->free_list.prev != &heap->free_list) {
+		n = list_entry(b->free_list.prev, struct list_block, free_list);
+		if (n->block.base + n->size == b->block.base) {
+			list_del(&b->all_list);
+			list_del(&b->free_list);
+			BUG_ON(n->orig_addr >= b->orig_addr);
+			n->size += b->size;
+			kmem_cache_free(block_cache, b);
+			b = n;
+		}
+	}
+
+	freelist_debug(heap, "free list after", b);
+	b->block.type = BLOCK_EMPTY;
+	return b;
+}
+
+#ifndef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+
+static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
+					       size_t len, size_t align,
+					       unsigned int mem_prot)
+{
+	struct buddy_heap *bh;
+	struct nvmap_heap_block *b = NULL;
+
+	list_for_each_entry(bh, &h->buddy_list, buddy_list) {
+		b = buddy_alloc(bh, len, align, mem_prot);
+		if (b)
+			return b;
+	}
+
+	/* no buddy heaps could service this allocation: try to create a new
+	 * buddy heap instead */
+	bh = kmem_cache_zalloc(buddy_heap_cache, GFP_KERNEL);
+	if (!bh)
+		return NULL;
+
+	b = do_heap_alloc(h, h->buddy_heap_size,
+			h->buddy_heap_size, mem_prot, 0);
+	if (!b) {
+		kmem_cache_free(buddy_heap_cache, bh);
+		return NULL;
+	}
+
+	bh->heap_base = container_of(b, struct list_block, block);
+	bh->nr_buddies = h->buddy_heap_size >> h->min_buddy_shift;
+	bh->bitmap[0].alloc = 0;
+	bh->bitmap[0].order = order_of(h->buddy_heap_size, h->min_buddy_shift);
+	list_add_tail(&bh->buddy_list, &h->buddy_list);
+	return buddy_alloc(bh, len, align, mem_prot);
+}
+
+#endif
+
+#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+
+static int do_heap_copy_listblock(struct nvmap_device *dev,
+		 phys_addr_t dst_base, phys_addr_t src_base, size_t len)
+{
+	pte_t **pte_src = NULL;
+	pte_t **pte_dst = NULL;
+	void *addr_src = NULL;
+	void *addr_dst = NULL;
+	unsigned long kaddr_src;
+	unsigned long kaddr_dst;
+	phys_addr_t phys_src = src_base;
+	phys_addr_t phys_dst = dst_base;
+	unsigned long pfn_src;
+	unsigned long pfn_dst;
+	int error = 0;
+
+	pgprot_t prot = pgprot_writecombine(pgprot_kernel);
+
+	int page;
+
+	pte_src = nvmap_alloc_pte(dev, &addr_src);
+	if (IS_ERR(pte_src)) {
+		pr_err("Error allocating pte_src\n");
+		pte_src = NULL;
+		error = -1;
+		goto fail;
+	}
+
+	pte_dst = nvmap_alloc_pte(dev, &addr_dst);
+	if (IS_ERR(pte_dst)) {
+		pr_err("Error allocating pte_dst\n");
+		pte_dst = NULL;
+		error = -1;
+		goto fail;
+	}
+
+	kaddr_src = (unsigned long)addr_src;
+	kaddr_dst = (unsigned long)addr_dst;
+
+	BUG_ON(phys_dst > phys_src);
+	BUG_ON((phys_src & PAGE_MASK) != phys_src);
+	BUG_ON((phys_dst & PAGE_MASK) != phys_dst);
+	BUG_ON((len & PAGE_MASK) != len);
+
+	for (page = 0; page < (len >> PAGE_SHIFT); page++) {
+
+		pfn_src = __phys_to_pfn(phys_src) + page;
+		pfn_dst = __phys_to_pfn(phys_dst) + page;
+
+		set_pte_at(&init_mm, kaddr_src, *pte_src,
+				pfn_pte(pfn_src, prot));
+		flush_tlb_kernel_page(kaddr_src);
+
+		set_pte_at(&init_mm, kaddr_dst, *pte_dst,
+				pfn_pte(pfn_dst, prot));
+		flush_tlb_kernel_page(kaddr_dst);
+
+		memcpy(addr_dst, addr_src, PAGE_SIZE);
+	}
+
+fail:
+	if (pte_src)
+		nvmap_free_pte(dev, pte_src);
+	if (pte_dst)
+		nvmap_free_pte(dev, pte_dst);
+	return error;
+}
+
+static struct nvmap_heap_block *do_heap_relocate_listblock(
+		struct list_block *block, bool fast)
+{
+	struct nvmap_heap_block *heap_block = &block->block;
+	struct nvmap_heap_block *heap_block_new = NULL;
+	struct nvmap_heap *heap = block->heap;
+	struct nvmap_handle *handle = heap_block->handle;
+	phys_addr_t src_base = heap_block->base;
+	phys_addr_t dst_base;
+	size_t src_size = block->size;
+	size_t src_align = block->align;
+	unsigned int src_prot = block->mem_prot;
+	int error = 0;
+	struct nvmap_share *share;
+
+	if (!handle) {
+		pr_err("INVALID HANDLE!\n");
+		return NULL;
+	}
+
+	mutex_lock(&handle->lock);
+
+	share = nvmap_get_share_from_dev(handle->dev);
+
+	/* TODO: It is possible to use only handle lock and no share
+	 * pin_lock, but then we'll need to lock every handle during
+	 * each pinning operation. Need to estimate performance impact
+	 * if we decide to simplify locking this way. */
+	mutex_lock(&share->pin_lock);
+
+	/* abort if block is pinned */
+	if (atomic_read(&handle->pin))
+		goto fail;
+	/* abort if block is mapped */
+	if (handle->usecount)
+		goto fail;
+
+	if (fast) {
+		/* Fast compaction path - first allocate, then free. */
+		heap_block_new = do_heap_alloc(heap, src_size, src_align,
+				src_prot, src_base);
+		if (heap_block_new)
+			do_heap_free(heap_block);
+		else
+			goto fail;
+	} else {
+		/* Full compaction path: first free, then allocate.
+		 * It is slower but provides the best compaction results. */
+		do_heap_free(heap_block);
+		heap_block_new = do_heap_alloc(heap, src_size, src_align,
+				src_prot, src_base);
+		/* Allocation should always succeed */
+		BUG_ON(!heap_block_new);
+	}
+
+	/* update handle */
+	handle->carveout = heap_block_new;
+	heap_block_new->handle = handle;
+
+	/* copy source data to new block location */
+	dst_base = heap_block_new->base;
+
+	/* the new allocation should always move to a lower address or stay
+	 * at the same one */
+	BUG_ON(dst_base > src_base);
+
+	if (dst_base != src_base) {
+		error = do_heap_copy_listblock(handle->dev,
+					dst_base, src_base, src_size);
+		BUG_ON(error);
+	}
+
+fail:
+	mutex_unlock(&share->pin_lock);
+	mutex_unlock(&handle->lock);
+	return heap_block_new;
+}
+
+static void nvmap_heap_compact(struct nvmap_heap *heap,
+				size_t requested_size, bool fast)
+{
+	struct list_block *block_current = NULL;
+	struct list_block *block_prev = NULL;
+	struct list_block *block_next = NULL;
+
+	struct list_head *ptr, *ptr_prev, *ptr_next;
+	int relocation_count = 0;
+
+	ptr = heap->all_list.next;
+
+	/* walk through all blocks */
+	while (ptr != &heap->all_list) {
+		block_current = list_entry(ptr, struct list_block, all_list);
+
+		ptr_prev = ptr->prev;
+		ptr_next = ptr->next;
+
+		if (block_current->block.type != BLOCK_EMPTY) {
+			ptr = ptr_next;
+			continue;
+		}
+
+		if (fast && block_current->size >= requested_size)
+			break;
+
+		/* relocate prev block */
+		if (ptr_prev != &heap->all_list) {
+
+			block_prev = list_entry(ptr_prev,
+					struct list_block, all_list);
+
+			BUG_ON(block_prev->block.type != BLOCK_FIRST_FIT);
+
+			if (do_heap_relocate_listblock(block_prev, true)) {
+				/* After relocation current free block can be
+				 * destroyed when it is merged with previous
+				 * free block. Updated pointer to new free
+				 * block can be obtained from the next block */
+				relocation_count++;
+				ptr = ptr_next->prev;
+				continue;
+			}
+		}
+
+		if (ptr_next != &heap->all_list) {
+			struct nvmap_heap_block *block_new;
+			phys_addr_t old_base;
+
+			block_next = list_entry(ptr_next,
+					struct list_block, all_list);
+
+			BUG_ON(block_next->block.type != BLOCK_FIRST_FIT);
+
+			old_base = block_next->block.base;
+			block_new = do_heap_relocate_listblock(block_next,
+					fast);
+			if (block_new && block_new->base == old_base) {
+				/* When doing full heap compaction, the block
+				 * can end up relocated in the same location.
+				 * That means nothing was really accomplished,
+				 * but block pointers got invalidated. */
+				ptr_next = ptr_prev->next->next;
+			} else if (block_new) {
+				ptr = ptr_prev->next;
+				relocation_count++;
+				continue;
+			}
+		}
+		ptr = ptr_next;
+	}
+	pr_err("Relocated %d chunks\n", relocation_count);
+}
+#endif
+
+void nvmap_usecount_inc(struct nvmap_handle *h)
+{
+	if (h->alloc && !h->heap_pgalloc) {
+		mutex_lock(&h->lock);
+		h->usecount++;
+		mutex_unlock(&h->lock);
+	} else {
+		h->usecount++;
+	}
+}
+
+void nvmap_usecount_dec(struct nvmap_handle *h)
+{
+	h->usecount--;
+}
+
+/* nvmap_heap_alloc: allocates a block for the given handle, using the
+ * handle's size and alignment. */
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h,
+					  struct nvmap_handle *handle)
+{
+	struct nvmap_heap_block *b;
+	size_t len        = handle->size;
+	size_t align      = handle->align;
+	unsigned int prot = handle->flags;
+
+	mutex_lock(&h->lock);
+
+#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
+	/* Align to page size */
+	align = ALIGN(align, PAGE_SIZE);
+	len = ALIGN(len, PAGE_SIZE);
+	b = do_heap_alloc(h, len, align, prot, 0);
+	if (!b) {
+		pr_err("Compaction triggered!\n");
+		nvmap_heap_compact(h, len, true);
+		b = do_heap_alloc(h, len, align, prot, 0);
+		if (!b) {
+			pr_err("Full compaction triggered!\n");
+			nvmap_heap_compact(h, len, false);
+			b = do_heap_alloc(h, len, align, prot, 0);
+		}
+	}
+#else
+	if (len <= h->buddy_heap_size / 2) {
+		b = do_buddy_alloc(h, len, align, prot);
+	} else {
+		if (h->buddy_heap_size)
+			len = ALIGN(len, h->buddy_heap_size);
+		align = max(align, (size_t)L1_CACHE_BYTES);
+		b = do_heap_alloc(h, len, align, prot, 0);
+	}
+#endif
+
+	if (b) {
+		b->handle = handle;
+		handle->carveout = b;
+	}
+	mutex_unlock(&h->lock);
+	return b;
+}
+
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
+{
+	if (b->type == BLOCK_BUDDY) {
+		struct buddy_block *bb;
+		bb = container_of(b, struct buddy_block, block);
+		return parent_of(bb->heap);
+	} else {
+		struct list_block *lb;
+		lb = container_of(b, struct list_block, block);
+		return lb->heap;
+	}
+}
+
+/* nvmap_heap_free: frees block b */
+void nvmap_heap_free(struct nvmap_heap_block *b)
+{
+	struct buddy_heap *bh = NULL;
+	struct nvmap_heap *h = nvmap_block_to_heap(b);
+	struct list_block *lb;
+
+	mutex_lock(&h->lock);
+	if (b->type == BLOCK_BUDDY)
+		bh = do_buddy_free(b);
+	else {
+		lb = container_of(b, struct list_block, block);
+		nvmap_flush_heap_block(NULL, b, lb->size, lb->mem_prot);
+		do_heap_free(b);
+	}
+
+	if (bh) {
+		list_del(&bh->buddy_list);
+		mutex_unlock(&h->lock);
+		nvmap_heap_free(&bh->heap_base->block);
+		kmem_cache_free(buddy_heap_cache, bh);
+	} else
+		mutex_unlock(&h->lock);
+}
+
+static void heap_release(struct device *heap)
+{
+}
+
+/* nvmap_heap_create: create a heap object of len bytes, starting from
+ * address base.
+ *
+ * if buddy_size is >= NVMAP_HEAP_MIN_BUDDY_SIZE, then allocations <= 1/2
+ * of the buddy heap size will use a buddy sub-allocator, where each buddy
+ * heap is buddy_size bytes (should be a power of 2). all other allocations
+ * will be rounded up to be a multiple of buddy_size bytes.
+ */
+struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
+				     phys_addr_t base, size_t len,
+				     size_t buddy_size, void *arg)
+{
+	struct nvmap_heap *h = NULL;
+	struct list_block *l = NULL;
+
+	if (WARN_ON(buddy_size && buddy_size < NVMAP_HEAP_MIN_BUDDY_SIZE)) {
+		dev_warn(parent, "%s: buddy_size %zu too small\n", __func__,
+			buddy_size);
+		buddy_size = 0;
+	} else if (WARN_ON(buddy_size >= len)) {
+		dev_warn(parent, "%s: buddy_size %zu too large\n", __func__,
+			buddy_size);
+		buddy_size = 0;
+	} else if (WARN_ON(buddy_size & (buddy_size - 1))) {
+		dev_warn(parent, "%s: buddy_size %zu not a power of 2\n",
+			 __func__, buddy_size);
+		buddy_size = 1 << (ilog2(buddy_size) + 1);
+	}
+
+	if (WARN_ON(buddy_size && (base & (buddy_size - 1)))) {
+		phys_addr_t orig = base;
+		dev_warn(parent, "%s: base address %pa not aligned to "
+			 "buddy_size %zu\n", __func__, &base, buddy_size);
+		base = ALIGN(base, buddy_size);
+		len -= (base - orig);
+	}
+
+	if (WARN_ON(buddy_size && (len & (buddy_size - 1)))) {
+		dev_warn(parent, "%s: length %zu not aligned to "
+			 "buddy_size %zu\n", __func__, len, buddy_size);
+		len &= ~(buddy_size - 1);
+	}
+
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h) {
+		dev_err(parent, "%s: out of memory\n", __func__);
+		goto fail_alloc;
+	}
+
+	l = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+	if (!l) {
+		dev_err(parent, "%s: out of memory\n", __func__);
+		goto fail_alloc;
+	}
+
+	dev_set_name(&h->dev, "heap-%s", name);
+	h->name = name;
+	h->arg = arg;
+	h->dev.parent = parent;
+	h->dev.driver = NULL;
+	h->dev.release = heap_release;
+	if (device_register(&h->dev)) {
+		dev_err(parent, "%s: failed to register %s\n", __func__,
+			dev_name(&h->dev));
+		goto fail_alloc;
+	}
+	if (sysfs_create_group(&h->dev.kobj, &heap_stat_attr_group)) {
+		dev_err(&h->dev, "%s: failed to create attributes\n", __func__);
+		goto fail_register;
+	}
+	h->small_alloc = max(2 * buddy_size, len / 256);
+	h->buddy_heap_size = buddy_size;
+	if (buddy_size)
+		h->min_buddy_shift = ilog2(buddy_size / MAX_BUDDY_NR);
+	INIT_LIST_HEAD(&h->free_list);
+	INIT_LIST_HEAD(&h->buddy_list);
+	INIT_LIST_HEAD(&h->all_list);
+	mutex_init(&h->lock);
+	l->block.base = base;
+	l->block.type = BLOCK_EMPTY;
+	l->size = len;
+	l->orig_addr = base;
+	list_add_tail(&l->free_list, &h->free_list);
+	list_add_tail(&l->all_list, &h->all_list);
+
+	inner_flush_cache_all();
+	outer_flush_range(base, base + len);
+	wmb();
+	return h;
+
+fail_register:
+	device_unregister(&h->dev);
+fail_alloc:
+	if (l)
+		kmem_cache_free(block_cache, l);
+	kfree(h);
+	return NULL;
+}
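+
+/*
+ * Minimal usage sketch of nvmap_heap_create()/nvmap_heap_destroy(); the
+ * base address and sizes below are hypothetical, not taken from any real
+ * platform:
+ *
+ *	struct nvmap_heap *heap;
+ *
+ *	heap = nvmap_heap_create(parent_dev, "generic-0", 0x80000000,
+ *				 SZ_16M, SZ_128K, NULL);
+ *	if (!heap)
+ *		return -ENOMEM;
+ *	...
+ *	nvmap_heap_destroy(heap);
+ */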
+
+void *nvmap_heap_device_to_arg(struct device *dev)
+{
+	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+	return heap->arg;
+}
+
+void *nvmap_heap_to_arg(struct nvmap_heap *heap)
+{
+	return heap->arg;
+}
+
+/* nvmap_heap_destroy: frees all resources in heap */
+void nvmap_heap_destroy(struct nvmap_heap *heap)
+{
+	WARN_ON(!list_empty(&heap->buddy_list));
+
+	sysfs_remove_group(&heap->dev.kobj, &heap_stat_attr_group);
+	device_unregister(&heap->dev);
+
+	while (!list_empty(&heap->buddy_list)) {
+		struct buddy_heap *b;
+		b = list_first_entry(&heap->buddy_list, struct buddy_heap,
+				     buddy_list);
+		list_del(&b->buddy_list);
+		nvmap_heap_free(&b->heap_base->block);
+		kmem_cache_free(buddy_heap_cache, b);
+	}
+
+	WARN_ON(!list_is_singular(&heap->all_list));
+	while (!list_empty(&heap->all_list)) {
+		struct list_block *l;
+		l = list_first_entry(&heap->all_list, struct list_block,
+				     all_list);
+		list_del(&l->all_list);
+		kmem_cache_free(block_cache, l);
+	}
+
+	kfree(heap);
+}
+
+/* nvmap_heap_create_group: adds the attribute_group grp to the heap kobject */
+int nvmap_heap_create_group(struct nvmap_heap *heap,
+			    const struct attribute_group *grp)
+{
+	return sysfs_create_group(&heap->dev.kobj, grp);
+}
+
+/* nvmap_heap_remove_group: removes the attribute_group grp  */
+void nvmap_heap_remove_group(struct nvmap_heap *heap,
+			     const struct attribute_group *grp)
+{
+	sysfs_remove_group(&heap->dev.kobj, grp);
+}
+
+int nvmap_heap_init(void)
+{
+	BUG_ON(buddy_heap_cache != NULL);
+	buddy_heap_cache = KMEM_CACHE(buddy_heap, 0);
+	if (!buddy_heap_cache) {
+		pr_err("%s: unable to create buddy heap cache\n", __func__);
+		return -ENOMEM;
+	}
+
+	block_cache = KMEM_CACHE(combo_block, 0);
+	if (!block_cache) {
+		kmem_cache_destroy(buddy_heap_cache);
+		pr_err("%s: unable to create block cache\n", __func__);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void nvmap_heap_deinit(void)
+{
+	if (buddy_heap_cache)
+		kmem_cache_destroy(buddy_heap_cache);
+	if (block_cache)
+		kmem_cache_destroy(block_cache);
+
+	block_cache = NULL;
+	buddy_heap_cache = NULL;
+}
diff --git a/drivers/staging/tegra/video/nvmap/nvmap_heap.h b/drivers/staging/tegra/video/nvmap/nvmap_heap.h
new file mode 100644
index 000000000000..158a1fa3d33c
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap_heap.h
@@ -0,0 +1,68 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_heap.h
+ *
+ * GPU heap allocator.
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __NVMAP_HEAP_H
+#define __NVMAP_HEAP_H
+
+struct device;
+struct nvmap_heap;
+struct attribute_group;
+
+struct nvmap_heap_block {
+	phys_addr_t	base;
+	unsigned int	type;
+	struct nvmap_handle *handle;
+};
+
+#define NVMAP_HEAP_MIN_BUDDY_SIZE	8192
+
+struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
+				     phys_addr_t base, size_t len,
+				     size_t buddy_size, void *arg);
+
+void nvmap_heap_destroy(struct nvmap_heap *heap);
+
+void *nvmap_heap_device_to_arg(struct device *dev);
+
+void *nvmap_heap_to_arg(struct nvmap_heap *heap);
+
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap,
+					  struct nvmap_handle *handle);
+
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b);
+
+void nvmap_heap_free(struct nvmap_heap_block *block);
+
+int nvmap_heap_create_group(struct nvmap_heap *heap,
+			    const struct attribute_group *grp);
+
+void nvmap_heap_remove_group(struct nvmap_heap *heap,
+			     const struct attribute_group *grp);
+
+int __init nvmap_heap_init(void);
+
+void nvmap_heap_deinit(void);
+
+int nvmap_flush_heap_block(struct nvmap_client *client,
+	struct nvmap_heap_block *block, size_t len, unsigned int prot);
+
+#endif
diff --git a/drivers/staging/tegra/video/nvmap/nvmap_ioctl.c b/drivers/staging/tegra/video/nvmap/nvmap_ioctl.c
new file mode 100644
index 000000000000..79fae392c1dd
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap_ioctl.c
@@ -0,0 +1,800 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_ioctl.c
+ *
+ * User-space interface to nvmap
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/nvmap.h>
+
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <trace/events/nvmap.h>
+
+#include "nvmap_ioctl.h"
+#include "nvmap.h"
+#include "nvmap_common.h"
+
+static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
+			 int is_read, unsigned long h_offs,
+			 unsigned long sys_addr, unsigned long h_stride,
+			 unsigned long sys_stride, unsigned long elem_size,
+			 unsigned long count);
+
+static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+		       unsigned long start, unsigned long end, unsigned int op);
+
+
+int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
+{
+	struct nvmap_pin_handle op;
+	struct nvmap_handle *h;
+	unsigned long on_stack[16];
+	unsigned long *refs;
+	unsigned long __user *output;
+	unsigned int i;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.count)
+		return -EINVAL;
+
+	if (op.count > 1) {
+		size_t bytes = op.count * sizeof(*refs); /* kcalloc below will catch overflow. */
+
+		if (op.count > ARRAY_SIZE(on_stack))
+			refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
+		else
+			refs = on_stack;
+
+		if (!refs)
+			return -ENOMEM;
+
+		if (copy_from_user(refs, (void __user *)op.handles, bytes)) {
+			err = -EFAULT;
+			goto out;
+		}
+	} else {
+		refs = on_stack;
+		on_stack[0] = (unsigned long)op.handles;
+	}
+
+	trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
+	if (is_pin)
+		err = nvmap_pin_ids(filp->private_data, op.count, refs);
+	else
+		nvmap_unpin_ids(filp->private_data, op.count, refs);
+
+	/* skip the output stage on unpin */
+	if (err || !is_pin)
+		goto out;
+
+	/* it is guaranteed that if nvmap_pin_ids returns 0 that
+	 * all of the handle_ref objects are valid, so dereferencing
+	 * directly here is safe */
+	if (op.count > 1)
+		output = (unsigned long __user *)op.addr;
+	else {
+		struct nvmap_pin_handle __user *tmp = arg;
+		output = (unsigned long __user *)&(tmp->addr);
+	}
+
+	if (!output)
+		goto out;
+
+	for (i = 0; i < op.count && !err; i++) {
+		unsigned long addr;
+
+		h = (struct nvmap_handle *)refs[i];
+
+		if (h->heap_pgalloc && h->pgalloc.contig)
+			addr = page_to_phys(h->pgalloc.pages[0]);
+		else if (h->heap_pgalloc)
+			addr = h->pgalloc.area->iovm_start;
+		else
+			addr = h->carveout->base;
+
+		err = put_user(addr, &output[i]);
+	}
+
+	if (err)
+		nvmap_unpin_ids(filp->private_data, op.count, refs);
+
+out:
+	if (refs != on_stack)
+		kfree(refs);
+
+	return err;
+}
+
+int nvmap_ioctl_getid(struct file *filp, void __user *arg)
+{
+	struct nvmap_client *client = filp->private_data;
+	struct nvmap_create_handle op;
+	struct nvmap_handle *h = NULL;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.handle)
+		return -EINVAL;
+
+	h = nvmap_get_handle_id(client, op.handle);
+
+	if (!h)
+		return -EPERM;
+
+	op.id = (__u32)h;
+	if (client == h->owner)
+		h->global = true;
+
+	nvmap_handle_put(h);
+
+	return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
+}
+
+int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
+{
+	struct nvmap_alloc_handle op;
+	struct nvmap_client *client = filp->private_data;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.handle)
+		return -EINVAL;
+
+	if (op.align & (op.align - 1))
+		return -EINVAL;
+
+	/* user-space handles are aligned to page boundaries, to prevent
+	 * data leakage. */
+	op.align = max_t(size_t, op.align, PAGE_SIZE);
+#if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
+	op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
+#endif
+
+	return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
+				     op.align, op.flags);
+}
+
+int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
+{
+	struct nvmap_create_handle op;
+	struct nvmap_handle_ref *ref = NULL;
+	struct nvmap_client *client = filp->private_data;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!client)
+		return -ENODEV;
+
+	if (cmd == NVMAP_IOC_CREATE) {
+		ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
+		if (!IS_ERR(ref))
+			ref->handle->orig_size = op.size;
+	} else if (cmd == NVMAP_IOC_FROM_ID) {
+		ref = nvmap_duplicate_handle_id(client, op.id);
+	} else {
+		return -EINVAL;
+	}
+
+	if (IS_ERR(ref))
+		return PTR_ERR(ref);
+
+	op.handle = nvmap_ref_to_id(ref);
+	if (copy_to_user(arg, &op, sizeof(op))) {
+		err = -EFAULT;
+		nvmap_free_handle_id(client, op.handle);
+	}
+
+	return err;
+}
+
+int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
+{
+	struct nvmap_client *client = filp->private_data;
+	struct nvmap_map_caller op;
+	struct nvmap_vma_priv *vpriv;
+	struct vm_area_struct *vma;
+	struct nvmap_handle *h = NULL;
+	unsigned int cache_flags;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.handle)
+		return -EINVAL;
+
+	h = nvmap_get_handle_id(client, op.handle);
+
+	if (!h)
+		return -EPERM;
+
+	if (!h->alloc) {
+		nvmap_handle_put(h);
+		return -EFAULT;
+	}
+
+	trace_nvmap_map_into_caller_ptr(client, h, op.offset,
+					op.length, op.flags);
+	down_read(&current->mm->mmap_sem);
+
+	vma = find_vma(current->mm, op.addr);
+	if (!vma || !vma->vm_private_data) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (op.offset & ~PAGE_MASK) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	if (op.offset > h->size || (op.offset + op.length) > h->size) {
+		err = -EADDRNOTAVAIL;
+		goto out;
+	}
+
+	vpriv = vma->vm_private_data;
+	BUG_ON(!vpriv);
+
+	/* the VMA must exactly match the requested mapping operation, and the
+	 * VMA that is targeted must have been created by this driver
+	 */
+	if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
+	    (vma->vm_end-vma->vm_start != op.length)) {
+		err = -EPERM;
+		goto out;
+	}
+
+	/* verify that each mmap() system call creates a unique VMA */
+
+	if (vpriv->handle && (h == vpriv->handle)) {
+		goto out;
+	} else if (vpriv->handle) {
+		err = -EADDRNOTAVAIL;
+		goto out;
+	}
+
+	nvmap_usecount_inc(h);
+
+	if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
+		nvmap_usecount_dec(h);
+		err = -EFAULT;
+		goto out;
+	}
+
+	vpriv->handle = h;
+	vpriv->offs = op.offset;
+
+	cache_flags = op.flags & NVMAP_HANDLE_CACHE_FLAG;
+	if ((cache_flags == NVMAP_HANDLE_INNER_CACHEABLE ||
+	     cache_flags == NVMAP_HANDLE_CACHEABLE) &&
+	    (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
+	     h->flags == NVMAP_HANDLE_WRITE_COMBINE)) {
+		if (h->size & ~PAGE_MASK) {
+			pr_err("%s: attempt to convert a buffer from uc/wc to"
+				" wb whose size is not a multiple of page size;"
+				" request ignored\n", __func__);
+		} else {
+			unsigned int nr_page = h->size >> PAGE_SHIFT;
+			wmb();
+			/* override allocation time cache coherency attributes. */
+			h->flags &= ~NVMAP_HANDLE_CACHE_FLAG;
+			h->flags |= cache_flags;
+
+			/* Update page attributes, if the memory is allocated
+			 * from system heap pages.
+			 */
+			if (cache_flags == NVMAP_HANDLE_INNER_CACHEABLE &&
+				h->heap_pgalloc)
+				set_pages_array_iwb(h->pgalloc.pages, nr_page);
+			else if (h->heap_pgalloc)
+				set_pages_array_wb(h->pgalloc.pages, nr_page);
+		}
+	}
+	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
+
+out:
+	up_read(&current->mm->mmap_sem);
+
+	if (err)
+		nvmap_handle_put(h);
+	return err;
+}
+
+int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
+{
+	struct nvmap_handle_param op;
+	struct nvmap_client *client = filp->private_data;
+	struct nvmap_handle *h;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	h = nvmap_get_handle_id(client, op.handle);
+	if (!h)
+		return -EINVAL;
+
+	switch (op.param) {
+	case NVMAP_HANDLE_PARAM_SIZE:
+		op.result = h->orig_size;
+		break;
+	case NVMAP_HANDLE_PARAM_ALIGNMENT:
+		mutex_lock(&h->lock);
+		if (!h->alloc)
+			op.result = 0;
+		else if (h->heap_pgalloc)
+			op.result = PAGE_SIZE;
+		else if (h->carveout->base)
+			op.result = (h->carveout->base & -h->carveout->base);
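+			/* base & -base isolates the lowest set bit of the
+			 * base address, i.e. its alignment */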
+		else
+			op.result = SZ_4M;
+		mutex_unlock(&h->lock);
+		break;
+	case NVMAP_HANDLE_PARAM_BASE:
+		if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
+			op.result = -1ul;
+		else if (!h->heap_pgalloc) {
+			mutex_lock(&h->lock);
+			op.result = h->carveout->base;
+			mutex_unlock(&h->lock);
+		} else if (h->pgalloc.contig)
+			op.result = page_to_phys(h->pgalloc.pages[0]);
+		else if (h->pgalloc.area)
+			op.result = h->pgalloc.area->iovm_start;
+		else
+			op.result = -1ul;
+		break;
+	case NVMAP_HANDLE_PARAM_HEAP:
+		if (!h->alloc)
+			op.result = 0;
+		else if (!h->heap_pgalloc) {
+			mutex_lock(&h->lock);
+			op.result = nvmap_carveout_usage(client, h->carveout);
+			mutex_unlock(&h->lock);
+		} else if (h->pgalloc.contig)
+			op.result = NVMAP_HEAP_SYSMEM;
+		else
+			op.result = NVMAP_HEAP_IOVMM;
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	if (!err && copy_to_user(arg, &op, sizeof(op)))
+		err = -EFAULT;
+
+	nvmap_handle_put(h);
+	return err;
+}
+
+int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg)
+{
+	struct nvmap_client *client = filp->private_data;
+	struct nvmap_rw_handle __user *uarg = arg;
+	struct nvmap_rw_handle op;
+	struct nvmap_handle *h;
+	ssize_t copied;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.handle || !op.addr || !op.count || !op.elem_size)
+		return -EINVAL;
+
+	h = nvmap_get_handle_id(client, op.handle);
+	if (!h)
+		return -EPERM;
+
+	nvmap_usecount_inc(h);
+
+	trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
+				    op.addr, op.hmem_stride,
+				    op.user_stride, op.elem_size, op.count);
+	copied = rw_handle(client, h, is_read, op.offset,
+			   (unsigned long)op.addr, op.hmem_stride,
+			   op.user_stride, op.elem_size, op.count);
+
+	if (copied < 0) {
+		err = copied;
+		copied = 0;
+	} else if (copied < (op.count * op.elem_size))
+		err = -EINTR;
+
+	__put_user(copied, &uarg->count);
+
+	nvmap_usecount_dec(h);
+
+	nvmap_handle_put(h);
+
+	return err;
+}
+
+int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
+{
+	struct nvmap_client *client = filp->private_data;
+	struct nvmap_cache_op op;
+	struct vm_area_struct *vma;
+	struct nvmap_vma_priv *vpriv;
+	unsigned long start;
+	unsigned long end;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
+	    op.op > NVMAP_CACHE_OP_WB_INV)
+		return -EINVAL;
+
+	down_read(&current->mm->mmap_sem);
+
+	vma = find_vma(current->mm, (unsigned long)op.addr);
+	if (!vma || !is_nvmap_vma(vma) ||
+	    (unsigned long)op.addr + op.len > vma->vm_end) {
+		err = -EADDRNOTAVAIL;
+		goto out;
+	}
+
+	vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;
+
+	if ((unsigned long)vpriv->handle != op.handle) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	start = (unsigned long)op.addr - vma->vm_start;
+	end = start + op.len;
+
+	err = cache_maint(client, vpriv->handle, start, end, op.op);
+out:
+	up_read(&current->mm->mmap_sem);
+	return err;
+}
+
+int nvmap_ioctl_free(struct file *filp, unsigned long arg)
+{
+	struct nvmap_client *client = filp->private_data;
+
+	if (!arg)
+		return 0;
+
+	nvmap_free_handle_id(client, arg);
+	return 0;
+}
+
+extern void v7_dma_map_area(const void *, size_t, int);
+
+static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
+{
+	if (op == NVMAP_CACHE_OP_WB_INV)
+		dmac_flush_range(vaddr, vaddr + size);
+	else if (op == NVMAP_CACHE_OP_INV)
+		v7_dma_map_area(vaddr, size, DMA_FROM_DEVICE);
+	else
+		v7_dma_map_area(vaddr, size, DMA_TO_DEVICE);
+}
+
+static void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size)
+{
+	if (op == NVMAP_CACHE_OP_WB_INV)
+		outer_flush_range(paddr, paddr + size);
+	else if (op == NVMAP_CACHE_OP_INV)
+		outer_inv_range(paddr, paddr + size);
+	else
+		outer_clean_range(paddr, paddr + size);
+}
+
+static void heap_page_cache_maint(struct nvmap_client *client,
+	struct nvmap_handle *h, unsigned long start, unsigned long end,
+	unsigned int op, bool inner, bool outer, pte_t **pte,
+	unsigned long kaddr, pgprot_t prot)
+{
+	struct page *page;
+	phys_addr_t paddr;
+	unsigned long next;
+	unsigned long off;
+	size_t size;
+
+	while (start < end) {
+		page = h->pgalloc.pages[start >> PAGE_SHIFT];
+		next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
+		off = start & ~PAGE_MASK;
+		size = next - start;
+		paddr = page_to_phys(page) + off;
+
+		if (inner) {
+			void *vaddr = (void *)kaddr + off;
+			BUG_ON(!pte);
+			BUG_ON(!kaddr);
+			set_pte_at(&init_mm, kaddr, *pte,
+				pfn_pte(__phys_to_pfn(paddr), prot));
+			flush_tlb_kernel_page(kaddr);
+			inner_cache_maint(op, vaddr, size);
+		}
+
+		if (outer)
+			outer_cache_maint(op, paddr, size);
+		start = next;
+	}
+}
+
+#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
+static bool fast_cache_maint_outer(unsigned long start,
+		unsigned long end, unsigned int op)
+{
+	bool result = false;
+#if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
+	if (end - start >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD_OUTER) {
+		if (op == NVMAP_CACHE_OP_WB_INV) {
+			outer_flush_all();
+			result = true;
+		}
+		if (op == NVMAP_CACHE_OP_WB) {
+			outer_clean_all();
+			result = true;
+		}
+	}
+#endif
+	return result;
+}
+#endif
+
+static bool fast_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+	unsigned long start, unsigned long end, unsigned int op)
+{
+	int ret = false;
+
+#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
+	if ((op == NVMAP_CACHE_OP_INV) ||
+		((end - start) < FLUSH_CLEAN_BY_SET_WAY_THRESHOLD_INNER))
+		goto out;
+
+	if (op == NVMAP_CACHE_OP_WB_INV)
+		inner_flush_cache_all();
+	else if (op == NVMAP_CACHE_OP_WB)
+		inner_clean_cache_all();
+
+	/* outer maintenance */
+	if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
+		if (!fast_cache_maint_outer(start, end, op)) {
+			if (h->heap_pgalloc) {
+				heap_page_cache_maint(client, h, start,
+					end, op, false, true, NULL, 0, 0);
+			} else {
+				phys_addr_t pstart;
+
+				pstart = start + h->carveout->base;
+				outer_cache_maint(op, pstart, end - start);
+			}
+		}
+	}
+	ret = true;
+out:
+#endif
+	return ret;
+}
+
+static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+		       unsigned long start, unsigned long end, unsigned int op)
+{
+	pgprot_t prot;
+	pte_t **pte = NULL;
+	unsigned long kaddr;
+	phys_addr_t pstart = start;
+	phys_addr_t pend = end;
+	phys_addr_t loop;
+	int err = 0;
+
+	h = nvmap_handle_get(h);
+	if (!h)
+		return -EFAULT;
+
+	if (!h->alloc) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	trace_cache_maint(client, h, start, end, op);
+	wmb();
+	if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
+	    h->flags == NVMAP_HANDLE_WRITE_COMBINE || start == end)
+		goto out;
+
+	if (fast_cache_maint(client, h, start, end, op))
+		goto out;
+
+	prot = nvmap_pgprot(h, pgprot_kernel);
+	pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
+	if (IS_ERR(pte)) {
+		err = PTR_ERR(pte);
+		pte = NULL;
+		goto out;
+	}
+
+	if (h->heap_pgalloc) {
+		heap_page_cache_maint(client, h, start, end, op, true,
+			h->flags != NVMAP_HANDLE_INNER_CACHEABLE,
+			pte, kaddr, prot);
+		goto out;
+	}
+
+	if (start > h->size || end > h->size) {
+		nvmap_warn(client, "cache maintenance outside handle\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* lock carveout from relocation by mapcount */
+	nvmap_usecount_inc(h);
+
+	pstart += h->carveout->base;
+	pend += h->carveout->base;
+
+	loop = pstart;
+
+	while (loop < pend) {
+		phys_addr_t next = (loop + PAGE_SIZE) & PAGE_MASK;
+		void *base = (void *)kaddr + (loop & ~PAGE_MASK);
+		next = min(next, pend);
+
+		set_pte_at(&init_mm, kaddr, *pte,
+			   pfn_pte(__phys_to_pfn(loop), prot));
+		flush_tlb_kernel_page(kaddr);
+
+		inner_cache_maint(op, base, next - loop);
+		loop = next;
+	}
+
+	if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
+		outer_cache_maint(op, pstart, pend - pstart);
+
+	/* unlock carveout */
+	nvmap_usecount_dec(h);
+
+out:
+	if (pte)
+		nvmap_free_pte(client->dev, pte);
+	nvmap_handle_put(h);
+	return err;
+}
+
+static int rw_handle_page(struct nvmap_handle *h, int is_read,
+			  unsigned long start, unsigned long rw_addr,
+			  unsigned long bytes, unsigned long kaddr, pte_t *pte)
+{
+	pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
+	unsigned long end = start + bytes;
+	int err = 0;
+
+	while (!err && start < end) {
+		struct page *page = NULL;
+		phys_addr_t phys;
+		size_t count;
+		void *src;
+
+		if (!h->heap_pgalloc) {
+			phys = h->carveout->base + start;
+		} else {
+			page = h->pgalloc.pages[start >> PAGE_SHIFT];
+			BUG_ON(!page);
+			get_page(page);
+			phys = page_to_phys(page) + (start & ~PAGE_MASK);
+		}
+
+		set_pte_at(&init_mm, kaddr, pte,
+			   pfn_pte(__phys_to_pfn(phys), prot));
+		flush_tlb_kernel_page(kaddr);
+
+		src = (void *)kaddr + (phys & ~PAGE_MASK);
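+		/* reuse 'phys' as the number of bytes left in this page */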
+		phys = PAGE_SIZE - (phys & ~PAGE_MASK);
+		count = min_t(size_t, end - start, phys);
+
+		if (is_read)
+			err = copy_to_user((void *)rw_addr, src, count);
+		else
+			err = copy_from_user(src, (void *)rw_addr, count);
+
+		if (err)
+			err = -EFAULT;
+
+		rw_addr += count;
+		start += count;
+
+		if (page)
+			put_page(page);
+	}
+
+	return err;
+}
+
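+/*
+ * rw_handle copies 'count' elements of 'elem_size' bytes each between user
+ * memory and the handle, advancing by sys_stride/h_stride after every
+ * element. For illustration, reading a 64x64 sub-rectangle of 32bpp pixels
+ * from a 128-pixel-wide surface would use elem_size = 64*4,
+ * h_stride = 128*4, sys_stride = 64*4 and count = 64. When both strides
+ * equal elem_size, the loop below collapses the transfer into a single
+ * contiguous copy.
+ */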
+static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
+			 int is_read, unsigned long h_offs,
+			 unsigned long sys_addr, unsigned long h_stride,
+			 unsigned long sys_stride, unsigned long elem_size,
+			 unsigned long count)
+{
+	ssize_t copied = 0;
+	pte_t **pte;
+	void *addr;
+	int ret = 0;
+
+	if (!elem_size)
+		return -EINVAL;
+
+	if (!h->alloc)
+		return -EFAULT;
+
+	if (elem_size == h_stride && elem_size == sys_stride) {
+		elem_size *= count;
+		h_stride = elem_size;
+		sys_stride = elem_size;
+		count = 1;
+	}
+
+	pte = nvmap_alloc_pte(client->dev, &addr);
+	if (IS_ERR(pte))
+		return PTR_ERR(pte);
+
+	while (count--) {
+		if (h_offs + elem_size > h->size) {
+			nvmap_warn(client, "read/write outside of handle\n");
+			ret = -EFAULT;
+			break;
+		}
+		if (is_read)
+			cache_maint(client, h, h_offs,
+				h_offs + elem_size, NVMAP_CACHE_OP_INV);
+
+		ret = rw_handle_page(h, is_read, h_offs, sys_addr,
+				     elem_size, (unsigned long)addr, *pte);
+
+		if (ret)
+			break;
+
+		if (!is_read)
+			cache_maint(client, h, h_offs,
+				h_offs + elem_size, NVMAP_CACHE_OP_WB);
+
+		copied += elem_size;
+		sys_addr += sys_stride;
+		h_offs += h_stride;
+	}
+
+	nvmap_free_pte(client->dev, pte);
+	return ret ?: copied;
+}
diff --git a/drivers/staging/tegra/video/nvmap/nvmap_ioctl.h b/drivers/staging/tegra/video/nvmap/nvmap_ioctl.h
new file mode 100644
index 000000000000..29341a84b477
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap_ioctl.h
@@ -0,0 +1,162 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_ioctl.h
+ *
+ * ioctl declarations for nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_IOCTL_H
+#define __VIDEO_TEGRA_NVMAP_IOCTL_H
+
+#include <linux/ioctl.h>
+
+#ifdef  __KERNEL__
+#include <linux/file.h>
+#include <linux/nvmap.h>
+#endif
+
+enum {
+	NVMAP_HANDLE_PARAM_SIZE = 1,
+	NVMAP_HANDLE_PARAM_ALIGNMENT,
+	NVMAP_HANDLE_PARAM_BASE,
+	NVMAP_HANDLE_PARAM_HEAP,
+};
+
+enum {
+	NVMAP_CACHE_OP_WB = 0,
+	NVMAP_CACHE_OP_INV,
+	NVMAP_CACHE_OP_WB_INV,
+};
+
+struct nvmap_create_handle {
+	union {
+		__u32 key;	/* ClaimPreservedHandle */
+		__u32 id;	/* FromId */
+		__u32 size;	/* CreateHandle */
+	};
+	__u32 handle;
+};
+
+struct nvmap_alloc_handle {
+	__u32 handle;
+	__u32 heap_mask;
+	__u32 flags;
+	__u32 align;
+};
+
+struct nvmap_map_caller {
+	__u32 handle;		/* hmem */
+	__u32 offset;		/* offset into hmem; should be page-aligned */
+	__u32 length;		/* number of bytes to map */
+	__u32 flags;
+	unsigned long addr;	/* user pointer */
+};
+
+struct nvmap_rw_handle {
+	unsigned long addr;	/* user pointer */
+	__u32 handle;		/* hmem */
+	__u32 offset;		/* offset into hmem */
+	__u32 elem_size;	/* individual atom size */
+	__u32 hmem_stride;	/* delta in bytes between atoms in hmem */
+	__u32 user_stride;	/* delta in bytes between atoms in user */
+	__u32 count;		/* number of atoms to copy */
+};
+
+struct nvmap_pin_handle {
+	unsigned long handles;	/* array of handles to pin/unpin */
+	unsigned long addr;	/* array of addresses to return */
+	__u32 count;		/* number of entries in handles */
+};
+
+struct nvmap_handle_param {
+	__u32 handle;
+	__u32 param;
+	unsigned long result;
+};
+
+struct nvmap_cache_op {
+	unsigned long addr;
+	__u32 handle;
+	__u32 len;
+	__s32 op;
+};
+
+#define NVMAP_IOC_MAGIC 'N'
+
+/* Creates a new memory handle. On input, the argument is the size of the new
+ * handle; on return, the argument is the name of the new handle
+ */
+#define NVMAP_IOC_CREATE  _IOWR(NVMAP_IOC_MAGIC, 0, struct nvmap_create_handle)
+#define NVMAP_IOC_CLAIM   _IOWR(NVMAP_IOC_MAGIC, 1, struct nvmap_create_handle)
+#define NVMAP_IOC_FROM_ID _IOWR(NVMAP_IOC_MAGIC, 2, struct nvmap_create_handle)
+
+/* Actually allocates memory for the specified handle */
+#define NVMAP_IOC_ALLOC    _IOW(NVMAP_IOC_MAGIC, 3, struct nvmap_alloc_handle)
+
+/* Frees a memory handle, unpinning any pinned pages and unmapping any mappings
+ */
+#define NVMAP_IOC_FREE       _IO(NVMAP_IOC_MAGIC, 4)
+
+/* Maps the region of the specified handle into a user-provided virtual address
+ * that was previously created via an mmap syscall on this fd */
+#define NVMAP_IOC_MMAP       _IOWR(NVMAP_IOC_MAGIC, 5, struct nvmap_map_caller)
+
+/* Reads/writes data (possibly strided) from a user-provided buffer into the
+ * hmem at the specified offset */
+#define NVMAP_IOC_WRITE      _IOW(NVMAP_IOC_MAGIC, 6, struct nvmap_rw_handle)
+#define NVMAP_IOC_READ       _IOW(NVMAP_IOC_MAGIC, 7, struct nvmap_rw_handle)
+
+#define NVMAP_IOC_PARAM _IOWR(NVMAP_IOC_MAGIC, 8, struct nvmap_handle_param)
+
+/* Pins a list of memory handles into IO-addressable memory (either IOVMM
+ * space or physical memory, depending on the allocation), and returns the
+ * address. Handles may be pinned recursively. */
+#define NVMAP_IOC_PIN_MULT   _IOWR(NVMAP_IOC_MAGIC, 10, struct nvmap_pin_handle)
+#define NVMAP_IOC_UNPIN_MULT _IOW(NVMAP_IOC_MAGIC, 11, struct nvmap_pin_handle)
+
+#define NVMAP_IOC_CACHE      _IOW(NVMAP_IOC_MAGIC, 12, struct nvmap_cache_op)
+
+/* Returns a global ID usable to allow a remote process to create a handle
+ * reference to the same handle */
+#define NVMAP_IOC_GET_ID  _IOWR(NVMAP_IOC_MAGIC, 13, struct nvmap_create_handle)
+
+#define NVMAP_IOC_MAXNR (_IOC_NR(NVMAP_IOC_GET_ID))
+
+#ifdef  __KERNEL__
+int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg);
+
+int nvmap_ioctl_get_param(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_getid(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_alloc(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_free(struct file *filp, unsigned long arg);
+
+int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg);
+
+int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg);
+#endif
+
+#endif
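
A hedged sketch of the basic handle lifecycle these ioctls give user space. The /dev/nvmap node name and the heap_mask/flags values are assumptions (the real constants live in linux/nvmap.h, which is not part of this header); the ioctl sequence itself follows the definitions above.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "nvmap_ioctl.h"

int main(void)
{
	struct nvmap_create_handle create = { .size = 4096 };
	struct nvmap_alloc_handle alloc = { 0 };
	int fd, err;

	fd = open("/dev/nvmap", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	/* create an unbacked handle; create.handle is filled in on return */
	err = ioctl(fd, NVMAP_IOC_CREATE, &create);

	if (!err) {
		/* back it with memory; mask and flags are placeholders */
		alloc.handle    = create.handle;
		alloc.heap_mask = 0x1;	/* assumed heap bit */
		alloc.flags     = 0;	/* assumed cacheability flags */
		alloc.align     = 4096;
		err = ioctl(fd, NVMAP_IOC_ALLOC, &alloc);

		/* drop the reference; unpins and unmaps as needed */
		ioctl(fd, NVMAP_IOC_FREE, create.handle);
	}

	close(fd);
	return err ? 1 : 0;
}
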
diff --git a/drivers/staging/tegra/video/nvmap/nvmap_iommu.c b/drivers/staging/tegra/video/nvmap/nvmap_iommu.c
new file mode 100644
index 000000000000..f86d369b7412
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap_iommu.c
@@ -0,0 +1,96 @@
+/*
+ * IOMMU backend support for NVMAP
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <asm/dma-iommu.h>
+#include <mach/iovmm.h>
+
+#include "nvmap.h"
+
+struct tegra_iovmm_area *tegra_iommu_create_vm(struct device *dev,
+			       dma_addr_t req, size_t size, pgprot_t prot)
+{
+	struct tegra_iovmm_area *area;
+	dma_addr_t iova;
+
+	area = kmalloc(sizeof(*area), GFP_KERNEL);
+	if (!area)
+		return NULL;
+
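+	/* req == 0 means the caller has no preferred address; fall back
+	 * to the anonymous IOVA window (assumption: DMA_ANON_ADDR marks
+	 * that window in this downstream tree)
+	 */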
+	if (!req)
+		req = DMA_ANON_ADDR;
+
+	iova = arm_iommu_alloc_iova_at(dev, req, size);
+	if (iova == DMA_ERROR_CODE)
+		goto err_out;
+	area->iovm_start = iova;
+	area->iovm_length = size;
+	area->pgprot = prot;
+	area->dev = dev;
+	return area;
+
+err_out:
+	kfree(area);
+	return NULL;
+}
+
+void tegra_iommu_free_vm(struct tegra_iovmm_area *area)
+{
+	int i;
+	size_t count = area->iovm_length >> PAGE_SHIFT;
+
+	for (i = 0; i < count; i++) {
+		dma_addr_t iova;
+
+		iova = area->iovm_start + i * PAGE_SIZE;
+		dma_unmap_page(area->dev, iova, PAGE_SIZE, DMA_NONE);
+	}
+	kfree(area);
+}
+
+struct tegra_iovmm_client *tegra_iommu_alloc_client(struct device *dev)
+{
+	struct dma_iommu_mapping *map;
+	struct tegra_iovmm_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return NULL;
+
+	map = arm_iommu_create_mapping(&platform_bus_type,
+		       TEGRA_IOMMU_BASE, TEGRA_IOMMU_SIZE, 0);
+	if (IS_ERR(map))
+		goto err_map;
+
+	if (arm_iommu_attach_device(dev, map))
+		goto err_attach;
+	client->dev = dev;
+	return client;
+
+err_attach:
+	arm_iommu_release_mapping(map);
+err_map:
+	kfree(client);
+	return NULL;
+}
+
+void tegra_iommu_free_client(struct tegra_iovmm_client *client)
+{
+	arm_iommu_release_mapping(client->dev->archdata.mapping);
+	kfree(client);
+}
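
A minimal, hypothetical sketch of how a caller might pair the helpers above; the function name, SZ_1M and PAGE_KERNEL are assumptions, and in real use the owner would map pages into the window before tearing it down.

static int example_iommu_roundtrip(struct device *dev)
{
	struct tegra_iovmm_client *client;
	struct tegra_iovmm_area *area;

	client = tegra_iommu_alloc_client(dev);	/* create + attach mapping */
	if (!client)
		return -ENOMEM;

	/* a 1 MiB window anywhere in the IOVA space (req == 0) */
	area = tegra_iommu_create_vm(dev, 0, SZ_1M, PAGE_KERNEL);
	if (!area) {
		tegra_iommu_free_client(client);
		return -ENOMEM;
	}

	/* ... dma_map_page() pages into the window, do the work ... */

	tegra_iommu_free_vm(area);	/* unmaps each page, then frees */
	tegra_iommu_free_client(client);
	return 0;
}
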
diff --git a/drivers/staging/tegra/video/nvmap/nvmap_mru.c b/drivers/staging/tegra/video/nvmap/nvmap_mru.c
new file mode 100644
index 000000000000..f54d44923ebf
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap_mru.c
@@ -0,0 +1,187 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_mru.c
+ *
+ * IOVMM virtualization support for nvmap
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+
+#include <mach/iovmm.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* if IOVMM reclamation is enabled (CONFIG_NVMAP_RECLAIM_UNPINNED_VM),
+ * unpinned handles are placed onto a most-recently-used eviction list;
+ * multiple lists are maintained, segmented by size (sizes were chosen to
+ * roughly correspond with common sizes for graphics surfaces).
+ *
+ * if a handle is located on the MRU list, then the code below may
+ * steal its IOVMM area at any time to satisfy a pin operation if no
+ * free IOVMM space is available
+ */
+
+static const size_t mru_cutoff[] = {
+	262144, 393216, 786432, 1048576, 1572864
+};
+
+static inline struct list_head *mru_list(struct nvmap_share *share, size_t size)
+{
+	unsigned int i;
+
+	BUG_ON(!share->mru_lists);
+	for (i = 0; i < ARRAY_SIZE(mru_cutoff); i++)
+		if (size <= mru_cutoff[i])
+			break;
+
+	return &share->mru_lists[i];
+}
+
+size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm)
+{
+	size_t vm_size = tegra_iovmm_get_vm_size(iovmm);
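+	/* three quarters of the client's total IOVM space */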
+	return (vm_size >> 2) * 3;
+}
+
+/*  nvmap_mru_vma_lock should be acquired by the caller before calling this */
+void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h)
+{
+	size_t len = h->pgalloc.area->iovm_length;
+	list_add(&h->pgalloc.mru_list, mru_list(share, len));
+}
+
+void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h)
+{
+	nvmap_mru_lock(s);
+	if (!list_empty(&h->pgalloc.mru_list))
+		list_del(&h->pgalloc.mru_list);
+	nvmap_mru_unlock(s);
+	INIT_LIST_HEAD(&h->pgalloc.mru_list);
+}
+
+/* returns a tegra_iovmm_area for a handle. if the handle already has
+ * an iovmm_area allocated, the handle is simply removed from its MRU list
+ * and the existing iovmm_area is returned.
+ *
+ * if no existing allocation exists, try to allocate a new IOVMM area.
+ *
+ * if a new area cannot be allocated, try to re-use the most-recently-unpinned
+ * handle's allocation.
+ *
+ * and if that fails, iteratively evict handles from the MRU lists and free
+ * their allocations, until the new allocation succeeds.
+ */
+struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
+					    struct nvmap_handle *h)
+{
+	struct list_head *mru;
+	struct nvmap_handle *evict = NULL;
+	struct tegra_iovmm_area *vm = NULL;
+	unsigned int i, idx;
+	pgprot_t prot;
+
+	BUG_ON(!h || !c || !c->share);
+
+	prot = nvmap_pgprot(h, pgprot_kernel);
+
+	if (h->pgalloc.area) {
+		BUG_ON(list_empty(&h->pgalloc.mru_list));
+		list_del(&h->pgalloc.mru_list);
+		INIT_LIST_HEAD(&h->pgalloc.mru_list);
+		return h->pgalloc.area;
+	}
+
+	vm = tegra_iovmm_create_vm(c->share->iovmm, NULL,
+			h->size, h->align, prot,
+			h->pgalloc.iovm_addr);
+
+	if (vm) {
+		INIT_LIST_HEAD(&h->pgalloc.mru_list);
+		return vm;
+	}
+	/* the client asked for a specific iovm address; don't try eviction */
+	if (h->pgalloc.iovm_addr != 0)
+		return NULL;
+	/* attempt to re-use the most recently unpinned IOVMM area in the
+	 * same size bin as the current handle. If that fails, iteratively
+	 * evict handles (starting from the current bin) until an allocation
+	 * succeeds or no more areas can be evicted */
+	mru = mru_list(c->share, h->size);
+	if (!list_empty(mru))
+		evict = list_first_entry(mru, struct nvmap_handle,
+					 pgalloc.mru_list);
+
+	if (evict && evict->pgalloc.area->iovm_length >= h->size) {
+		list_del(&evict->pgalloc.mru_list);
+		vm = evict->pgalloc.area;
+		evict->pgalloc.area = NULL;
+		INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+		return vm;
+	}
+
+	idx = mru - c->share->mru_lists;
+
+	for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
+		if (idx >= c->share->nr_mru)
+			idx = 0;
+		mru = &c->share->mru_lists[idx];
+		while (!list_empty(mru) && !vm) {
+			evict = list_first_entry(mru, struct nvmap_handle,
+						 pgalloc.mru_list);
+
+			BUG_ON(atomic_read(&evict->pin) != 0);
+			BUG_ON(!evict->pgalloc.area);
+			list_del(&evict->pgalloc.mru_list);
+			INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+			tegra_iovmm_free_vm(evict->pgalloc.area);
+			evict->pgalloc.area = NULL;
+			vm = tegra_iovmm_create_vm(c->share->iovmm,
+					NULL, h->size, h->align,
+					prot, h->pgalloc.iovm_addr);
+		}
+	}
+	return vm;
+}
+
+int nvmap_mru_init(struct nvmap_share *share)
+{
+	int i;
+
+	mutex_init(&share->mru_lock);
+	share->nr_mru = ARRAY_SIZE(mru_cutoff) + 1;
+
+	share->mru_lists = kcalloc(share->nr_mru, sizeof(struct list_head),
+				   GFP_KERNEL);
+
+	if (!share->mru_lists)
+		return -ENOMEM;
+
+	for (i = 0; i < share->nr_mru; i++)
+		INIT_LIST_HEAD(&share->mru_lists[i]);
+
+	return 0;
+}
+
+void nvmap_mru_destroy(struct nvmap_share *share)
+{
+	kfree(share->mru_lists);
+	share->mru_lists = NULL;
+}
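
A worked example of the size binning above: a 600x600 RGBA surface (1,440,000 bytes) falls under the 1,572,864-byte cutoff and lands in the fifth list, while a 1280x720 RGBA surface (3,686,400 bytes) exceeds every cutoff and goes to the sixth, catch-all list; nvmap_mru_init() allocates ARRAY_SIZE(mru_cutoff) + 1 lists precisely to cover that overflow case.
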
diff --git a/drivers/staging/tegra/video/nvmap/nvmap_mru.h b/drivers/staging/tegra/video/nvmap/nvmap_mru.h
new file mode 100644
index 000000000000..6c94630bc3ef
--- /dev/null
+++ b/drivers/staging/tegra/video/nvmap/nvmap_mru.h
@@ -0,0 +1,84 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_mru.h
+ *
+ * IOVMM virtualization support for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+
+#ifndef __VIDEO_TEGRA_NVMAP_MRU_H
+#define __VIDEO_TEGRA_NVMAP_MRU_H
+
+#include <linux/spinlock.h>
+
+#include "nvmap.h"
+
+struct tegra_iovmm_area;
+struct tegra_iovmm_client;
+
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+
+static inline void nvmap_mru_lock(struct nvmap_share *share)
+{
+	mutex_lock(&share->mru_lock);
+}
+
+static inline void nvmap_mru_unlock(struct nvmap_share *share)
+{
+	mutex_unlock(&share->mru_lock);
+}
+
+int nvmap_mru_init(struct nvmap_share *share);
+
+void nvmap_mru_destroy(struct nvmap_share *share);
+
+size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm);
+
+void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h);
+
+void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h);
+
+struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
+					    struct nvmap_handle *h);
+
+#else
+
+#define nvmap_mru_lock(_s)	do { } while (0)
+#define nvmap_mru_unlock(_s)	do { } while (0)
+#define nvmap_mru_init(_s)	0
+#define nvmap_mru_destroy(_s)	do { } while (0)
+#define nvmap_mru_vm_size(_a)	tegra_iovmm_get_vm_size(_a)
+
+static inline void nvmap_mru_insert_locked(struct nvmap_share *share,
+					   struct nvmap_handle *h)
+{ }
+
+static inline void nvmap_mru_remove(struct nvmap_share *s,
+				    struct nvmap_handle *h)
+{ }
+
+static inline struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
+							  struct nvmap_handle *h)
+{
+	BUG_ON(!h->pgalloc.area);
+	return h->pgalloc.area;
+}
+
+#endif
+
+#endif
