Merge tag 'drm-intel-next-2016-09-19' of git://anongit.freedesktop.org/drm-intel into drm-next

- refactor the sseu code (Imre)
- refine guc dmesg output (Dave Gordon)
- more vgpu work
- more skl wm fixes (Lyude)
- refactor dpll code in prep for upfront link training (Jim Bride et al)
- consolidate all platform feature checks into intel_device_info (Carlos Santa)
- refactor elsp/execlist submission as prep for re-submission after hang
  recovery and eventually scheduling (Chris Wilson)
- allow synchronous gpu reset handling, to remove tricky/impossible/fragile
  error recovery code (Chris Wilson)
- prep work for nonblocking (execlist) submission, using fences to track
  dependencies and drive elsp submission (Chris Wilson)
- partial error recovery/resubmission of non-guilty batches after hangs (Chris Wilson)
- full dma-buf implicit fencing support (Chris Wilson)
- dp link training fixes (Jim, Dhinakaran, Navare, ...)
- obey dp branch device pixel rate/bpc/clock limits (Mika Kahola), needed for
  many vga dongles
- bunch of small cleanups and polish all over, as usual

[airlied: printing macros collided]

* tag 'drm-intel-next-2016-09-19' of git://anongit.freedesktop.org/drm-intel: (163 commits)
  drm/i915: Update DRIVER_DATE to 20160919
  drm: Fix DisplayPort branch device ID kernel-doc
  drm/i915: use NULL for NULL pointers
  drm/i915: do not use 'false' as a NULL pointer
  drm/i915: make intel_dp_compute_bpp static
  drm: Add DP branch device info on debugfs
  drm/i915: Update bits per component for display info
  drm/i915: Check pixel rate for DP to VGA dongle
  drm/i915: Read DP branch device SW revision
  drm/i915: Read DP branch device HW revision
  drm/i915: Cleanup DisplayPort AUX channel initialization
  drm: Read DP branch device id
  drm: Helper to read max bits per component
  drm: Helper to read max clock rate
  drm: Drop VGA from bpc definitions
  drm: Add missing DP downstream port types
  drm/i915: Add ddb size field to device info structure
  drm/i915/guc: general tidying up (submission)
  drm/i915/guc: general tidying up (loader)
  drm/i915: clarify PMINTRMSK/pm_intr_keep usage
  ...
diff --git a/Documentation/devicetree/bindings/display/bridge/tda998x.txt b/Documentation/devicetree/bindings/display/bridge/tda998x.txt
index e178e6b..24cc246 100644
--- a/Documentation/devicetree/bindings/display/bridge/tda998x.txt
+++ b/Documentation/devicetree/bindings/display/bridge/tda998x.txt
@@ -21,8 +21,19 @@
   - video-ports: 24 bits value which defines how the video controller
 	output is wired to the TDA998x input - default: <0x230145>
 
+  - audio-ports: array of 8-bit values, two values per DAI[1].
+	The first value defines the DAI type: TDA998x_SPDIF or TDA998x_I2S[2].
+	The second value defines the tda998x AP_ENA reg content when the DAI
+	in question is used. The implementation allows one or two DAIs. If two
+	DAIs are defined, they must be of different type.
+
+[1] Documentation/sound/alsa/soc/DAI.txt
+[2] include/dt-bindings/display/tda998x.h
+
 Example:
 
+#include <dt-bindings/display/tda998x.h>
+
 	tda998x: hdmi-encoder {
 		compatible = "nxp,tda998x";
 		reg = <0x70>;
@@ -30,4 +41,11 @@
 		interrupts = <27 2>;		/* falling edge */
 		pinctrl-0 = <&pmx_camera>;
 		pinctrl-names = "default";
+		video-ports = <0x230145>;
+
+		#sound-dai-cells = <2>;
+			     /*	DAI-format	AP_ENA reg value */
+		audio-ports = <	TDA998x_SPDIF	0x04
+				TDA998x_I2S	0x03>;
+
 	};
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
index 5489b59..9eb3f0a 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
@@ -6,8 +6,10 @@
 
 Required properties:
 - compatible: value should be one of the following
-		"rockchip,rk3288-vop";
 		"rockchip,rk3036-vop";
+		"rockchip,rk3288-vop";
+		"rockchip,rk3399-vop-big";
+		"rockchip,rk3399-vop-lit";
 
 - interrupts: should contain a list of all VOP IP block interrupts in the
 		 order: VSYNC, LCD_SYSTEM. The interrupt specifier
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index df8f4ae..b95696d 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -26,13 +26,14 @@
 The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
 
 Required properties:
- - compatible: value should be "allwinner,sun5i-a13-tcon".
+ - compatible: value must be either:
+   * allwinner,sun5i-a13-tcon
+   * allwinner,sun8i-a33-tcon
  - reg: base address and size of memory-mapped region
  - interrupts: interrupt associated to this IP
  - clocks: phandles to the clocks feeding the TCON. Three are needed:
    - 'ahb': the interface clocks
    - 'tcon-ch0': The clock driving the TCON channel 0
-   - 'tcon-ch1': The clock driving the TCON channel 1
  - resets: phandles to the reset controllers driving the encoder
    - "lcd": the reset line for the TCON channel 0
 
@@ -49,6 +50,33 @@
   second the block connected to the TCON channel 1 (usually the TV
   encoder)
 
+On the A13, there is one more clock required:
+   - 'tcon-ch1': The clock driving the TCON channel 1
+
+DRC
+---
+
+The DRC (Dynamic Range Controller), found in the latest Allwinner SoCs
+(A31, A23, A33), allows pixel brightness/contrast to be adjusted
+dynamically, based on histogram measurements, for LCD content-adaptive
+backlight control.
+
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun8i-a33-drc
+  - reg: base address and size of the memory-mapped region.
+  - interrupts: interrupt associated to this IP
+  - clocks: phandles to the clocks feeding the DRC
+    * ahb: the DRC interface clock
+    * mod: the DRC module clock
+    * ram: the DRC DRAM clock
+  - clock-names: the clock names mentioned above
+  - resets: phandles to the reset line driving the DRC
+
+  - ports: A ports node with endpoint definitions as defined in
+    Documentation/devicetree/bindings/media/video-interfaces.txt. The
+    first port should be the input endpoints, the second one the outputs
 
 Display Engine Backend
 ----------------------
@@ -59,6 +87,7 @@
 Required properties:
   - compatible: value must be one of:
     * allwinner,sun5i-a13-display-backend
+    * allwinner,sun8i-a33-display-backend
   - reg: base address and size of the memory-mapped region.
   - clocks: phandles to the clocks feeding the frontend and backend
     * ahb: the backend interface clock
@@ -71,6 +100,14 @@
   Documentation/devicetree/bindings/media/video-interfaces.txt. The
   first port should be the input endpoints, the second one the output
 
+On the A33, some additional properties are required:
+  - reg needs to have an additional region corresponding to the SAT
+  - reg-names need to be set, with "be" and "sat"
+  - clocks and clock-names need to have a phandle to the SAT bus
+    clocks, whose name will be "sat"
+  - resets and reset-names need to have a phandle to the SAT bus
+    resets, whose name will be "sat"
+
 Display Engine Frontend
 -----------------------
 
@@ -80,6 +117,7 @@
 Required properties:
   - compatible: value must be one of:
     * allwinner,sun5i-a13-display-frontend
+    * allwinner,sun8i-a33-display-frontend
   - reg: base address and size of the memory-mapped region.
   - interrupts: interrupt associated to this IP
   - clocks: phandles to the clocks feeding the frontend and backend
@@ -104,6 +142,7 @@
 Required properties:
   - compatible: value must be one of:
     * allwinner,sun5i-a13-display-engine
+    * allwinner,sun8i-a33-display-engine
 
   - allwinner,pipelines: list of phandle to the display engine
     frontends available.
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
index 6efa4c5..a5007aa 100644
--- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
+++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
@@ -17,6 +17,18 @@
    the lcd controller.
  - max-pixelclock: The maximum pixel clock that can be supported
    by the lcd controller in KHz.
+ - blue-and-red-wiring: Recognized values "default", "straight" or
+   "crossed". This property deals with the LCDC revision 2 (found on
+   AM335x) color errata [1].
+    - "straight" indicates normal wiring that supports RGB565,
+      BGR888, and XBGR8888 color formats.
+    - "crossed" indicates wiring that has blue and red wires
+      crossed. This setup supports BGR565, RGB888 and XRGB8888
+      formats.
+    - If the property is not present or its value is not recognized
+      the legacy mode is assumed. This configuration supports RGB565,
+      RGB888 and XRGB8888 formats. However, depending on wiring, the red
+      and blue colors are swapped in either 16 or 24-bit color modes.
 
 Optional nodes:
 
@@ -28,6 +40,14 @@
    Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting
    tfp410 DVI encoder or lcd panel to lcdc
 
+[1] There is an erratum about AM335x color wiring. For 16-bit color mode
+    the wires work as they should (LCD_DATA[0:4] is for Blue[3:7]),
+    but for 24-bit color modes the wiring of blue and red components is
+    crossed and LCD_DATA[0:4] is for Red[3:7] and LCD_DATA[11:15] is
+    for Blue[3:7]. For more details see section 3.1.1 in AM335x
+    Silicon Errata:
+    http://www.ti.com/general/docs/lit/getliterature.tsp?baseLiteratureNumber=sprz360
+
 Example:
 
 	fb: fb@4830e000 {
@@ -37,6 +57,8 @@
 		interrupts = <36>;
 		ti,hwmods = "lcdc";
 
+		blue-and-red-wiring = "crossed";
+
 		port {
 			lcdc_0: endpoint@0 {
 				remote-endpoint = <&hdmi_0>;
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 0b302fe..59fa3c1 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -2,38 +2,45 @@
 Mode Setting Helper Functions
 =============================
 
-The plane, CRTC, encoder and connector functions provided by the drivers
-implement the DRM API. They're called by the DRM core and ioctl handlers
-to handle device state changes and configuration request. As
-implementing those functions often requires logic not specific to
-drivers, mid-layer helper functions are available to avoid duplicating
-boilerplate code.
+The DRM subsystem aims for a strong separation between core code and helper
+libraries. Core code takes care of general setup and teardown and decoding
+userspace requests to kernel internal objects. Everything else is handled by a
+large set of helper libraries, which each driver can combine freely to pick
+what fits, avoiding shared code where special behaviour is needed.
 
-The DRM core contains one mid-layer implementation. The mid-layer
-provides implementations of several plane, CRTC, encoder and connector
-functions (called from the top of the mid-layer) that pre-process
-requests and call lower-level functions provided by the driver (at the
-bottom of the mid-layer). For instance, the
-:c:func:`drm_crtc_helper_set_config()` function can be used to
-fill the :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`
-set_config field. When called, it will split the set_config operation
-in smaller, simpler operations and call the driver to handle them.
+This distinction between core code and helpers is especially strong in the
+modesetting code, where there's a shared userspace ABI for all drivers. This is
+in contrast to the render side, where pretty much everything (with very few
+exceptions) can be considered optional helper code.
 
-To use the mid-layer, drivers call
-:c:func:`drm_crtc_helper_add()`,
-:c:func:`drm_encoder_helper_add()` and
-:c:func:`drm_connector_helper_add()` functions to install their
-mid-layer bottom operations handlers, and fill the :c:type:`struct
-drm_crtc_funcs <drm_crtc_funcs>`, :c:type:`struct
-drm_encoder_funcs <drm_encoder_funcs>` and :c:type:`struct
-drm_connector_funcs <drm_connector_funcs>` structures with
-pointers to the mid-layer top API functions. Installing the mid-layer
-bottom operation handlers is best done right after registering the
-corresponding KMS object.
+There are a few areas these helpers can be grouped into:
 
-The mid-layer is not split between CRTC, encoder and connector
-operations. To use it, a driver must provide bottom functions for all of
-the three KMS entities.
+* Helpers to implement modesetting. The important ones here are the atomic
+  helpers. Old drivers still often use the legacy CRTC helpers. They both share
+  the same set of common helper vtables. For really simple drivers (anything
+  that would have been a great fit in the deprecated fbdev subsystem) there's
+  also the simple display pipe helpers.
+
+* There's a big pile of helpers for handling outputs. First the generic bridge
+  helpers for handling encoder and transcoder IP blocks. Second the panel
+  helpers for handling panel-related information and logic. Then a big set of
+  helpers for the various sink standards (DisplayPort, HDMI, MIPI DSI). Finally
+  there are also generic helpers for handling output probing, and for dealing
+  with EDIDs.
+
+* The last group of helpers concerns itself with the frontend side of a display
+  pipeline: Planes, handling rectangles for visibility checking and scissoring,
+  flip queues and assorted bits.
+
+Modeset Helper Reference for Common Vtables
+===========================================
+
+.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
+   :internal:
+
+.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
+   :doc: overview
 
 Atomic Modeset Helper Functions Reference
 =========================================
@@ -62,33 +69,27 @@
 .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    :export:
 
-Modeset Helper Reference for Common Vtables
-===========================================
-
-.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
-   :internal:
-
-.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
-   :doc: overview
-
 Legacy CRTC/Modeset Helper Functions Reference
 ==============================================
 
 .. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
    :doc: overview
 
-Output Probing Helper Functions Reference
-=========================================
-
-.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
-   :doc: output probing helper overview
-
-.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
    :export:
 
+Simple KMS Helper Reference
+===========================
+
+.. kernel-doc:: include/drm/drm_simple_kms_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
+   :doc: overview
+
 fbdev Helper Functions Reference
 ================================
 
@@ -110,6 +111,36 @@
 .. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
    :export:
 
+Bridges
+=======
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :doc: overview
+
+Default bridge callback sequence
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :doc: bridge callbacks
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :export:
+
+Panel Helper Reference
+======================
+
+.. kernel-doc:: include/drm/drm_panel.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panel.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panel.c
+   :doc: drm panel
+
 Display Port Helper Functions Reference
 =======================================
 
@@ -158,6 +189,15 @@
 .. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
    :export:
 
+Output Probing Helper Functions Reference
+=========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+   :doc: output probing helper overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+   :export:
+
 EDID Helper Functions Reference
 ===============================
 
@@ -176,18 +216,6 @@
 .. kernel-doc:: drivers/gpu/drm/drm_rect.c
    :export:
 
-Flip-work Helper Reference
-==========================
-
-.. kernel-doc:: include/drm/drm_flip_work.h
-   :doc: flip utils
-
-.. kernel-doc:: include/drm/drm_flip_work.h
-   :internal:
-
-.. kernel-doc:: drivers/gpu/drm/drm_flip_work.c
-   :export:
-
 HDMI Infoframes Helper Reference
 ================================
 
@@ -202,59 +230,40 @@
 .. kernel-doc:: drivers/video/hdmi.c
    :export:
 
+Flip-work Helper Reference
+==========================
+
+.. kernel-doc:: include/drm/drm_flip_work.h
+   :doc: flip utils
+
+.. kernel-doc:: include/drm/drm_flip_work.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_flip_work.c
+   :export:
+
 Plane Helper Reference
 ======================
 
 .. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
    :doc: overview
 
+.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
+   :export:
+
 Tile group
-----------
+==========
+
+# FIXME: This should probably be moved into a property documentation section
 
 .. kernel-doc:: drivers/gpu/drm/drm_crtc.c
    :doc: Tile group
 
-Bridges
-=======
+Auxiliary Modeset Helpers
+=========================
 
-Overview
---------
+.. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c
+   :doc: aux kms helpers
 
-.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
-   :doc: overview
-
-Default bridge callback sequence
---------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
-   :doc: bridge callbacks
-
-.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+.. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c
    :export:
-
-Panel Helper Reference
-======================
-
-.. kernel-doc:: include/drm/drm_panel.h
-   :internal:
-
-.. kernel-doc:: drivers/gpu/drm/drm_panel.c
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/drm_panel.c
-   :doc: drm panel
-
-Simple KMS Helper Reference
-===========================
-
-.. kernel-doc:: include/drm/drm_simple_kms_helper.h
-   :internal:
-
-.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
-   :doc: overview
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 8dfa4b2..f9a991b 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -2,9 +2,6 @@
 Kernel Mode Setting (KMS)
 =========================
 
-Mode Setting
-============
-
 Drivers must initialize the mode setting core by calling
 :c:func:`drm_mode_config_init()` on the DRM device. The function
 initializes the :c:type:`struct drm_device <drm_device>`
@@ -18,60 +15,59 @@
 -  struct drm_mode_config_funcs \*funcs;
    Mode setting functions.
 
-Display Modes Function Reference
---------------------------------
+Modeset Base Object Abstraction
+===============================
 
-.. kernel-doc:: include/drm/drm_modes.h
+.. kernel-doc:: include/drm/drm_mode_object.h
    :internal:
 
-.. kernel-doc:: drivers/gpu/drm/drm_modes.c
+.. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
+   :export:
+
+KMS Data Structures
+===================
+
+.. kernel-doc:: include/drm/drm_crtc.h
+   :internal:
+
+KMS API Functions
+=================
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
    :export:
 
 Atomic Mode Setting Function Reference
---------------------------------------
+======================================
 
 .. kernel-doc:: drivers/gpu/drm/drm_atomic.c
    :export:
 
-.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+.. kernel-doc:: include/drm/drm_atomic.h
    :internal:
 
 Frame Buffer Abstraction
-------------------------
+========================
 
-Frame buffers are abstract memory objects that provide a source of
-pixels to scanout to a CRTC. Applications explicitly request the
-creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls
-and receive an opaque handle that can be passed to the KMS CRTC control,
-plane configuration and page flip functions.
+.. kernel-doc:: drivers/gpu/drm/drm_framebuffer.c
+   :doc: overview
 
-Frame buffers rely on the underneath memory manager for low-level memory
-operations. When creating a frame buffer applications pass a memory
-handle (or a list of memory handles for multi-planar formats) through
-the ``drm_mode_fb_cmd2`` argument. For drivers using GEM as their
-userspace buffer management interface this would be a GEM handle.
-Drivers are however free to use their own backing storage object
-handles, e.g. vmwgfx directly exposes special TTM handles to userspace
-and so expects TTM handles in the create ioctl and not GEM handles.
+Frame Buffer Functions Reference
+--------------------------------
 
-The lifetime of a drm framebuffer is controlled with a reference count,
-drivers can grab additional references with
-:c:func:`drm_framebuffer_reference()`and drop them again with
-:c:func:`drm_framebuffer_unreference()`. For driver-private
-framebuffers for which the last reference is never dropped (e.g. for the
-fbdev framebuffer when the struct :c:type:`struct drm_framebuffer
-<drm_framebuffer>` is embedded into the fbdev helper struct)
-drivers can manually clean up a framebuffer at module unload time with
-:c:func:`drm_framebuffer_unregister_private()`.
+.. kernel-doc:: drivers/gpu/drm/drm_framebuffer.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_framebuffer.h
+   :internal:
 
 DRM Format Handling
--------------------
+===================
 
 .. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
    :export:
 
 Dumb Buffer Objects
--------------------
+===================
 
 The KMS API doesn't standardize backing storage object creation and
 leaves it to driver-specific ioctls. Furthermore actually creating a
@@ -114,14 +110,44 @@
 attempted on some ARM embedded platforms. Such drivers really must have
 a hardware-specific ioctl to allocate suitable buffer objects.
 
-Output Polling
---------------
+Display Modes Function Reference
+================================
 
-void (\*output_poll_changed)(struct drm_device \*dev);
-This operation notifies the driver that the status of one or more
-connectors has changed. Drivers that use the fb helper can just call the
-:c:func:`drm_fb_helper_hotplug_event()` function to handle this
-operation.
+.. kernel-doc:: include/drm/drm_modes.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_modes.c
+   :export:
+
+Connector Abstraction
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+   :doc: overview
+
+Connector Functions Reference
+-----------------------------
+
+.. kernel-doc:: include/drm/drm_connector.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+   :export:
+
+Encoder Abstraction
+===================
+
+.. kernel-doc:: drivers/gpu/drm/drm_encoder.c
+   :doc: overview
+
+Encoder Functions Reference
+---------------------------
+
+.. kernel-doc:: include/drm/drm_encoder.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_encoder.c
+   :export:
 
 KMS Initialization and Cleanup
 ==============================
@@ -196,206 +222,6 @@
 primary planes may make use of the helper functions described in ? to
 create and register a primary plane with standard capabilities.
 
-Encoders (:c:type:`struct drm_encoder <drm_encoder>`)
------------------------------------------------------
-
-An encoder takes pixel data from a CRTC and converts it to a format
-suitable for any attached connectors. On some devices, it may be
-possible to have a CRTC send data to more than one encoder. In that
-case, both encoders would receive data from the same scanout buffer,
-resulting in a "cloned" display configuration across the connectors
-attached to each encoder.
-
-Encoder Initialization
-~~~~~~~~~~~~~~~~~~~~~~
-
-As for CRTCs, a KMS driver must create, initialize and register at least
-one :c:type:`struct drm_encoder <drm_encoder>` instance. The
-instance is allocated and zeroed by the driver, possibly as part of a
-larger structure.
-
-Drivers must initialize the :c:type:`struct drm_encoder
-<drm_encoder>` possible_crtcs and possible_clones fields before
-registering the encoder. Both fields are bitmasks of respectively the
-CRTCs that the encoder can be connected to, and sibling encoders
-candidate for cloning.
-
-After being initialized, the encoder must be registered with a call to
-:c:func:`drm_encoder_init()`. The function takes a pointer to the
-encoder functions and an encoder type. Supported types are
-
--  DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
--  DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
--  DRM_MODE_ENCODER_LVDS for display panels
--  DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video,
-   Component, SCART)
--  DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
-
-Encoders must be attached to a CRTC to be used. DRM drivers leave
-encoders unattached at initialization time. Applications (or the fbdev
-compatibility layer when implemented) are responsible for attaching the
-encoders they want to use to a CRTC.
-
-Connectors (:c:type:`struct drm_connector <drm_connector>`)
------------------------------------------------------------
-
-A connector is the final destination for pixel data on a device, and
-usually connects directly to an external display device like a monitor
-or laptop panel. A connector can only be attached to one encoder at a
-time. The connector is also the structure where information about the
-attached display is kept, so it contains fields for display data, EDID
-data, DPMS & connection status, and information about modes supported on
-the attached displays.
-
-Connector Initialization
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Finally a KMS driver must create, initialize, register and attach at
-least one :c:type:`struct drm_connector <drm_connector>`
-instance. The instance is created as other KMS objects and initialized
-by setting the following fields.
-
-interlace_allowed
-    Whether the connector can handle interlaced modes.
-
-doublescan_allowed
-    Whether the connector can handle doublescan.
-
-display_info
-    Display information is filled from EDID information when a display
-    is detected. For non hot-pluggable displays such as flat panels in
-    embedded systems, the driver should initialize the
-    display_info.width_mm and display_info.height_mm fields with the
-    physical size of the display.
-
-polled
-    Connector polling mode, a combination of
-
-    DRM_CONNECTOR_POLL_HPD
-        The connector generates hotplug events and doesn't need to be
-        periodically polled. The CONNECT and DISCONNECT flags must not
-        be set together with the HPD flag.
-
-    DRM_CONNECTOR_POLL_CONNECT
-        Periodically poll the connector for connection.
-
-    DRM_CONNECTOR_POLL_DISCONNECT
-        Periodically poll the connector for disconnection.
-
-    Set to 0 for connectors that don't support connection status
-    discovery.
-
-The connector is then registered with a call to
-:c:func:`drm_connector_init()` with a pointer to the connector
-functions and a connector type, and exposed through sysfs with a call to
-:c:func:`drm_connector_register()`.
-
-Supported connector types are
-
--  DRM_MODE_CONNECTOR_VGA
--  DRM_MODE_CONNECTOR_DVII
--  DRM_MODE_CONNECTOR_DVID
--  DRM_MODE_CONNECTOR_DVIA
--  DRM_MODE_CONNECTOR_Composite
--  DRM_MODE_CONNECTOR_SVIDEO
--  DRM_MODE_CONNECTOR_LVDS
--  DRM_MODE_CONNECTOR_Component
--  DRM_MODE_CONNECTOR_9PinDIN
--  DRM_MODE_CONNECTOR_DisplayPort
--  DRM_MODE_CONNECTOR_HDMIA
--  DRM_MODE_CONNECTOR_HDMIB
--  DRM_MODE_CONNECTOR_TV
--  DRM_MODE_CONNECTOR_eDP
--  DRM_MODE_CONNECTOR_VIRTUAL
-
-Connectors must be attached to an encoder to be used. For devices that
-map connectors to encoders 1:1, the connector should be attached at
-initialization time with a call to
-:c:func:`drm_mode_connector_attach_encoder()`. The driver must
-also set the :c:type:`struct drm_connector <drm_connector>`
-encoder field to point to the attached encoder.
-
-Finally, drivers must initialize the connectors state change detection
-with a call to :c:func:`drm_kms_helper_poll_init()`. If at least
-one connector is pollable but can't generate hotplug interrupts
-(indicated by the DRM_CONNECTOR_POLL_CONNECT and
-DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
-automatically be queued to periodically poll for changes. Connectors
-that can generate hotplug interrupts must be marked with the
-DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
-call :c:func:`drm_helper_hpd_irq_event()`. The function will
-queue a delayed work to check the state of all connectors, but no
-periodic polling will be done.
-
-Connector Operations
-~~~~~~~~~~~~~~~~~~~~
-
-    **Note**
-
-    Unless otherwise state, all operations are mandatory.
-
-DPMS
-''''
-
-void (\*dpms)(struct drm_connector \*connector, int mode);
-The DPMS operation sets the power state of a connector. The mode
-argument is one of
-
--  DRM_MODE_DPMS_ON
-
--  DRM_MODE_DPMS_STANDBY
-
--  DRM_MODE_DPMS_SUSPEND
-
--  DRM_MODE_DPMS_OFF
-
-In all but DPMS_ON mode the encoder to which the connector is attached
-should put the display in low-power mode by driving its signals
-appropriately. If more than one connector is attached to the encoder
-care should be taken not to change the power state of other displays as
-a side effect. Low-power mode should be propagated to the encoders and
-CRTCs when all related connectors are put in low-power mode.
-
-Modes
-'''''
-
-int (\*fill_modes)(struct drm_connector \*connector, uint32_t
-max_width, uint32_t max_height);
-Fill the mode list with all supported modes for the connector. If the
-``max_width`` and ``max_height`` arguments are non-zero, the
-implementation must ignore all modes wider than ``max_width`` or higher
-than ``max_height``.
-
-The connector must also fill in this operation its display_info
-width_mm and height_mm fields with the connected display physical size
-in millimeters. The fields should be set to 0 if the value isn't known
-or is not applicable (for instance for projector devices).
-
-Connection Status
-'''''''''''''''''
-
-The connection status is updated through polling or hotplug events when
-supported (see ?). The status value is reported to userspace through
-ioctls and must not be used inside the driver, as it only gets
-initialized by a call to :c:func:`drm_mode_getconnector()` from
-userspace.
-
-enum drm_connector_status (\*detect)(struct drm_connector
-\*connector, bool force);
-Check to see if anything is attached to the connector. The ``force``
-parameter is set to false whilst polling or to true when checking the
-connector due to user request. ``force`` can be used by the driver to
-avoid expensive, destructive operations during automated probing.
-
-Return connector_status_connected if something is connected to the
-connector, connector_status_disconnected if nothing is connected and
-connector_status_unknown if the connection state isn't known.
-
-Drivers should only return connector_status_connected if the
-connection status has really been probed as connected. Connectors that
-can't detect the connection status, or failed connection status probes,
-should return connector_status_unknown.
-
 Cleanup
 -------
 
@@ -463,20 +289,8 @@
 the process is complete, the new connector is registered with sysfs to
 make its properties available to applications.
 
-KMS API Functions
------------------
-
-.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
-   :export:
-
-KMS Data Structures
--------------------
-
-.. kernel-doc:: include/drm/drm_crtc.h
-   :internal:
-
 KMS Locking
------------
+===========
 
 .. kernel-doc:: drivers/gpu/drm/drm_modeset_lock.c
    :doc: kms locking
@@ -490,90 +304,23 @@
 KMS Properties
 ==============
 
-Drivers may need to expose additional parameters to applications than
-those described in the previous sections. KMS supports attaching
-properties to CRTCs, connectors and planes and offers a userspace API to
-list, get and set the property values.
+Property Types and Blob Property Support
+----------------------------------------
 
-Properties are identified by a name that uniquely defines the property
-purpose, and store an associated value. For all property types except
-blob properties the value is a 64-bit unsigned integer.
+.. kernel-doc:: drivers/gpu/drm/drm_property.c
+   :doc: overview
 
-KMS differentiates between properties and property instances. Drivers
-first create properties and then create and associate individual
-instances of those properties to objects. A property can be instantiated
-multiple times and associated with different objects. Values are stored
-in property instances, and all other property information are stored in
-the property and shared between all instances of the property.
+.. kernel-doc:: include/drm/drm_property.h
+   :internal:
 
-Every property is created with a type that influences how the KMS core
-handles the property. Supported property types are
+.. kernel-doc:: drivers/gpu/drm/drm_property.c
+   :export:
 
-DRM_MODE_PROP_RANGE
-    Range properties report their minimum and maximum admissible values.
-    The KMS core verifies that values set by application fit in that
-    range.
+Blending and Z-Position properties
+----------------------------------
 
-DRM_MODE_PROP_ENUM
-    Enumerated properties take a numerical value that ranges from 0 to
-    the number of enumerated values defined by the property minus one,
-    and associate a free-formed string name to each value. Applications
-    can retrieve the list of defined value-name pairs and use the
-    numerical value to get and set property instance values.
-
-DRM_MODE_PROP_BITMASK
-    Bitmask properties are enumeration properties that additionally
-    restrict all enumerated values to the 0..63 range. Bitmask property
-    instance values combine one or more of the enumerated bits defined
-    by the property.
-
-DRM_MODE_PROP_BLOB
-    Blob properties store a binary blob without any format restriction.
-    The binary blobs are created as KMS standalone objects, and blob
-    property instance values store the ID of their associated blob
-    object.
-
-    Blob properties are only used for the connector EDID property and
-    cannot be created by drivers.
-
-To create a property drivers call one of the following functions
-depending on the property type. All property creation functions take
-property flags and name, as well as type-specific arguments.
-
--  struct drm_property \*drm_property_create_range(struct
-   drm_device \*dev, int flags, const char \*name, uint64_t min,
-   uint64_t max);
-   Create a range property with the given minimum and maximum values.
-
--  struct drm_property \*drm_property_create_enum(struct drm_device
-   \*dev, int flags, const char \*name, const struct
-   drm_prop_enum_list \*props, int num_values);
-   Create an enumerated property. The ``props`` argument points to an
-   array of ``num_values`` value-name pairs.
-
--  struct drm_property \*drm_property_create_bitmask(struct
-   drm_device \*dev, int flags, const char \*name, const struct
-   drm_prop_enum_list \*props, int num_values);
-   Create a bitmask property. The ``props`` argument points to an array
-   of ``num_values`` value-name pairs.
-
-Properties can additionally be created as immutable, in which case they
-will be read-only for applications but can be modified by the driver. To
-create an immutable property drivers must set the
-DRM_MODE_PROP_IMMUTABLE flag at property creation time.
-
-When no array of value-name pairs is readily available at property
-creation time for enumerated or range properties, drivers can create the
-property using the :c:func:`drm_property_create()` function and
-manually add enumeration value-name pairs by calling the
-:c:func:`drm_property_add_enum()` function. Care must be taken to
-properly specify the property type through the ``flags`` argument.
-
-After creating properties drivers can attach property instances to CRTC,
-connector and plane objects by calling the
-:c:func:`drm_object_attach_property()`. The function takes a
-pointer to the target object, a pointer to the previously created
-property and an initial instance value.
+.. kernel-doc:: drivers/gpu/drm/drm_blend.c
+   :export:
 
 Existing KMS Properties
 -----------------------
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 59f9822..bca8085 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -26,12 +26,12 @@
 UMA devices.
 
 The Translation Table Manager (TTM)
------------------------------------
+===================================
 
 TTM design background and information belongs here.
 
 TTM initialization
-~~~~~~~~~~~~~~~~~~
+------------------
 
     **Warning**
 
@@ -77,7 +77,7 @@
 count for the TTM, which will call your initialization function.
 
 The Graphics Execution Manager (GEM)
-------------------------------------
+====================================
 
 The GEM design approach has resulted in a memory manager that doesn't
 provide full coverage of all (or even all common) use cases in its
@@ -114,7 +114,7 @@
 driver-specific ioctls.
 
 GEM Initialization
-~~~~~~~~~~~~~~~~~~
+------------------
 
 Drivers that use GEM must set the DRIVER_GEM bit in the struct
 :c:type:`struct drm_driver <drm_driver>` driver_features
@@ -132,7 +132,7 @@
 its own DRM MM object.
 
 GEM Objects Creation
-~~~~~~~~~~~~~~~~~~~~
+--------------------
 
 GEM splits creation of GEM objects and allocation of the memory that
 backs them in two distinct operations.
@@ -173,7 +173,7 @@
 must be managed by drivers.
 
 GEM Objects Lifetime
-~~~~~~~~~~~~~~~~~~~~
+--------------------
 
 All GEM objects are reference-counted by the GEM core. References can be
 acquired and release by :c:func:`calling
@@ -196,7 +196,7 @@
 :c:func:`drm_gem_object_release()`.
 
 GEM Objects Naming
-~~~~~~~~~~~~~~~~~~
+------------------
 
 Communication between userspace and the kernel refers to GEM objects
 using local handles, global names or, more recently, file descriptors.
@@ -245,7 +245,7 @@
 based on dma-bufs.
 
 GEM Objects Mapping
-~~~~~~~~~~~~~~~~~~~
+-------------------
 
 Because mapping operations are fairly heavyweight GEM favours
 read/write-like access to buffers, implemented through driver-specific
@@ -304,7 +304,7 @@
 faults can implement their own mmap file operation handler.
 
 Memory Coherency
-~~~~~~~~~~~~~~~~
+----------------
 
 When mapped to the device or used in a command buffer, backing pages for
 an object are flushed to memory and marked write combined so as to be
@@ -320,7 +320,7 @@
 any necessary flushing operations).
 
 Command Execution
-~~~~~~~~~~~~~~~~~
+-----------------
 
 Perhaps the most important GEM function for GPU devices is providing a
 command execution interface to clients. Client programs construct
@@ -348,8 +348,20 @@
 .. kernel-doc:: include/drm/drm_gem.h
    :internal:
 
+GEM CMA Helper Functions Reference
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+   :doc: cma helpers
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_gem_cma_helper.h
+   :internal:
+
 VMA Offset Manager
-------------------
+==================
 
 .. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
    :doc: vma offset manager
@@ -361,14 +373,14 @@
    :internal:
 
 PRIME Buffer Sharing
---------------------
+====================
 
 PRIME is the cross device buffer sharing framework in drm, originally
 created for the OPTIMUS range of multi-gpu platforms. To userspace PRIME
 buffers are dma-buf based file descriptors.
 
 Overview and Driver Interface
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------
 
 Similar to GEM global names, PRIME file descriptors are also used to
 share buffer objects across processes. They offer additional security:
@@ -406,7 +418,7 @@
 support PRIME.
 
 PRIME Helper Functions
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_prime.c
    :doc: PRIME Helpers
@@ -418,16 +430,16 @@
    :export:
 
 DRM MM Range Allocator
-----------------------
+======================
 
 Overview
-~~~~~~~~
+--------
 
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :doc: Overview
 
 LRU Scan/Eviction Support
-~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :doc: lru scan roaster
@@ -440,15 +452,3 @@
 
 .. kernel-doc:: include/drm/drm_mm.h
    :internal:
-
-CMA Helper Functions Reference
-------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
-   :doc: cma helpers
-
-.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
-   :export:
-
-.. kernel-doc:: include/drm/drm_gem_cma_helper.h
-   :internal:
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 536bf3e..1ba301c 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -33,6 +33,76 @@
 .. kernel-doc:: include/drm/drm_auth.h
    :internal:
 
+Open-Source Userspace Requirements
+==================================
+
+The DRM subsystem has stricter requirements than most other kernel subsystems on
+what the userspace side for new uAPI needs to look like. This section
+explains what exactly those requirements are, and why they exist.
+
+The short summary is that any addition of DRM uAPI requires corresponding
+open-sourced userspace patches, and those patches must be reviewed and ready for
+merging into a suitable and canonical upstream project.
+
+GFX devices (both display and render/GPU side) are really complex bits of
+hardware, with userspace and kernel by necessity having to work together really
+closely.  The interfaces, for rendering and modesetting, must be extremely wide
+and flexible, and therefore it is almost always impossible to precisely define
+them for every possible corner case. This in turn makes it practically
+infeasible to differentiate between behaviour that's required by userspace,
+and which must not be changed to avoid regressions, and behaviour which is
+only an accidental artifact of the current implementation.
+
+Without access to the full source code of all userspace users that means it
+becomes impossible to change the implementation details, since userspace could
+depend upon the accidental behaviour of the current implementation in minute
+details. And debugging such regressions without access to source code is pretty
+much impossible. As a consequence this means:
+
+- The Linux kernel's "no regression" policy holds in practice only for
+  open-source userspace of the DRM subsystem. DRM developers are perfectly fine
+  if closed-source blob drivers in userspace use the same uAPI as the open
+  drivers, but they must do so in the exact same way as the open drivers.
+  Creative (ab)use of the interfaces will, and in the past routinely has, lead
+  to breakage.
+
+- Any new userspace interface must have an open-source implementation as
+  demonstration vehicle.
+
+The other reason for requiring open-source userspace is uAPI review. Since the
+kernel and userspace parts of a GFX stack must work together so closely, code
+review can only assess whether a new interface achieves its goals by looking at
+both sides. Making sure that the interface indeed covers the use-case fully
+leads to a few additional requirements:
+
+- The open-source userspace must not be a toy/test application, but the real
+  thing. Specifically it needs to handle all the usual error and corner cases.
+  These are often the places where new uAPI falls apart and hence essential to
+  assess the fitness of a proposed interface.
+
+- The userspace side must be fully reviewed and tested to the standards of that
+  userspace project. For e.g. mesa this means piglit testcases and review on the
+  mailing list. This is again to ensure that the new interface actually gets the
+  job done.
+
+- The userspace patches must be against the canonical upstream, not some vendor
+  fork. This is to make sure that no one cheats on the review and testing
+  requirements by doing a quick fork.
+
+- The kernel patch can only be merged after all the above requirements are met,
+  but it **must** be merged **before** the userspace patches land. uAPI always flows
+  from the kernel, doing things the other way round risks divergence of the uAPI
+  definitions and header files.
+
+These are fairly steep requirements, but have grown out of years of shared
+pain and experience with uAPI added hastily, and almost always regretted
+just as fast. GFX devices change really fast, requiring a paradigm shift and
+entire new set of uAPI interfaces every few years at least. Together with the
+Linux kernel's guarantee to keep existing userspace running for 10+ years this
+is already rather painful for the DRM subsystem, with multiple different uAPIs
+for the same thing co-existing. If we add a few more complete mistakes into the
+mix every year it would be entirely unmanageable.
+
 Render nodes
 ============
 
@@ -86,6 +156,43 @@
 visible to user-space and accessible beyond open-file boundaries, they
 cannot support render nodes.
 
+Validating changes with IGT
+===========================
+
+There's a collection of tests that aims to cover the whole functionality of
+DRM drivers and that can be used to check that changes to DRM drivers or the
+core don't regress existing functionality. This test suite is called IGT and
+its code can be found in https://cgit.freedesktop.org/drm/igt-gpu-tools/.
+
+To build IGT, start by installing its build dependencies. In Debian-based
+systems::
+
+	# apt-get build-dep intel-gpu-tools
+
+And in Fedora-based systems::
+
+	# dnf builddep intel-gpu-tools
+
+Then clone the repository::
+
+	$ git clone git://anongit.freedesktop.org/drm/igt-gpu-tools
+
+Configure the build system and start the build::
+
+	$ cd igt-gpu-tools && ./autogen.sh && make -j6
+
+Download the piglit dependency::
+
+	$ ./scripts/run-tests.sh -d
+
+And run the tests::
+
+	$ ./scripts/run-tests.sh -t kms -t core -s
+
+run-tests.sh is a wrapper around piglit that will execute the tests matching
+the -t options. A report in HTML format will be available in
+./results/html/index.html. Results can be compared with piglit.
+
 VBlank event handling
 =====================
 
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index fcac0fa..ba92f45 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -12,3 +12,4 @@
    drm-uapi
    i915
    vga-switcheroo
+   vgaarbiter
diff --git a/Documentation/vgaarbiter.txt b/Documentation/gpu/vgaarbiter.rst
similarity index 64%
rename from Documentation/vgaarbiter.txt
rename to Documentation/gpu/vgaarbiter.rst
index 014423e..0b41b05 100644
--- a/Documentation/vgaarbiter.txt
+++ b/Documentation/gpu/vgaarbiter.rst
@@ -1,4 +1,4 @@
-
+===========
 VGA Arbiter
 ===========
 
@@ -19,21 +19,8 @@
 is needed to control the sharing of these resources. This document introduces
 the operation of the VGA arbiter implemented for the Linux kernel.
 
-----------------------------------------------------------------------------
-
-I.  Details and Theory of Operation
-        I.1 vgaarb
-        I.2 libpciaccess
-        I.3 xf86VGAArbiter (X server implementation)
-II. Credits
-III.References
-
-
-I. Details and Theory of Operation
-==================================
-
-I.1 vgaarb
-----------
+vgaarb kernel/userspace ABI
+---------------------------
 
 The vgaarb is a module of the Linux Kernel. When it is initially loaded, it
 scans all PCI devices and adds the VGA ones inside the arbitration. The
@@ -44,42 +31,52 @@
 The kernel exports a char device interface (/dev/vga_arbiter) to the clients,
 which has the following semantics:
 
- open       : open user instance of the arbiter. By default, it's attached to
-              the default VGA device of the system.
+open
+        Opens a user instance of the arbiter. By default, it's attached to the
+        default VGA device of the system.
 
- close      : close user instance. Release locks made by the user
+close
+        Close a user instance. Release locks made by the user
 
- read       : return a string indicating the status of the target like:
+read
+        Return a string indicating the status of the target like:
 
-              "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
+        "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
 
-              An IO state string is of the form {io,mem,io+mem,none}, mc and
-              ic are respectively mem and io lock counts (for debugging/
-              diagnostic only). "decodes" indicate what the card currently
-              decodes, "owns" indicates what is currently enabled on it, and
-              "locks" indicates what is locked by this card. If the card is
-              unplugged, we get "invalid" then for card_ID and an -ENODEV
-              error is returned for any command until a new card is targeted.
+        An IO state string is of the form {io,mem,io+mem,none}, mc and
+        ic are respectively mem and io lock counts (for debugging/
+        diagnostic only). "decodes" indicate what the card currently
+        decodes, "owns" indicates what is currently enabled on it, and
+        "locks" indicates what is locked by this card. If the card is
+        unplugged, we get "invalid" then for card_ID and an -ENODEV
+        error is returned for any command until a new card is targeted.
 
 
- write       : write a command to the arbiter. List of commands:
+write
+        Write a command to the arbiter. List of commands:
 
-  target <card_ID>   : switch target to card <card_ID> (see below)
-  lock <io_state>    : acquires locks on target ("none" is an invalid io_state)
-  trylock <io_state> : non-blocking acquire locks on target (returns EBUSY if
-                       unsuccessful)
-  unlock <io_state>  : release locks on target
-  unlock all         : release all locks on target held by this user (not
-                       implemented yet)
-  decodes <io_state> : set the legacy decoding attributes for the card
+        target <card_ID>
+                switch target to card <card_ID> (see below)
+        lock <io_state>
+                acquires locks on target ("none" is an invalid io_state)
+        trylock <io_state>
+                non-blocking acquire locks on target (returns EBUSY if
+                unsuccessful)
+        unlock <io_state>
+                release locks on target
+        unlock all
+                release all locks on target held by this user (not implemented
+                yet)
+        decodes <io_state>
+                set the legacy decoding attributes for the card
 
-  poll               : event if something changes on any card (not just the
-                       target)
+        poll
+                event if something changes on any card (not just the target)
 
-  card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
-  to go back to the system default card (TODO: not implemented yet). Currently,
-  only PCI is supported as a prefix, but the userland API may support other bus
-  types in the future, even if the current kernel implementation doesn't.
+        card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
+        to go back to the system default card (TODO: not implemented yet). Currently,
+        only PCI is supported as a prefix, but the userland API may support other bus
+        types in the future, even if the current kernel implementation doesn't.
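
For illustration, a minimal userspace client of this ABI could look as
follows (a sketch assuming only the command semantics described above;
error handling is mostly elided)::

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            char status[256];
            ssize_t len;
            int fd = open("/dev/vga_arbiter", O_RDWR);

            if (fd < 0)
                    return 1;

            /* read returns the status string of the current target card */
            len = read(fd, status, sizeof(status) - 1);
            if (len > 0) {
                    status[len] = '\0';
                    printf("%s\n", status);
            }

            /* block until we own the legacy VGA IO and memory ranges */
            write(fd, "lock io+mem", strlen("lock io+mem"));
            /* ... touch legacy VGA registers here ... */
            write(fd, "unlock io+mem", strlen("unlock io+mem"));

            close(fd);
            return 0;
    }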
 
 Note about locks:
 
@@ -97,29 +94,35 @@
 There is also an in-kernel API of the arbiter in case DRM, vgacon, or other
 drivers want to use it.
 
+In-kernel interface
+-------------------
 
-I.2 libpciaccess
-----------------
+.. kernel-doc:: include/linux/vgaarb.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/vga/vgaarb.c
+   :export:
+
+libpciaccess
+------------
 
 To use the vga arbiter char device, an API was implemented inside the
 libpciaccess library. One field was added to struct pci_device (each device
-on the system):
+on the system)::
 
     /* the type of resource decoded by the device */
     int vgaarb_rsrc;
 
-Besides it, in pci_system were added:
+Besides it, in pci_system were added::
 
     int vgaarb_fd;
     int vga_count;
     struct pci_device *vga_target;
     struct pci_device *vga_default_dev;
 
-
 The vga_count is used to track how many cards are being arbitrated, so for
 instance, if there is only one card, then it can completely escape arbitration.
 
-
 These functions below acquire VGA resources for the given card and mark those
 resources as locked. If the resources requested are "normal" (and not legacy)
 resources, the arbiter will first check whether the card is doing legacy
@@ -136,44 +139,44 @@
 succeeds.  vga_arb_trylock() will return (-EBUSY) instead of blocking. Nested
 calls are supported (a per-resource counter is maintained).
 
+Set the target device of this client. ::
 
-Set the target device of this client.
     int  pci_device_vgaarb_set_target   (struct pci_device *dev);
 
-
 For instance, in x86 if two devices on the same bus want to lock different
 resources, both will succeed (lock). If devices are in different buses and
-trying to lock different resources, only the first who tried succeeds.
+trying to lock different resources, only the first one to try succeeds. ::
+
     int  pci_device_vgaarb_lock         (void);
     int  pci_device_vgaarb_trylock      (void);
 
-Unlock resources of device.
+Unlock resources of device. ::
+
     int  pci_device_vgaarb_unlock       (void);
 
 Indicates to the arbiter if the card decodes legacy VGA IOs, legacy VGA
 Memory, both, or none. All cards default to both, the card driver (fbdev for
 example) should tell the arbiter if it has disabled legacy decoding, so the
 card can be left out of the arbitration process (and can be safe to take
-interrupts at any time.
+interrupts at any time). ::
+
     int  pci_device_vgaarb_decodes      (int new_vgaarb_rsrc);
 
-Connects to the arbiter device, allocates the struct
+Connects to the arbiter device, allocates the struct ::
+
     int  pci_device_vgaarb_init         (void);
 
-Close the connection
+Close the connection ::
+
     void pci_device_vgaarb_fini         (void);
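
Putting these together, a minimal arbitration client built on this
libpciaccess API might look like the sketch below (assuming only the
signatures quoted above; error paths abbreviated)::

    #include <pciaccess.h>

    int vga_poke(void)
    {
            if (pci_system_init() != 0)
                    return -1;
            if (pci_device_vgaarb_init() != 0)
                    return -1;

            /* blocks until this client owns the legacy VGA resources */
            pci_device_vgaarb_lock();
            /* ... access legacy VGA IO/memory here ... */
            pci_device_vgaarb_unlock();

            pci_device_vgaarb_fini();
            pci_system_cleanup();
            return 0;
    }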
 
-
-I.3 xf86VGAArbiter (X server implementation)
---------------------------------------------
-
-(TODO)
+xf86VGAArbiter (X server implementation)
+----------------------------------------
 
 X server basically wraps all the functions that touch VGA registers somehow.
 
-
-II. Credits
-===========
+References
+----------
 
 Benjamin Herrenschmidt (IBM?) started this work when he discussed such design
 with the Xorg community in 2005 [1, 2]. At the end of 2007, Paulo Zanoni and
@@ -182,11 +185,7 @@
 implementation of the user space side [3]. Now (2009) Tiago Vignatti and Dave
 Airlie finally put this work in shape and queued to Jesse Barnes' PCI tree.
 
-
-III. References
-==============
-
-[0] http://cgit.freedesktop.org/xorg/xserver/commit/?id=4b42448a2388d40f257774fbffdccaea87bd0347
-[1] http://lists.freedesktop.org/archives/xorg/2005-March/006663.html
-[2] http://lists.freedesktop.org/archives/xorg/2005-March/006745.html
-[3] http://lists.freedesktop.org/archives/xorg/2007-October/029507.html
+0) http://cgit.freedesktop.org/xorg/xserver/commit/?id=4b42448a2388d40f257774fbffdccaea87bd0347
+1) http://lists.freedesktop.org/archives/xorg/2005-March/006663.html
+2) http://lists.freedesktop.org/archives/xorg/2005-March/006745.html
+3) http://lists.freedesktop.org/archives/xorg/2007-October/029507.html
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index ca72167..528559b 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -9,6 +9,7 @@
 
 #include "am33xx.dtsi"
 #include "am335x-bone-common.dtsi"
+#include <dt-bindings/display/tda998x.h>
 
 / {
 	model = "TI AM335x BeagleBone Black";
@@ -75,6 +76,16 @@
 			AM33XX_IOPAD(0x9b0, PIN_OUTPUT_PULLDOWN | MUX_MODE3)	/* xdma_event_intr0 */
 		>;
 	};
+
+	mcasp0_pins: mcasp0_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x9ac, PIN_INPUT_PULLUP | MUX_MODE0) /* mcasp0_ahcklx.mcasp0_ahclkx */
+			AM33XX_IOPAD(0x99c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2*/
+			AM33XX_IOPAD(0x994, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mcasp0_fsx.mcasp0_fsx */
+			AM33XX_IOPAD(0x990, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mcasp0_aclkx.mcasp0_aclkx */
+			AM33XX_IOPAD(0x86c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a11.GPIO1_27 */
+		>;
+	};
 };
 
 &lcdc {
@@ -87,16 +98,22 @@
 };
 
 &i2c0 {
-	tda19988 {
+	tda19988: tda19988 {
 		compatible = "nxp,tda998x";
 		reg = <0x70>;
+
 		pinctrl-names = "default", "off";
 		pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
 		pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>;
 
-		port {
-			hdmi_0: endpoint@0 {
-				remote-endpoint = <&lcdc_0>;
+		#sound-dai-cells = <0>;
+		audio-ports = <	TDA998x_I2S	0x03>;
+
+		ports {
+			port@0 {
+				hdmi_0: endpoint@0 {
+					remote-endpoint = <&lcdc_0>;
+				};
 			};
 		};
 	};
@@ -105,3 +122,49 @@
 &rtc {
 	system-power-controller;
 };
+
+&mcasp0	{
+	#sound-dai-cells = <0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mcasp0_pins>;
+	status = "okay";
+	op-mode = <0>;	/* MCASP_IIS_MODE */
+	tdm-slots = <2>;
+	serial-dir = <	/* 0: INACTIVE, 1: TX, 2: RX */
+			0 0 1 0
+		>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
+};
+
+/ {
+	clk_mcasp0_fixed: clk_mcasp0_fixed {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <24576000>;
+	};
+
+	clk_mcasp0: clk_mcasp0 {
+		#clock-cells = <0>;
+		compatible = "gpio-gate-clock";
+		clocks = <&clk_mcasp0_fixed>;
+		enable-gpios = <&gpio1 27 0>; /* BeagleBone Black Clk enable on GPIO1_27 */
+	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "TI BeagleBone Black";
+		simple-audio-card,format = "i2s";
+		simple-audio-card,bitclock-master = <&dailink0_master>;
+		simple-audio-card,frame-master = <&dailink0_master>;
+
+		dailink0_master: simple-audio-card,cpu {
+			sound-dai = <&mcasp0>;
+			clocks = <&clk_mcasp0>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&tda19988>;
+		};
+	};
+};
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ddaee60..cf04d24 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -586,6 +586,22 @@
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+				      enum dma_data_direction direction)
+{
+	bool write = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_TO_DEVICE);
+	struct reservation_object *resv = dmabuf->resv;
+	long ret;
+
+	/* Wait on any implicit rendering fences */
+	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+						  MAX_SCHEDULE_TIMEOUT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
 
 /**
  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
@@ -608,6 +624,13 @@
 	if (dmabuf->ops->begin_cpu_access)
 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
+	/* Ensure that all fences are waited upon - but we first allow
+	 * the native handler the chance to do so more efficiently if it
+	 * chooses. A double invocation here will be a reasonably cheap no-op.
+	 */
+	if (ret == 0)
+		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
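
The hunk above makes dma_buf_begin_cpu_access() wait on any implicit fences
attached to the buffer's reservation object before CPU access proceeds. A
hedged sketch of the importer-side pattern this enables (the helper name and
copy step are illustrative)::

    static int read_back(struct dma_buf *dmabuf, void *dst, size_t len)
    {
            int ret;

            /* Blocks until implicit rendering fences have signaled */
            ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
            if (ret)
                    return ret;

            /* ... kmap the pages and copy up to len bytes into dst ... */

            dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
            return 0;
    }
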
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
index ee50022..f1989fc 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/fence-array.c
@@ -107,14 +107,14 @@
  * @fences:		[in]	array containing the fences
  * @context:		[in]	fence context to use
  * @seqno:		[in]	sequence number to use
- * @signal_on_any	[in]	signal on any fence in the array
+ * @signal_on_any:	[in]	signal on any fence in the array
  *
  * Allocate a fence_array object and initialize the base fence with fence_init().
  * In case of error it returns NULL.
  *
- * The caller should allocte the fences array with num_fences size
+ * The caller should allocate the fences array with num_fences size
  * and fill it with the fences it wants to add to the object. Ownership of this
- * array is take and fence_put() is used on each fence on release.
+ * array is taken and fence_put() is used on each fence on release.
  *
  * If @signal_on_any is true the fence array signals if any fence in the array
  * signals, otherwise it signals when all fences in the array signal.
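
A hedged sketch of the ownership contract spelled out in the corrected
kerneldoc: the fences array is handed over on success, so the caller only
frees it when creation fails (context and seqno values are illustrative)::

    static struct fence_array *make_array_sketch(unsigned num_fences)
    {
            struct fence **fences;
            struct fence_array *array;

            fences = kmalloc_array(num_fences, sizeof(*fences), GFP_KERNEL);
            if (!fences)
                    return NULL;
            /* ... fill fences[0..num_fences-1], each holding a reference ... */

            array = fence_array_create(num_fences, fences,
                                       fence_context_alloc(1), 1,
                                       false /* signal when all signal */);
            if (!array)
                    kfree(fences);  /* on failure, ownership stays here */
            return array;
    }
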
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 9566a62..723d8af 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -205,7 +205,7 @@
  * @fence: the shared fence to add
  *
  * Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared_fence has been called.
+ * reservation_object_reserve_shared() has been called.
  */
 void reservation_object_add_shared_fence(struct reservation_object *obj,
 					 struct fence *fence)
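
The corrected comment points at reservation_object_reserve_shared(); a short
hedged sketch of the intended call sequence, with obj->lock held as required::

    static void add_shared_fence_sketch(struct reservation_object *obj,
                                        struct fence *fence)
    {
            int ret;

            ww_mutex_lock(&obj->lock, NULL);
            ret = reservation_object_reserve_shared(obj);  /* make room for a slot */
            if (!ret)
                    reservation_object_add_shared_fence(obj, fence);
            ww_mutex_unlock(&obj->lock);
    }
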
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 486d29c..abb5fda 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -306,7 +306,8 @@
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (!poll_does_not_wait(wait) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
 		if (fence_add_callback(sync_file->fence, &sync_file->cb,
 				       fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
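
The change above enables fence signalling only when poll() can actually
block: poll_does_not_wait() is true for a non-blocking poll, so no callback
is installed and POLL_ENABLED stays clear. A hedged sketch of the resulting
handler shape (return-value handling abbreviated)::

    static unsigned int sync_file_poll_sketch(struct file *file, poll_table *wait)
    {
            struct sync_file *sync_file = file->private_data;

            poll_wait(file, &sync_file->wq, wait);

            if (!poll_does_not_wait(wait) &&
                !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
                    /* register fence_check_cb_func, as in the hunk above */
            }

            return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
    }
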
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index fe1e86e..483059a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -108,24 +108,8 @@
 
 source "drivers/gpu/drm/i2c/Kconfig"
 
-config DRM_TDFX
-	tristate "3dfx Banshee/Voodoo3+"
-	depends on DRM && PCI
-	help
-	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
-	  graphics card.  If M is selected, the module will be called tdfx.
-
 source "drivers/gpu/drm/arm/Kconfig"
 
-config DRM_R128
-	tristate "ATI Rage 128"
-	depends on DRM && PCI
-	select FW_LOADER
-	help
-	  Choose this option if you have an ATI Rage 128 graphics card.  If M
-	  is selected, the module will be called r128.  AGP support for
-	  this card is strongly suggested (unless you have a PCI version).
-
 config DRM_RADEON
 	tristate "ATI Radeon"
 	depends on DRM && PCI
@@ -163,55 +147,11 @@
 	  If M is selected, the module will be called amdgpu.
 
 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
-source "drivers/gpu/drm/amd/powerplay/Kconfig"
-
-source "drivers/gpu/drm/amd/acp/Kconfig"
 
 source "drivers/gpu/drm/nouveau/Kconfig"
 
-config DRM_I810
-	tristate "Intel I810"
-	# !PREEMPT because of missing ioctl locking
-	depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
-	help
-	  Choose this option if you have an Intel I810 graphics card.  If M is
-	  selected, the module will be called i810.  AGP support is required
-	  for this driver to work.
-
 source "drivers/gpu/drm/i915/Kconfig"
 
-config DRM_MGA
-	tristate "Matrox g200/g400"
-	depends on DRM && PCI
-	select FW_LOADER
-	help
-	  Choose this option if you have a Matrox G200, G400 or G450 graphics
-	  card.  If M is selected, the module will be called mga.  AGP
-	  support is required for this driver to work.
-
-config DRM_SIS
-	tristate "SiS video cards"
-	depends on DRM && AGP
-	depends on FB_SIS || FB_SIS=n
-	help
-	  Choose this option if you have a SiS 630 or compatible video
-          chipset. If M is selected the module will be called sis. AGP
-          support is required for this driver to work.
-
-config DRM_VIA
-	tristate "Via unichrome video cards"
-	depends on DRM && PCI
-	help
-	  Choose this option if you have a Via unichrome or compatible video
-	  chipset. If M is selected the module will be called via.
-
-config DRM_SAVAGE
-	tristate "Savage video cards"
-	depends on DRM && PCI
-	help
-	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
-	  chipset. If M is selected the module will be called savage.
-
 config DRM_VGEM
 	tristate "Virtual GEM provider"
 	depends on DRM
@@ -282,3 +222,81 @@
 source "drivers/gpu/drm/hisilicon/Kconfig"
 
 source "drivers/gpu/drm/mediatek/Kconfig"
+
+# Keep legacy drivers last
+
+menuconfig DRM_LEGACY
+	bool "Enable legacy drivers (DANGEROUS)"
+	depends on DRM
+	help
+	  Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
+	  APIs to user-space, which can be used to circumvent access
+	  restrictions and other security measures. For backwards compatibility
+	  those drivers are still available, but their use is highly
+	  inadvisable and might harm your system.
+
+	  It is recommended to use the safe modeset-only drivers instead, and
+	  perform 3D emulation in user-space.
+
+	  Unless you have strong reasons to go rogue, say "N".
+
+if DRM_LEGACY
+
+config DRM_TDFX
+	tristate "3dfx Banshee/Voodoo3+"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
+	  graphics card.  If M is selected, the module will be called tdfx.
+
+config DRM_R128
+	tristate "ATI Rage 128"
+	depends on DRM && PCI
+	select FW_LOADER
+	help
+	  Choose this option if you have an ATI Rage 128 graphics card.  If M
+	  is selected, the module will be called r128.  AGP support for
+	  this card is strongly suggested (unless you have a PCI version).
+
+config DRM_I810
+	tristate "Intel I810"
+	# !PREEMPT because of missing ioctl locking
+	depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+	help
+	  Choose this option if you have an Intel I810 graphics card.  If M is
+	  selected, the module will be called i810.  AGP support is required
+	  for this driver to work.
+
+config DRM_MGA
+	tristate "Matrox g200/g400"
+	depends on DRM && PCI
+	select FW_LOADER
+	help
+	  Choose this option if you have a Matrox G200, G400 or G450 graphics
+	  card.  If M is selected, the module will be called mga.  AGP
+	  support is required for this driver to work.
+
+config DRM_SIS
+	tristate "SiS video cards"
+	depends on DRM && AGP
+	depends on FB_SIS || FB_SIS=n
+	help
+	  Choose this option if you have a SiS 630 or compatible video
+	  chipset. If M is selected the module will be called sis. AGP
+	  support is required for this driver to work.
+
+config DRM_VIA
+	tristate "Via unichrome video cards"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a Via unichrome or compatible video
+	  chipset. If M is selected the module will be called via.
+
+config DRM_SAVAGE
+	tristate "Savage video cards"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+	  chipset. If M is selected the module will be called savage.
+
+endif # DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 3ff0941..439d89b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,9 @@
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
 		drm_trace_points.o drm_global.o drm_prime.o \
 		drm_rect.o drm_vma_manager.o drm_flip_work.o \
-		drm_modeset_lock.o drm_atomic.o drm_bridge.o
+		drm_modeset_lock.o drm_atomic.o drm_bridge.o \
+		drm_framebuffer.o drm_connector.o drm_blend.o \
+		drm_encoder.o drm_mode_object.o drm_property.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -24,7 +26,7 @@
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
 		drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
-		drm_simple_kms_helper.o drm_blend.o
+		drm_simple_kms_helper.o drm_modeset_helper.o
 
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 7335c04..53cf397 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -1,3 +1,10 @@
+config DRM_AMDGPU_SI
+	bool "Enable amdgpu support for SI parts"
+	depends on DRM_AMDGPU
+	help
+	  Choose this option if you want to enable experimental support
+	  for SI asics.
+
 config DRM_AMDGPU_CIK
 	bool "Enable amdgpu support for CIK parts"
 	depends on DRM_AMDGPU
@@ -25,3 +32,5 @@
 	  Selecting this option creates a debugfs file to inspect the mapped
 	  pages. Uses more memory for housekeeping, enable only for debugging.
 
+source "drivers/gpu/drm/amd/powerplay/Kconfig"
+source "drivers/gpu/drm/amd/acp/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7fcdce..dc6df07 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -30,6 +30,8 @@
 	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
 	amdgpu_amdkfd_gfx_v7.o
 
+amdgpu-$(CONFIG_DRM_AMDGPU_SI) += si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
+
 amdgpu-y += \
 	vi.o
 
@@ -58,7 +60,8 @@
 # add DCE block
 amdgpu-y += \
 	dce_v10_0.o \
-	dce_v11_0.o
+	dce_v11_0.o \
+	dce_virtual.o
 
 # add GFX block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 0619269..b8d6667 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,6 +90,7 @@
 #define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3        0x25
 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK          0x27
+#define ENCODER_OBJECT_ID_VIRTUAL                 0x28
 
 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
 
@@ -119,6 +120,7 @@
 #define CONNECTOR_OBJECT_ID_eDP                   0x14
 #define CONNECTOR_OBJECT_ID_MXM                   0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
+#define CONNECTOR_OBJECT_ID_VIRTUAL               0x17
 
 /* deleted */
 
@@ -147,6 +149,7 @@
 #define GRAPH_OBJECT_ENUM_ID5                     0x05
 #define GRAPH_OBJECT_ENUM_ID6                     0x06
 #define GRAPH_OBJECT_ENUM_ID7                     0x07
+#define GRAPH_OBJECT_ENUM_VIRTUAL                 0x08
 
 /****************************************************/
 /* Graphics Object ID Bit definition                */
@@ -408,6 +411,10 @@
                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                   ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
 
+#define ENCODER_VIRTUAL_ENUM_VIRTUAL            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8ebc5f1..ee45d9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -51,6 +51,7 @@
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_ttm.h"
 #include "amdgpu_gds.h"
 #include "amd_powerplay.h"
 #include "amdgpu_acp.h"
@@ -63,6 +64,7 @@
 extern int amdgpu_modeset;
 extern int amdgpu_vram_limit;
 extern int amdgpu_gart_size;
+extern int amdgpu_moverate;
 extern int amdgpu_benchmarking;
 extern int amdgpu_testing;
 extern int amdgpu_audio;
@@ -91,6 +93,9 @@
 extern unsigned amdgpu_cg_mask;
 extern unsigned amdgpu_pg_mask;
 extern char *amdgpu_disable_cu;
+extern int amdgpu_sclk_deep_sleep_en;
+extern char *amdgpu_virtual_display;
+extern unsigned amdgpu_pp_feature_mask;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -105,7 +110,7 @@
 #define AMDGPU_MAX_RINGS			16
 #define AMDGPU_MAX_GFX_RINGS			1
 #define AMDGPU_MAX_COMPUTE_RINGS		8
-#define AMDGPU_MAX_VCE_RINGS			2
+#define AMDGPU_MAX_VCE_RINGS			3
 
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES		2
@@ -248,10 +253,9 @@
 			 uint64_t pe, uint64_t src,
 			 unsigned count);
 	/* write pte one entry at a time with addr mapping */
-	void (*write_pte)(struct amdgpu_ib *ib,
-			  const dma_addr_t *pages_addr, uint64_t pe,
-			  uint64_t addr, unsigned count,
-			  uint32_t incr, uint32_t flags);
+	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+			  uint64_t value, unsigned count,
+			  uint32_t incr);
 	/* for linear pte/pde updates without addr mapping */
 	void (*set_pte_pde)(struct amdgpu_ib *ib,
 			    uint64_t pe,
@@ -316,6 +320,10 @@
 	/* note usage for clock and power gating */
 	void (*begin_use)(struct amdgpu_ring *ring);
 	void (*end_use)(struct amdgpu_ring *ring);
+	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+	unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
+	unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
 };
 
 /*
@@ -396,46 +404,9 @@
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
 /*
- * TTM.
+ * BO.
  */
 
-#define AMDGPU_TTM_LRU_SIZE	20
-
-struct amdgpu_mman_lru {
-	struct list_head		*lru[TTM_NUM_MEM_TYPES];
-	struct list_head		*swap_lru;
-};
-
-struct amdgpu_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
-	struct ttm_bo_device		bdev;
-	bool				mem_global_referenced;
-	bool				initialized;
-
-#if defined(CONFIG_DEBUG_FS)
-	struct dentry			*vram;
-	struct dentry			*gtt;
-#endif
-
-	/* buffer handling */
-	const struct amdgpu_buffer_funcs	*buffer_funcs;
-	struct amdgpu_ring			*buffer_funcs_ring;
-	/* Scheduler entity for buffer moves */
-	struct amd_sched_entity			entity;
-
-	/* custom LRU management */
-	struct amdgpu_mman_lru			log2_size[AMDGPU_TTM_LRU_SIZE];
-};
-
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-		       uint64_t src_offset,
-		       uint64_t dst_offset,
-		       uint32_t byte_count,
-		       struct reservation_object *resv,
-		       struct fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
 struct amdgpu_bo_list_entry {
 	struct amdgpu_bo		*robj;
 	struct ttm_validate_buffer	tv;
@@ -498,10 +469,12 @@
 	struct amdgpu_device		*adev;
 	struct drm_gem_object		gem_base;
 	struct amdgpu_bo		*parent;
+	struct amdgpu_bo		*shadow;
 
 	struct ttm_bo_kmap_obj		dma_buf_vmap;
 	struct amdgpu_mn		*mn;
 	struct list_head		mn_list;
+	struct list_head		shadow_list;
 };
 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
 
@@ -651,6 +624,7 @@
 int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr, uint32_t flags);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 
 /*
  * GPU MC structures, functions & helpers
@@ -677,6 +651,8 @@
 	uint32_t                fw_version;
 	struct amdgpu_irq_src	vm_fault;
 	uint32_t		vram_type;
+	uint32_t                srbm_soft_reset;
+	struct amdgpu_mode_mc_save save;
 };
 
 /*
@@ -721,10 +697,11 @@
  */
 
 struct amdgpu_flip_work {
-	struct work_struct		flip_work;
+	struct delayed_work		flip_work;
 	struct work_struct		unpin_work;
 	struct amdgpu_device		*adev;
 	int				crtc_id;
+	u32				target_vblank;
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_rbo;
@@ -815,13 +792,17 @@
 /* maximum number of VMIDs */
 #define AMDGPU_NUM_VM	16
 
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
+
 /* number of entries in page table */
 #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
 
 /* PTBs (Page Table Blocks) need to be aligned to 32K */
 #define AMDGPU_VM_PTB_ALIGN_SIZE   32768
-#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
-#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
 
 #define AMDGPU_PTE_VALID	(1 << 0)
 #define AMDGPU_PTE_SYSTEM	(1 << 1)
@@ -833,10 +814,7 @@
 #define AMDGPU_PTE_READABLE	(1 << 5)
 #define AMDGPU_PTE_WRITEABLE	(1 << 6)
 
-/* PTE (Page Table Entry) fragment field for different page sizes */
-#define AMDGPU_PTE_FRAG_4KB	(0 << 7)
-#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1f) << 7)
 
 /* How to programm VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER	0
@@ -846,6 +824,7 @@
 struct amdgpu_vm_pt {
 	struct amdgpu_bo_list_entry	entry;
 	uint64_t			addr;
+	uint64_t			shadow_addr;
 };
 
 struct amdgpu_vm {
@@ -948,7 +927,6 @@
 		      struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -957,7 +935,7 @@
 			     struct amdgpu_sync *sync);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem);
+			bool clear);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo);
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
@@ -992,6 +970,7 @@
 	spinlock_t		ring_lock;
 	struct fence            **fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
+	bool preamble_presented;
 };
 
 struct amdgpu_ctx_mgr {
@@ -1195,6 +1174,10 @@
 	unsigned			ce_ram_size;
 	struct amdgpu_cu_info		cu_info;
 	const struct amdgpu_gfx_funcs	*funcs;
+
+	/* reset mask */
+	uint32_t                        grbm_soft_reset;
+	uint32_t                        srbm_soft_reset;
 };
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1247,11 +1230,16 @@
 	struct fence			*fence;
 	uint64_t			bytes_moved_threshold;
 	uint64_t			bytes_moved;
+	struct amdgpu_bo_list_entry	*evictable;
 
 	/* user fence */
 	struct amdgpu_bo_list_entry	uf_entry;
 };
 
+#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
+#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occurred */
+
 struct amdgpu_job {
 	struct amd_sched_job    base;
 	struct amdgpu_device	*adev;
@@ -1260,9 +1248,10 @@
 	struct amdgpu_sync	sync;
 	struct amdgpu_ib	*ibs;
 	struct fence		*fence; /* the hw fence */
+	uint32_t		preamble_status;
 	uint32_t		num_ibs;
 	void			*owner;
-	uint64_t		ctx;
+	uint64_t		fence_ctx; /* the fence_context this job uses */
 	bool                    vm_needs_flush;
 	unsigned		vm_id;
 	uint64_t		vm_pd_addr;
@@ -1683,6 +1672,7 @@
 	bool			address_64_bit;
 	bool			use_ctx_buf;
 	struct amd_sched_entity entity;
+	uint32_t                srbm_soft_reset;
 };
 
 /*
@@ -1709,6 +1699,8 @@
 	struct amdgpu_irq_src	irq;
 	unsigned		harvest_config;
 	struct amd_sched_entity	entity;
+	uint32_t                srbm_soft_reset;
+	unsigned		num_rings;
 };
 
 /*
@@ -1726,9 +1718,14 @@
 
 struct amdgpu_sdma {
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+#ifdef CONFIG_DRM_AMDGPU_SI
+	/* SI DMA has a different trap irq number for the second engine */
+	struct amdgpu_irq_src	trap_irq_1;
+#endif
 	struct amdgpu_irq_src	trap_irq;
 	struct amdgpu_irq_src	illegal_inst_irq;
 	int			num_instances;
+	uint32_t                    srbm_soft_reset;
 };
 
 /*
@@ -1841,6 +1838,9 @@
 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
 	/* query virtual capabilities */
 	u32 (*get_virtual_caps)(struct amdgpu_device *adev);
+	/* static power management */
+	int (*get_pcie_lanes)(struct amdgpu_device *adev);
+	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
 };
 
 /*
@@ -1956,6 +1956,7 @@
 	bool valid;
 	bool sw;
 	bool hw;
+	bool hang;
 };
 
 struct amdgpu_device {
@@ -2014,6 +2015,8 @@
 	spinlock_t pcie_idx_lock;
 	amdgpu_rreg_t			pcie_rreg;
 	amdgpu_wreg_t			pcie_wreg;
+	amdgpu_rreg_t			pciep_rreg;
+	amdgpu_wreg_t			pciep_wreg;
 	/* protects concurrent UVD register access */
 	spinlock_t uvd_ctx_idx_lock;
 	amdgpu_rreg_t			uvd_ctx_rreg;
@@ -2054,7 +2057,16 @@
 	atomic64_t			num_evictions;
 	atomic_t			gpu_reset_counter;
 
+	/* data for buffer migration throttling */
+	struct {
+		spinlock_t		lock;
+		s64			last_update_us;
+		s64			accum_us; /* accumulated microseconds */
+		u32			log2_max_MBps;
+	} mm_stats;
+
 	/* display */
+	bool				enable_virtual_display;
 	struct amdgpu_mode_info		mode_info;
 	struct work_struct		hotplug_work;
 	struct amdgpu_irq_src		crtc_irq;
@@ -2117,6 +2129,14 @@
 	struct kfd_dev          *kfd;
 
 	struct amdgpu_virtualization virtualization;
+
+	/* link all shadow bo */
+	struct list_head                shadow_list;
+	struct mutex                    shadow_list_lock;
+	/* link all gtt */
+	spinlock_t			gtt_list_lock;
+	struct list_head                gtt_list;
+
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2149,6 +2169,8 @@
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
+#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
@@ -2192,6 +2214,9 @@
 #define REG_GET_FIELD(value, reg, field)				\
 	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
 
+#define WREG32_FIELD(reg, field, val)	\
+	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
 /*
  * BIOS helpers.
  */
@@ -2236,13 +2261,16 @@
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
 #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
+#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
+#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
+#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
@@ -2257,9 +2285,13 @@
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
 #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
+#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
+#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
+#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2387,6 +2419,7 @@
 
 /* Common functions */
 int amdgpu_gpu_reset(struct amdgpu_device *adev);
+bool amdgpu_need_backup(struct amdgpu_device *adev);
 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
@@ -2412,6 +2445,10 @@
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
 void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
+int amdgpu_ttm_global_init(struct amdgpu_device *adev);
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_program_register_sequence(struct amdgpu_device *adev,
 					     const u32 *registers,
 					     const u32 array_size);
@@ -2444,8 +2481,8 @@
 				 struct drm_file *file_priv);
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
 				struct drm_file *file_priv);
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
@@ -2491,6 +2528,7 @@
 struct amdgpu_bo_va_mapping *
 amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 		       uint64_t addr, struct amdgpu_bo **bo);
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
 
 #include "amdgpu_object.h"
 #endif
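
Among the additions above, WREG32_FIELD() packages the usual read-modify-write
of a single register field. A hedged illustration of what an invocation
expands to, for a hypothetical register FOO with field BAR::

    /* WREG32_FIELD(FOO, BAR, 2) expands to roughly: */
    u32 tmp = RREG32(mmFOO);
    tmp &= ~REG_FIELD_MASK(FOO, BAR);        /* clear the BAR field      */
    tmp |= 2 << REG_FIELD_SHIFT(FOO, BAR);   /* or-in the shifted value  */
    WREG32(mmFOO, tmp);
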
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9831753..59961db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -259,6 +259,33 @@
 	DRM_MODE_CONNECTOR_Unknown
 };
 
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	struct atom_context *ctx = mode_info->atom_context;
+	int index = GetIndexIntoMasterTable(DATA, Object_Header);
+	u16 size, data_offset;
+	u8 frev, crev;
+	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+	ATOM_OBJECT_HEADER *obj_header;
+
+	if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+		return false;
+
+	if (crev < 2)
+		return false;
+
+	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usDisplayPathTableOffset));
+
+	if (path_obj->ucNumOfDispPath)
+		return true;
+	else
+		return false;
+}
+
 bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
 {
 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
@@ -951,6 +978,48 @@
 		return -EINVAL;
 
 	switch (crev) {
+	case 2:
+	case 3:
+	case 5:
+		/* r6xx, r7xx, evergreen, ni, si.
+		 * TODO: add support for asic_type <= CHIP_RV770 */
+		if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+			args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+			dividers->post_div = args.v3.ucPostDiv;
+			dividers->enable_post_div = (args.v3.ucCntlFlag &
+						     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+			dividers->enable_dithen = (args.v3.ucCntlFlag &
+						   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+			dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+			dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+			dividers->ref_div = args.v3.ucRefDiv;
+			dividers->vco_mode = (args.v3.ucCntlFlag &
+					      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+		} else {
+			/* for SI we use ComputeMemoryClockParam for memory plls */
+			if (adev->asic_type >= CHIP_TAHITI)
+				return -EINVAL;
+			args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+			if (strobe_mode)
+				args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+			dividers->post_div = args.v5.ucPostDiv;
+			dividers->enable_post_div = (args.v5.ucCntlFlag &
+						     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+			dividers->enable_dithen = (args.v5.ucCntlFlag &
+						   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+			dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+			dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+			dividers->ref_div = args.v5.ucRefDiv;
+			dividers->vco_mode = (args.v5.ucCntlFlag &
+					      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+		}
+		break;
 	case 4:
 		/* fusion */
 		args.v4.ulClock = cpu_to_le32(clock);	/* 10 khz */
@@ -1095,6 +1164,32 @@
 	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+					  u16 *vddc, u16 *vddci, u16 *mvdd)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	u8 frev, crev;
+	u16 data_offset;
+	union firmware_info *firmware_info;
+
+	*vddc = 0;
+	*vddci = 0;
+	*mvdd = 0;
+
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
+		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+		if ((frev == 2) && (crev >= 2)) {
+			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+			*mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
+		}
+	}
+}
+
 union set_voltage {
 	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
 	struct _SET_VOLTAGE_PARAMETERS v1;
@@ -1102,6 +1197,52 @@
 	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
 };
 
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+			     u16 voltage_id, u16 *voltage)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev;
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 1:
+		return -EINVAL;
+	case 2:
+		args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+		args.v2.ucVoltageMode = 0;
+		args.v2.usVoltageLevel = 0;
+
+		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
+		break;
+	case 3:
+		args.v3.ucVoltageType = voltage_type;
+		args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
+		args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
+
+		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*voltage = le16_to_cpu(args.v3.usVoltageLevel);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+						      u16 *voltage,
+						      u16 leakage_idx)
+{
+	return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
+}
+
 void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
 				 u16 voltage_level,
 				 u8 voltage_type)
@@ -1322,6 +1463,50 @@
 	return NULL;
 }
 
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+			      u8 voltage_type,
+			      u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(adev->mode_info.atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 3:
+			switch (crev) {
+			case 1:
+				voltage_object = (union voltage_object *)
+					amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
+								      voltage_type,
+								      VOLTAGE_OBJ_SVID2);
+				if (voltage_object) {
+					*svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+					*svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+				} else {
+					return -EINVAL;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+
+	}
+	return 0;
+}
+
 bool
 amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
 				u8 voltage_type, u8 voltage_mode)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 8c2e696..1735615 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -140,6 +140,8 @@
 							  uint8_t id);
 void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
 
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
+
 bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev);
 
 int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
@@ -206,5 +208,19 @@
 void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
 
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+			     u16 voltage_id, u16 *voltage);
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+						      u16 *voltage,
+						      u16 leakage_idx);
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+					  u16 *vddc, u16 *vddci, u16 *mvdd);
+int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
+				       u8 clock_type,
+				       u32 clock,
+				       bool strobe_mode,
+				       struct atom_clock_dividers *dividers);
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+			      u8 voltage_type,
+			      u8 *svd_gpio_id, u8 *svc_gpio_id);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 33e47a4..3453052 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -39,7 +39,8 @@
 	start_jiffies = jiffies;
 	for (i = 0; i < n; i++) {
 		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
+				       false);
 		if (r)
 			goto exit_do_move;
 		r = fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index bc0440f..f1c53a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -616,7 +616,7 @@
 	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
 }
 
-int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_clockgating_state state)
 {
@@ -637,7 +637,7 @@
 	return r;
 }
 
-int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_powergating_state state)
 {
@@ -848,6 +848,12 @@
 	case CGS_SYSTEM_INFO_GFX_SE_INFO:
 		sys_info->value = adev->gfx.config.max_shader_engines;
 		break;
+	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
+		sys_info->value = adev->pdev->subsystem_device;
+		break;
+	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
+		sys_info->value = adev->pdev->subsystem_vendor;
+		break;
 	default:
 		return -ENODEV;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index ff0b55a..319a5e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1504,6 +1504,86 @@
 	.force = amdgpu_connector_dvi_force,
 };
 
+static struct drm_encoder *
+amdgpu_connector_virtual_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_encoder *encoder;
+	int i;
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
+			continue;
+
+		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+			return encoder;
+	}
+
+	/* pick the first one */
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
+	return NULL;
+}
+
+static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+
+	if (encoder) {
+		amdgpu_connector_add_common_modes(encoder, connector);
+	}
+
+	return 0;
+}
+
+static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
+					   struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static int amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
+{
+	return 0;
+}
+
+static enum drm_connector_status
+amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static int amdgpu_connector_virtual_set_property(struct drm_connector *connector,
+						 struct drm_property *property,
+						 uint64_t val)
+{
+	return 0;
+}
+
+static void amdgpu_connector_virtual_force(struct drm_connector *connector)
+{
+	return;
+}
+
+static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
+	.get_modes = amdgpu_connector_virtual_get_modes,
+	.mode_valid = amdgpu_connector_virtual_mode_valid,
+	.best_encoder = amdgpu_connector_virtual_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
+	.dpms = amdgpu_connector_virtual_dpms,
+	.detect = amdgpu_connector_virtual_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = amdgpu_connector_virtual_set_property,
+	.destroy = amdgpu_connector_destroy,
+	.force = amdgpu_connector_virtual_force,
+};
+
 void
 amdgpu_connector_add(struct amdgpu_device *adev,
 		      uint32_t connector_id,
@@ -1888,6 +1968,17 @@
 			connector->interlace_allowed = false;
 			connector->doublescan_allowed = false;
 			break;
+		case DRM_MODE_CONNECTOR_VIRTUAL:
+			amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+			if (!amdgpu_dig_connector)
+				goto failed;
+			amdgpu_connector->con_priv = amdgpu_dig_connector;
+			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0307ff5..b8412bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -91,6 +91,7 @@
 				      uint32_t *offset)
 {
 	struct drm_gem_object *gobj;
+	unsigned long size;
 
 	gobj = drm_gem_object_lookup(p->filp, data->handle);
 	if (gobj == NULL)
@@ -101,6 +102,11 @@
 	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
 	p->uf_entry.tv.shared = true;
 	p->uf_entry.user_pages = NULL;
+
+	size = amdgpu_bo_size(p->uf_entry.robj);
+	if (size != PAGE_SIZE || (data->offset + 8) > size)
+		return -EINVAL;
+
 	*offset = data->offset;
 
 	drm_gem_object_unreference_unlocked(gobj);
@@ -235,70 +241,212 @@
 	return ret;
 }
 
-/* Returns how many bytes TTM can move per IB.
+/* Convert microseconds to bytes. */
+static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
+{
+	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
+		return 0;
+
+	/* Since accum_us is incremented by a million per second, just
+	 * multiply it by the number of MB/s to get the number of bytes.
+	 */
+	return us << adev->mm_stats.log2_max_MBps;
+}
+
+static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
+{
+	if (!adev->mm_stats.log2_max_MBps)
+		return 0;
+
+	return bytes >> adev->mm_stats.log2_max_MBps;
+}
+
+/* Returns how many bytes TTM can move right now. If no bytes can be moved,
+ * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
+ * which means it can go over the threshold once. If that happens, the driver
+ * will be in debt and no other buffer migrations can be done until that debt
+ * is repaid.
+ *
+ * This approach allows moving a buffer of any size (it's important to allow
+ * that).
+ *
+ * The currency is simply time in microseconds and it increases as the clock
+ * ticks. The accumulated microseconds (us) are converted to bytes and
+ * returned.
  */
 static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 {
-	u64 real_vram_size = adev->mc.real_vram_size;
-	u64 vram_usage = atomic64_read(&adev->vram_usage);
+	s64 time_us, increment_us;
+	u64 max_bytes;
+	u64 free_vram, total_vram, used_vram;
 
-	/* This function is based on the current VRAM usage.
+	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
+	 * throttling.
 	 *
-	 * - If all of VRAM is free, allow relocating the number of bytes that
-	 *   is equal to 1/4 of the size of VRAM for this IB.
-	 *
-	 * - If more than one half of VRAM is occupied, only allow relocating
-	 *   1 MB of data for this IB.
-	 *
-	 * - From 0 to one half of used VRAM, the threshold decreases
-	 *   linearly.
-	 *         __________________
-	 * 1/4 of -|\               |
-	 * VRAM    | \              |
-	 *         |  \             |
-	 *         |   \            |
-	 *         |    \           |
-	 *         |     \          |
-	 *         |      \         |
-	 *         |       \________|1 MB
-	 *         |----------------|
-	 *    VRAM 0 %             100 %
-	 *         used            used
-	 *
-	 * Note: It's a threshold, not a limit. The threshold must be crossed
-	 * for buffer relocations to stop, so any buffer of an arbitrary size
-	 * can be moved as long as the threshold isn't crossed before
-	 * the relocation takes place. We don't want to disable buffer
-	 * relocations completely.
-	 *
-	 * The idea is that buffers should be placed in VRAM at creation time
-	 * and TTM should only do a minimum number of relocations during
-	 * command submission. In practice, you need to submit at least
-	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
-	 *
-	 * Also, things can get pretty crazy under memory pressure and actual
-	 * VRAM usage can change a lot, so playing safe even at 50% does
-	 * consistently increase performance.
+	 * It means that in order to get full max MBps, at least 5 IBs per
+	 * second must be submitted and not more than 200ms apart from each
+	 * other.
 	 */
+	const s64 us_upper_bound = 200000;
 
-	u64 half_vram = real_vram_size >> 1;
-	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
-	u64 bytes_moved_threshold = half_free_vram >> 1;
-	return max(bytes_moved_threshold, 1024*1024ull);
+	if (!adev->mm_stats.log2_max_MBps)
+		return 0;
+
+	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
+	used_vram = atomic64_read(&adev->vram_usage);
+	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
+
+	spin_lock(&adev->mm_stats.lock);
+
+	/* Increase the amount of accumulated us. */
+	time_us = ktime_to_us(ktime_get());
+	increment_us = time_us - adev->mm_stats.last_update_us;
+	adev->mm_stats.last_update_us = time_us;
+	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
+                                      us_upper_bound);
+
+	/* This prevents the short period of low performance when the VRAM
+	 * usage is low and the driver is in debt or doesn't have enough
+	 * accumulated us to fill VRAM quickly.
+	 *
+	 * The situation can occur in these cases:
+	 * - a lot of VRAM is freed by userspace
+	 * - the presence of a big buffer causes a lot of evictions
+	 *   (solution: split buffers into smaller ones)
+	 *
+	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
+	 * accum_us to a positive number.
+	 */
+	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
+		s64 min_us;
+
+		/* Be more aggressive on dGPUs. Try to fill a portion of free
+		 * VRAM now.
+		 */
+		if (!(adev->flags & AMD_IS_APU))
+			min_us = bytes_to_us(adev, free_vram / 4);
+		else
+			min_us = 0; /* Reset accum_us on APUs. */
+
+		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
+	}
+
+	/* This returns 0 if the driver is in debt to disallow (optional)
+	 * buffer moves.
+	 */
+	max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+	spin_unlock(&adev->mm_stats.lock);
+	return max_bytes;
 }
 
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+/* Report how many bytes have really been moved for the last command
+ * submission. This can result in a debt that can stop buffer migrations
+ * temporarily.
+ */
+static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
+					 u64 num_bytes)
+{
+	spin_lock(&adev->mm_stats.lock);
+	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+	spin_unlock(&adev->mm_stats.lock);
+}
+
+static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+				 struct amdgpu_bo *bo)
+{
+	u64 initial_bytes_moved;
+	uint32_t domain;
+	int r;
+
+	if (bo->pin_count)
+		return 0;
+
+	/* Don't move this buffer if we have depleted our allowance
+	 * to move it. Don't move anything if the threshold is zero.
+	 */
+	if (p->bytes_moved < p->bytes_moved_threshold)
+		domain = bo->prefered_domains;
+	else
+		domain = bo->allowed_domains;
+
+retry:
+	amdgpu_ttm_placement_from_domain(bo, domain);
+	initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+		initial_bytes_moved;
+
+	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+		domain = bo->allowed_domains;
+		goto retry;
+	}
+
+	return r;
+}
+
+/* Last resort, try to evict something from the current working set */
+static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+				struct amdgpu_bo_list_entry *lobj)
+{
+	uint32_t domain = lobj->robj->allowed_domains;
+	int r;
+
+	if (!p->evictable)
+		return false;
+
+	for (; &p->evictable->tv.head != &p->validated;
+	     p->evictable = list_prev_entry(p->evictable, tv.head)) {
+
+		struct amdgpu_bo_list_entry *candidate = p->evictable;
+		struct amdgpu_bo *bo = candidate->robj;
+		u64 initial_bytes_moved;
+		uint32_t other;
+
+		/* If we reached our current BO we can forget it */
+		if (candidate == lobj)
+			break;
+
+		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+		/* Check if this BO is in one of the domains we need space for */
+		if (!(other & domain))
+			continue;
+
+		/* Check if we can move this BO somewhere else */
+		other = bo->allowed_domains & ~domain;
+		if (!other)
+			continue;
+
+		/* Good we can try to move this BO somewhere else */
+		amdgpu_ttm_placement_from_domain(bo, other);
+		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+			initial_bytes_moved;
+
+		if (unlikely(r))
+			break;
+
+		p->evictable = list_prev_entry(p->evictable, tv.head);
+		list_move(&candidate->tv.head, &p->validated);
+
+		return true;
+	}
+
+	return false;
+}
+
+static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			    struct list_head *validated)
 {
 	struct amdgpu_bo_list_entry *lobj;
-	u64 initial_bytes_moved;
 	int r;
 
 	list_for_each_entry(lobj, validated, tv.head) {
 		struct amdgpu_bo *bo = lobj->robj;
 		bool binding_userptr = false;
 		struct mm_struct *usermm;
-		uint32_t domain;
 
 		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 		if (usermm && usermm != current->mm)
@@ -313,35 +461,19 @@
 			binding_userptr = true;
 		}
 
-		if (bo->pin_count)
-			continue;
+		if (p->evictable == lobj)
+			p->evictable = NULL;
 
-		/* Avoid moving this one if we have moved too many buffers
-		 * for this IB already.
-		 *
-		 * Note that this allows moving at least one buffer of
-		 * any size, because it doesn't take the current "bo"
-		 * into account. We don't want to disallow buffer moves
-		 * completely.
-		 */
-		if (p->bytes_moved <= p->bytes_moved_threshold)
-			domain = bo->prefered_domains;
-		else
-			domain = bo->allowed_domains;
-
-	retry:
-		amdgpu_ttm_placement_from_domain(bo, domain);
-		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
-			       initial_bytes_moved;
-
-		if (unlikely(r)) {
-			if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
-				domain = bo->allowed_domains;
-				goto retry;
-			}
+		do {
+			r = amdgpu_cs_bo_validate(p, bo);
+		} while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+		if (r)
 			return r;
+
+		if (bo->shadow) {
+			r = amdgpu_cs_bo_validate(p, bo);
+			if (r)
+				return r;
 		}
 
 		if (binding_userptr) {
@@ -386,8 +518,10 @@
 
 		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 					   &duplicates);
-		if (unlikely(r != 0))
+		if (unlikely(r != 0)) {
+			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 			goto error_free_pages;
+		}
 
 		/* Without a BO list we don't have userptr BOs */
 		if (!p->bo_list)
@@ -427,9 +561,10 @@
 		/* Unreserve everything again. */
 		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 
-		/* We tried to often, just abort */
+		/* We tried too many times, just abort */
 		if (!--tries) {
 			r = -EDEADLK;
+			DRM_ERROR("deadlock in %s\n", __func__);
 			goto error_free_pages;
 		}
 
@@ -441,11 +576,13 @@
 							 sizeof(struct page*));
 			if (!e->user_pages) {
 				r = -ENOMEM;
+				DRM_ERROR("calloc failure in %s\n", __func__);
 				goto error_free_pages;
 			}
 
 			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
 			if (r) {
+				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
 				drm_free_large(e->user_pages);
 				e->user_pages = NULL;
 				goto error_free_pages;
@@ -460,14 +597,23 @@
 
 	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 	p->bytes_moved = 0;
+	p->evictable = list_last_entry(&p->validated,
+				       struct amdgpu_bo_list_entry,
+				       tv.head);
 
 	r = amdgpu_cs_list_validate(p, &duplicates);
-	if (r)
+	if (r) {
+		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
 		goto error_validate;
+	}
 
 	r = amdgpu_cs_list_validate(p, &p->validated);
-	if (r)
+	if (r) {
+		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
 		goto error_validate;
+	}
+
+	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
 
 	fpriv->vm.last_eviction_counter =
 		atomic64_read(&p->adev->num_evictions);
@@ -499,8 +645,12 @@
 		}
 	}
 
-	if (p->uf_entry.robj)
-		p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+	if (!r && p->uf_entry.robj) {
+		struct amdgpu_bo *uf = p->uf_entry.robj;
+
+		r = amdgpu_ttm_bind(uf->tbo.ttm, &uf->tbo.mem);
+		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
+	}
 
 error_validate:
 	if (r) {
@@ -617,7 +767,7 @@
 			if (bo_va == NULL)
 				continue;
 
-			r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
+			r = amdgpu_vm_bo_update(adev, bo_va, false);
 			if (r)
 				return r;
 
@@ -710,6 +860,14 @@
 		if (r)
 			return r;
 
+		if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
+			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+			if (!parser->ctx->preamble_presented) {
+				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+				parser->ctx->preamble_presented = true;
+			}
+		}
+
 		if (parser->job->ring && parser->job->ring != ring)
 			return -EINVAL;
 
@@ -849,7 +1007,7 @@
 	}
 
 	job->owner = p->filp;
-	job->ctx = entity->fence_context;
+	job->fence_ctx = entity->fence_context;
 	p->fence = fence_get(&job->base.s_fence->finished);
 	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
 	job->uf_sequence = cs->out.handle;
@@ -1015,3 +1173,29 @@
 
 	return NULL;
 }
+
+/**
+ * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
+ *
+ * @parser: command submission parser context
+ *
+ * Helper for UVD/VCE VM emulation; makes sure all BOs in the list are
+ * accessible by the system VM.
+ */
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
+{
+	unsigned i;
+	int r;
+
+	if (!parser->bo_list)
+		return 0;
+
+	for (i = 0; i < parser->bo_list->num_entries; i++) {
+		struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
+
+		r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+		if (unlikely(r))
+			return r;
+	}
+
+	return 0;
+}
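
A minimal sketch of how a hypothetical UVD/VCE IB parser would use the new
helper; example_uvd_parse() and its body are illustrative, not part of this
patch:

	/* Bind every BO into the system VM before the CPU touches the IB. */
	static int example_uvd_parse(struct amdgpu_cs_parser *p, uint32_t ib_idx)
	{
		int r;

		r = amdgpu_cs_sysvm_access_required(p);
		if (r)
			return r;

		/* ... safe to read and patch the IB contents here ... */
		return 0;
	}
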
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index df7ab245..3ddae5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -41,16 +41,25 @@
 #include "atom.h"
 #include "amdgpu_atombios.h"
 #include "amd_pcie.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "si.h"
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #include "cik.h"
 #endif
 #include "vi.h"
 #include "bif/bif_4_1_d.h"
+#include <linux/pci.h>
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
 
 static const char *amdgpu_asic_name[] = {
+	"TAHITI",
+	"PITCAIRN",
+	"VERDE",
+	"OLAND",
+	"HAINAN",
 	"BONAIRE",
 	"KAVERI",
 	"KABINI",
@@ -1026,7 +1035,7 @@
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-		amdgpu_resume_kms(dev, true, true);
+		amdgpu_device_resume(dev, true, true);
 
 		dev->pdev->d3_delay = d3_delay;
 
@@ -1036,7 +1045,7 @@
 		printk(KERN_INFO "amdgpu: switched off\n");
 		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		amdgpu_suspend_kms(dev, true, true);
+		amdgpu_device_suspend(dev, true, true);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1181,10 +1190,38 @@
 	return 1;
 }
 
+static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+{
+	adev->enable_virtual_display = false;
+
+	if (amdgpu_virtual_display) {
+		struct drm_device *ddev = adev->ddev;
+		const char *pci_address_name = pci_name(ddev->pdev);
+		char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+
+		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
+		pciaddstr_tmp = pciaddstr;
+		while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+			if (!strcmp(pci_address_name, pciaddname)) {
+				adev->enable_virtual_display = true;
+				break;
+			}
+		}
+
+		DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
+				 amdgpu_virtual_display, pci_address_name,
+				 adev->enable_virtual_display);
+
+		kfree(pciaddstr);
+	}
+}
+
 static int amdgpu_early_init(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	amdgpu_whether_enable_virtual_display(adev);
+
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
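
The virtual_display string parsed by amdgpu_whether_enable_virtual_display()
above is a ';'-separated list of PCI addresses matched against pci_name().
The same tokenize-and-compare pattern as a self-contained userspace sketch
(the addresses are made up):

	#define _DEFAULT_SOURCE		/* for strsep() on glibc */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static bool pci_addr_listed(const char *list, const char *addr)
	{
		char *dup = strdup(list);	/* strsep() consumes its argument */
		char *cur = dup, *tok;
		bool found = false;

		while ((tok = strsep(&cur, ";")) != NULL) {
			if (!strcmp(tok, addr)) {
				found = true;
				break;
			}
		}
		free(dup);
		return found;
	}

	int main(void)
	{
		/* prints 1 */
		printf("%d\n", pci_addr_listed("0000:01:00.0;0000:02:00.0",
					       "0000:02:00.0"));
		return 0;
	}
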
@@ -1202,6 +1239,18 @@
 		if (r)
 			return r;
 		break;
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_VERDE:
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_OLAND:
+	case CHIP_HAINAN:
+		adev->family = AMDGPU_FAMILY_SI;
+		r = si_set_ip_blocks(adev);
+		if (r)
+			return r;
+		break;
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
@@ -1318,6 +1367,9 @@
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_block_status[i].valid)
 			continue;
+		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
+			adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
+			continue;
 		/* enable clockgating to save power */
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
 								    AMD_CG_STATE_GATE);
@@ -1461,6 +1513,7 @@
 {
 	int r, i;
 	bool runtime = false;
+	u32 max_MBps;
 
 	adev->shutdown = false;
 	adev->dev = &pdev->dev;
@@ -1484,6 +1537,8 @@
 	adev->smc_wreg = &amdgpu_invalid_wreg;
 	adev->pcie_rreg = &amdgpu_invalid_rreg;
 	adev->pcie_wreg = &amdgpu_invalid_wreg;
+	adev->pciep_rreg = &amdgpu_invalid_rreg;
+	adev->pciep_wreg = &amdgpu_invalid_wreg;
 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
 	adev->didt_rreg = &amdgpu_invalid_rreg;
@@ -1520,9 +1575,22 @@
 	spin_lock_init(&adev->didt_idx_lock);
 	spin_lock_init(&adev->gc_cac_idx_lock);
 	spin_lock_init(&adev->audio_endpt_idx_lock);
+	spin_lock_init(&adev->mm_stats.lock);
 
-	adev->rmmio_base = pci_resource_start(adev->pdev, 5);
-	adev->rmmio_size = pci_resource_len(adev->pdev, 5);
+	INIT_LIST_HEAD(&adev->shadow_list);
+	mutex_init(&adev->shadow_list_lock);
+
+	INIT_LIST_HEAD(&adev->gtt_list);
+	spin_lock_init(&adev->gtt_list_lock);
+
+	if (adev->asic_type >= CHIP_BONAIRE) {
+		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
+		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
+	} else {
+		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
+		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
+	}
+
 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
 	if (adev->rmmio == NULL) {
 		return -ENOMEM;
@@ -1530,8 +1598,9 @@
 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
 
-	/* doorbell bar mapping */
-	amdgpu_doorbell_init(adev);
+	if (adev->asic_type >= CHIP_BONAIRE)
+		/* doorbell bar mapping */
+		amdgpu_doorbell_init(adev);
 
 	/* io port mapping */
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1628,6 +1697,14 @@
 
 	adev->accel_working = true;
 
+	/* Initialize the buffer migration limit. */
+	if (amdgpu_moverate >= 0)
+		max_MBps = amdgpu_moverate;
+	else
+		max_MBps = 8; /* Allow 8 MB/s. */
+	/* Get a log2 for easy divisions. */
+	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
+
 	amdgpu_fbdev_init(adev);
 
 	r = amdgpu_ib_pool_init(adev);
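
Storing the limit as log2(MB/s) lets later throttling math replace divisions
with shifts. The arithmetic below only illustrates why the log2 form is
convenient; the driver's real threshold formula lives in
amdgpu_cs_get_threshold_for_moves() and is not reproduced here:

	#include <stdint.h>
	#include <stdio.h>

	/* Bytes that may move in 'us' microseconds at 2^log2_max_MBps MB/s;
	 * the rate multiply is a plain shift. */
	static uint64_t move_budget_bytes(uint64_t us, unsigned log2_max_MBps)
	{
		return (us << (log2_max_MBps + 20)) / 1000000;
	}

	int main(void)
	{
		/* default 8 MB/s -> log2 = 3; over 100 ms that is ~0.8 MB */
		printf("%llu\n",
		       (unsigned long long)move_budget_bytes(100000, 3));
		return 0;
	}
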
@@ -1732,7 +1809,8 @@
 	adev->rio_mem = NULL;
 	iounmap(adev->rmmio);
 	adev->rmmio = NULL;
-	amdgpu_doorbell_fini(adev);
+	if (adev->asic_type >= CHIP_BONAIRE)
+		amdgpu_doorbell_fini(adev);
 	amdgpu_debugfs_regs_cleanup(adev);
 	amdgpu_debugfs_remove_files(adev);
 }
@@ -1742,7 +1820,7 @@
  * Suspend & resume.
  */
 /**
- * amdgpu_suspend_kms - initiate device suspend
+ * amdgpu_device_suspend - initiate device suspend
  *
  * @pdev: drm dev pointer
  * @state: suspend state
@@ -1751,7 +1829,7 @@
  * Returns 0 for success or an error on failure.
  * Called at driver suspend.
  */
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 {
 	struct amdgpu_device *adev;
 	struct drm_crtc *crtc;
@@ -1764,7 +1842,8 @@
 
 	adev = dev->dev_private;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 		return 0;
 
 	drm_kms_helper_poll_disable(dev);
@@ -1819,6 +1898,10 @@
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
+	} else {
+		r = amdgpu_asic_reset(adev);
+		if (r)
+			DRM_ERROR("amdgpu asic reset failed\n");
 	}
 
 	if (fbcon) {
@@ -1830,7 +1913,7 @@
 }
 
 /**
- * amdgpu_resume_kms - initiate device resume
+ * amdgpu_device_resume - initiate device resume
  *
  * @pdev: drm dev pointer
  *
@@ -1838,32 +1921,37 @@
  * Returns 0 for success or an error on failure.
  * Called at driver resume.
  */
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_crtc *crtc;
 	int r;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 		return 0;
 
-	if (fbcon) {
+	if (fbcon)
 		console_lock();
-	}
+
 	if (resume) {
 		pci_set_power_state(dev->pdev, PCI_D0);
 		pci_restore_state(dev->pdev);
-		if (pci_enable_device(dev->pdev)) {
+		r = pci_enable_device(dev->pdev);
+		if (r) {
 			if (fbcon)
 				console_unlock();
-			return -1;
+			return r;
 		}
 	}
 
 	/* post card */
-	if (!amdgpu_card_posted(adev))
-		amdgpu_atom_asic_init(adev->mode_info.atom_context);
+	if (!amdgpu_card_posted(adev) || !resume) {
+		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		if (r)
+			DRM_ERROR("amdgpu asic init failed\n");
+	}
 
 	r = amdgpu_resume(adev);
 	if (r)
@@ -1937,6 +2025,126 @@
 	return 0;
 }
 
+static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+{
+	int i;
+	bool asic_hang = false;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_blocks[i].funcs->check_soft_reset)
+			adev->ip_blocks[i].funcs->check_soft_reset(adev);
+		if (adev->ip_block_status[i].hang) {
+			DRM_INFO("IP block:%d is hung!\n", i);
+			asic_hang = true;
+		}
+	}
+	return asic_hang;
+}
+
+int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+{
+	int i, r = 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_block_status[i].hang &&
+		    adev->ip_blocks[i].funcs->pre_soft_reset) {
+			r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+			if (r)
+				return r;
+		}
+	}
+
+	return 0;
+}
+
+static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+{
+	int i;
+
+	/* ip_block_status is indexed by block position, not block type,
+	 * so look the types up via ip_blocks[i].type. */
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC ||
+		     adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC ||
+		     adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP ||
+		     adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE) &&
+		    adev->ip_block_status[i].hang) {
+			DRM_INFO("Some block needs a full reset!\n");
+			return true;
+		}
+	}
+	return false;
+}
+
+static int amdgpu_soft_reset(struct amdgpu_device *adev)
+{
+	int i, r = 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_block_status[i].hang &&
+		    adev->ip_blocks[i].funcs->soft_reset) {
+			r = adev->ip_blocks[i].funcs->soft_reset(adev);
+			if (r)
+				return r;
+		}
+	}
+
+	return 0;
+}
+
+static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+{
+	int i, r = 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_block_status[i].hang &&
+		    adev->ip_blocks[i].funcs->post_soft_reset)
+			r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	return amdgpu_lockup_timeout > 0;
+}
+
+static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+					   struct amdgpu_ring *ring,
+					   struct amdgpu_bo *bo,
+					   struct fence **fence)
+{
+	uint32_t domain;
+	int r;
+
+	if (!bo->shadow)
+		return 0;
+
+	r = amdgpu_bo_reserve(bo, false);
+	if (r)
+		return r;
+	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	/* if bo has been evicted, then no need to recover */
+	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
+						  NULL, fence, true);
+		if (r) {
+			DRM_ERROR("recover page table failed!\n");
+			goto err;
+		}
+	}
+err:
+	amdgpu_bo_unreserve(bo);
+	return r;
+}
+
 /**
  * amdgpu_gpu_reset - reset the asic
  *
@@ -1949,6 +2157,12 @@
 {
 	int i, r;
 	int resched;
+	bool need_full_reset;
+
+	if (!amdgpu_check_soft_reset(adev)) {
+		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+		return 0;
+	}
 
 	atomic_inc(&adev->gpu_reset_counter);
 
@@ -1967,40 +2181,93 @@
 	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
 	amdgpu_fence_driver_force_completion(adev);
 
-	/* save scratch */
-	amdgpu_atombios_scratch_regs_save(adev);
-	r = amdgpu_suspend(adev);
+	need_full_reset = amdgpu_need_full_reset(adev);
+
+	if (!need_full_reset) {
+		amdgpu_pre_soft_reset(adev);
+		r = amdgpu_soft_reset(adev);
+		amdgpu_post_soft_reset(adev);
+		if (r || amdgpu_check_soft_reset(adev)) {
+			DRM_INFO("soft reset failed, will fall back to full reset!\n");
+			need_full_reset = true;
+		}
+	}
+
+	if (need_full_reset) {
+		/* save scratch */
+		amdgpu_atombios_scratch_regs_save(adev);
+		r = amdgpu_suspend(adev);
 
 retry:
-	/* Disable fb access */
-	if (adev->mode_info.num_crtc) {
-		struct amdgpu_mode_mc_save save;
-		amdgpu_display_stop_mc_access(adev, &save);
-		amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
-	}
+		/* Disable fb access */
+		if (adev->mode_info.num_crtc) {
+			struct amdgpu_mode_mc_save save;
+			amdgpu_display_stop_mc_access(adev, &save);
+			amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+		}
 
-	r = amdgpu_asic_reset(adev);
-	/* post card */
-	amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		r = amdgpu_asic_reset(adev);
+		/* post card */
+		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 
-	if (!r) {
-		dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
-		r = amdgpu_resume(adev);
+		if (!r) {
+			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+			r = amdgpu_resume(adev);
+		}
+		/* restore scratch */
+		amdgpu_atombios_scratch_regs_restore(adev);
 	}
-	/* restore scratch */
-	amdgpu_atombios_scratch_regs_restore(adev);
 	if (!r) {
+		amdgpu_irq_gpu_reset_resume_helper(adev);
+		if (need_full_reset && amdgpu_need_backup(adev)) {
+			r = amdgpu_ttm_recover_gart(adev);
+			if (r)
+				DRM_ERROR("gart recovery failed!\n");
+		}
 		r = amdgpu_ib_ring_tests(adev);
 		if (r) {
 			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
 			r = amdgpu_suspend(adev);
+			need_full_reset = true;
 			goto retry;
 		}
+		/* Recover vm page tables, since we cannot depend on VRAM
+		 * being consistent after a gpu full reset.
+		 */
+		if (need_full_reset && amdgpu_need_backup(adev)) {
+			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+			struct amdgpu_bo *bo, *tmp;
+			struct fence *fence = NULL, *next = NULL;
 
+			DRM_INFO("recover vram bo from shadow\n");
+			mutex_lock(&adev->shadow_list_lock);
+			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+				if (fence) {
+					r = fence_wait(fence, false);
+					if (r) {
+						WARN(r, "recovery from shadow didn't complete\n");
+						break;
+					}
+				}
+
+				fence_put(fence);
+				fence = next;
+			}
+			mutex_unlock(&adev->shadow_list_lock);
+			if (fence) {
+				r = fence_wait(fence, false);
+				if (r)
+					WARN(r, "recovery from shadow didn't complete\n");
+			}
+			fence_put(fence);
+		}
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = adev->rings[i];
 			if (!ring)
 				continue;
+
 			amd_sched_job_recovery(&ring->sched);
 			kthread_unpark(ring->sched.thread);
 		}
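
The shadow-recovery loop above pipelines the work: while the copy for BO N is
in flight it only waits on the fence of copy N-1, then drains the final fence
once the list is exhausted. The bare pattern, with issue_copy() standing in
for amdgpu_recover_vram_from_shadow() (a sketch only, not compilable on its
own):

	struct fence *fence = NULL, *next = NULL;

	list_for_each_entry_safe(bo, tmp, &shadow_list, shadow_list) {
		issue_copy(bo, &next);		/* kick off copy for this BO */
		if (fence)
			fence_wait(fence, false); /* overlaps with 'next' */
		fence_put(fence);
		fence = next;
	}
	if (fence)
		fence_wait(fence, false);	/* drain the last copy */
	fence_put(fence);
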
@@ -2020,7 +2287,6 @@
 		/* bad news, how to tell it to userspace ? */
 		dev_info(adev->dev, "GPU reset failed\n");
 	}
-	amdgpu_irq_gpu_reset_resume_helper(adev);
 
 	return r;
 }
@@ -2178,22 +2444,26 @@
 	struct amdgpu_device *adev = f->f_inode->i_private;
 	ssize_t result = 0;
 	int r;
-	bool use_bank;
+	bool pm_pg_lock, use_bank;
 	unsigned instance_bank, sh_bank, se_bank;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	/* are we reading registers for which a PG lock is necessary? */
+	pm_pg_lock = (*pos >> 23) & 1;
+
 	if (*pos & (1ULL << 62)) {
 		se_bank = (*pos >> 24) & 0x3FF;
 		sh_bank = (*pos >> 34) & 0x3FF;
 		instance_bank = (*pos >> 44) & 0x3FF;
 		use_bank = 1;
-		*pos &= 0xFFFFFF;
 	} else {
 		use_bank = 0;
 	}
 
+	*pos &= 0x3FFFF;
+
 	if (use_bank) {
 		if (sh_bank >= adev->gfx.config.max_sh_per_se ||
 		    se_bank >= adev->gfx.config.max_shader_engines)
@@ -2203,6 +2473,9 @@
 					sh_bank, instance_bank);
 	}
 
+	if (pm_pg_lock)
+		mutex_lock(&adev->pm.mutex);
+
 	while (size) {
 		uint32_t value;
 
@@ -2228,6 +2501,9 @@
 		mutex_unlock(&adev->grbm_idx_mutex);
 	}
 
+	if (pm_pg_lock)
+		mutex_unlock(&adev->pm.mutex);
+
 	return result;
 }
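
For reference, the offset consumed by this debugfs read is a packed 64-bit
value: bits 0-17 hold the register byte offset, bit 23 requests the PG lock,
bits 24-33/34-43/44-53 carry the SE/SH/instance bank selects, and bit 62 is
the use_bank flag. A self-contained userspace encoder matching that unpacking
(the register offset in main() is an arbitrary example):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t amdgpu_regs_pos(uint32_t reg_byte_off, unsigned se,
					unsigned sh, unsigned instance,
					int pg_lock)
	{
		uint64_t pos = reg_byte_off & 0x3FFFF;

		pos |= (uint64_t)(se & 0x3FF) << 24;
		pos |= (uint64_t)(sh & 0x3FF) << 34;
		pos |= (uint64_t)(instance & 0x3FF) << 44;
		pos |= 1ULL << 62;		/* use_bank */
		if (pg_lock)
			pos |= 1ULL << 23;	/* take pm.mutex while reading */
		return pos;
	}

	int main(void)
	{
		printf("0x%llx\n",
		       (unsigned long long)amdgpu_regs_pos(0x2440, 0, 0, 0, 1));
		return 0;
	}
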
 
@@ -2385,7 +2661,7 @@
 	while (size) {
 		uint32_t value;
 
-		value = RREG32_SMC(*pos >> 2);
+		value = RREG32_SMC(*pos);
 		r = put_user(value, (uint32_t *)buf);
 		if (r)
 			return r;
@@ -2416,7 +2692,7 @@
 		if (r)
 			return r;
 
-		WREG32_SMC(*pos >> 2, value);
+		WREG32_SMC(*pos, value);
 
 		result += 4;
 		buf += 4;
@@ -2443,7 +2719,7 @@
 		return -ENOMEM;
 
 	/* version, increment each time something is added */
-	config[no_regs++] = 0;
+	config[no_regs++] = 2;
 	config[no_regs++] = adev->gfx.config.max_shader_engines;
 	config[no_regs++] = adev->gfx.config.max_tile_pipes;
 	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
@@ -2468,6 +2744,15 @@
 	config[no_regs++] = adev->gfx.config.gb_addr_config;
 	config[no_regs++] = adev->gfx.config.num_rbs;
 
+	/* rev==1 */
+	config[no_regs++] = adev->rev_id;
+	config[no_regs++] = adev->pg_flags;
+	config[no_regs++] = adev->cg_flags;
+
+	/* rev==2 */
+	config[no_regs++] = adev->family;
+	config[no_regs++] = adev->external_rev_id;
+
 	while (size && (*pos < no_regs * 4)) {
 		uint32_t value;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 76f9602..9af8d3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -41,7 +41,7 @@
 		container_of(cb, struct amdgpu_flip_work, cb);
 
 	fence_put(f);
-	schedule_work(&work->flip_work);
+	schedule_work(&work->flip_work.work);
 }
 
 static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
@@ -63,16 +63,17 @@
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
 {
+	struct delayed_work *delayed_work =
+		container_of(__work, struct delayed_work, work);
 	struct amdgpu_flip_work *work =
-		container_of(__work, struct amdgpu_flip_work, flip_work);
+		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
 	struct amdgpu_device *adev = work->adev;
 	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
 
 	struct drm_crtc *crtc = &amdgpuCrtc->base;
 	unsigned long flags;
-	unsigned i, repcnt = 4;
-	int vpos, hpos, stat, min_udelay = 0;
-	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+	unsigned i;
+	int vpos, hpos;
 
 	if (amdgpu_flip_handle_fence(work, &work->excl))
 		return;
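
Since flip_work is now a delayed_work, the callback has to peel off two
container_of() layers: work_struct -> delayed_work -> amdgpu_flip_work. The
idiom in a self-contained userspace form (struct names shortened for the
example):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct { int pending; };
	struct delayed_work { struct work_struct work; };
	struct flip_work { int crtc_id; struct delayed_work flip_work; };

	int main(void)
	{
		struct flip_work fw = { .crtc_id = 2 };
		/* the workqueue core hands the callback this pointer */
		struct work_struct *w = &fw.flip_work.work;

		struct delayed_work *dw =
			container_of(w, struct delayed_work, work);
		struct flip_work *f =
			container_of(dw, struct flip_work, flip_work);

		printf("%d\n", f->crtc_id);	/* prints 2 */
		return 0;
	}
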
@@ -81,55 +82,23 @@
 		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
 			return;
 
-	/* We borrow the event spin lock for protecting flip_status */
-	spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
-	/* If this happens to execute within the "virtually extended" vblank
-	 * interval before the start of the real vblank interval then it needs
-	 * to delay programming the mmio flip until the real vblank is entered.
-	 * This prevents completing a flip too early due to the way we fudge
-	 * our vblank counter and vblank timestamps in order to work around the
-	 * problem that the hw fires vblank interrupts before actual start of
-	 * vblank (when line buffer refilling is done for a frame). It
-	 * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
-	 * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
-	 *
-	 * In practice this won't execute very often unless on very fast
-	 * machines because the time window for this to happen is very small.
+	/* Wait until we're out of the vertical blank period before the one
+	 * targeted by the flip
 	 */
-	while (amdgpuCrtc->enabled && --repcnt) {
-		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
-		 * start in hpos, and to the "fudged earlier" vblank start in
-		 * vpos.
-		 */
-		stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
-						  GET_DISTANCE_TO_VBLANKSTART,
-						  &vpos, &hpos, NULL, NULL,
-						  &crtc->hwmode);
-
-		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
-		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
-		    !(vpos >= 0 && hpos <= 0))
-			break;
-
-		/* Sleep at least until estimated real start of hw vblank */
-		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
-		if (min_udelay > vblank->framedur_ns / 2000) {
-			/* Don't wait ridiculously long - something is wrong */
-			repcnt = 0;
-			break;
-		}
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-		usleep_range(min_udelay, 2 * min_udelay);
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (amdgpuCrtc->enabled &&
+	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+					&vpos, &hpos, NULL, NULL,
+					&crtc->hwmode)
+	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+	    (int)(work->target_vblank -
+		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
+		return;
 	}
 
-	if (!repcnt)
-		DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
-				 "framedur %d, linedur %d, stat %d, vpos %d, "
-				 "hpos %d\n", work->crtc_id, min_udelay,
-				 vblank->framedur_ns / 1000,
-				 vblank->linedur_ns / 1000, stat, vpos, hpos);
+	/* We borrow the event spin lock for protecting flip_status */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
 	/* Do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
@@ -169,10 +138,10 @@
 	kfree(work);
 }
 
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
-			  struct drm_framebuffer *fb,
-			  struct drm_pending_vblank_event *event,
-			  uint32_t page_flip_flags)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event,
+				 uint32_t page_flip_flags, uint32_t target)
 {
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -191,7 +160,7 @@
 	if (work == NULL)
 		return -ENOMEM;
 
-	INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
+	INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
 	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
 
 	work->event = event;
@@ -237,12 +206,8 @@
 	amdgpu_bo_unreserve(new_rbo);
 
 	work->base = base;
-
-	r = drm_crtc_vblank_get(crtc);
-	if (r) {
-		DRM_ERROR("failed to get vblank before flip\n");
-		goto pflip_cleanup;
-	}
+	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
 	/* We borrow the event spin lock for protecting flip_work */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
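
work->target_vblank is stored relative to the hardware counter, and the work
function compares with (int)(target - current) > 0 so that 32-bit counter
wrap-around is handled. A quick standalone check of that idiom:

	#include <stdint.h>
	#include <stdio.h>

	/* True while 'target' is still in the future of a wrapping counter. */
	static int vblank_pending(uint32_t target, uint32_t current_count)
	{
		return (int32_t)(target - current_count) > 0;
	}

	int main(void)
	{
		printf("%d\n", vblank_pending(5, 3));		/* 1: not yet */
		printf("%d\n", vblank_pending(3, 5));		/* 0: passed  */
		printf("%d\n", vblank_pending(2, 0xFFFFFFFEu));	/* 1: wrapped */
		return 0;
	}
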
@@ -250,7 +215,7 @@
 		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		r = -EBUSY;
-		goto vblank_cleanup;
+		goto pflip_cleanup;
 	}
 
 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
@@ -262,12 +227,9 @@
 	/* update crtc fb */
 	crtc->primary->fb = fb;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	amdgpu_flip_work_func(&work->flip_work);
+	amdgpu_flip_work_func(&work->flip_work.work);
 	return 0;
 
-vblank_cleanup:
-	drm_crtc_vblank_put(crtc);
-
 pflip_cleanup:
 	if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
 		DRM_ERROR("failed to reserve new rbo in error path\n");
@@ -335,7 +297,7 @@
 	return ret;
 }
 
-static const char *encoder_names[38] = {
+static const char *encoder_names[41] = {
 	"NONE",
 	"INTERNAL_LVDS",
 	"INTERNAL_TMDS1",
@@ -374,6 +336,9 @@
 	"TRAVIS",
 	"INTERNAL_VCE",
 	"INTERNAL_UNIPHY3",
+	"HDMI_ANX9805",
+	"INTERNAL_AMCLK",
+	"VIRTUAL",
 };
 
 static const char *hpd_names[6] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 11263c5..5963626 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -53,13 +53,17 @@
  * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
  *           at the end of IBs.
  * - 3.3.0 - Add VM support for UVD on supported hardware.
+ * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
+ * - 3.5.0 - Add support for new UVD_NO_OP register.
+ * - 3.6.0 - KMD now emits CONTEXT_CONTROL packets in the ring buffer.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	3
+#define KMS_DRIVER_MINOR	6
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
 int amdgpu_gart_size = -1; /* auto */
+int amdgpu_moverate = -1; /* auto */
 int amdgpu_benchmarking = 0;
 int amdgpu_testing = 0;
 int amdgpu_audio = -1;
@@ -84,11 +88,14 @@
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_powerplay = -1;
 int amdgpu_powercontainment = 1;
+int amdgpu_sclk_deep_sleep_en = 1;
 unsigned amdgpu_pcie_gen_cap = 0;
 unsigned amdgpu_pcie_lane_cap = 0;
 unsigned amdgpu_cg_mask = 0xffffffff;
 unsigned amdgpu_pg_mask = 0xffffffff;
 char *amdgpu_disable_cu = NULL;
+char *amdgpu_virtual_display = NULL;
+unsigned amdgpu_pp_feature_mask = 0xffffffff;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -96,6 +103,9 @@
 MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
 module_param_named(gartsize, amdgpu_gart_size, int, 0600);
 
+MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s (32, 64, etc.; -1 = auto; 0 and 1 are both clamped to the 1 MB/s minimum)");
+module_param_named(moverate, amdgpu_moverate, int, 0600);
+
 MODULE_PARM_DESC(benchmark, "Run benchmark");
 module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
 
@@ -168,8 +178,14 @@
 
 MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
 module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
+
+MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
 #endif
 
+MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
+module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
+
 MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
 module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
 
@@ -185,7 +201,84 @@
 MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
 module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
 
+MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (set to a ';'-separated list of PCI addresses, e.g. xxxx:xx:xx.x;xxxx:xx:xx.x)");
+module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+
 static const struct pci_device_id pciidlist[] = {
+#ifdef  CONFIG_DRM_AMDGPU_SI
+	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+	{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+	{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+	{0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+	{0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+	{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+	{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+	{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+	{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+	{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+	{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
 	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@@ -383,32 +476,72 @@
 	drm_put_dev(dev);
 }
 
+static void
+amdgpu_pci_shutdown(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct amdgpu_device *adev = dev->dev_private;
+
+	/* If we are running in a VM, make sure the device
+	 * is torn down properly on reboot/shutdown.
+	 */
+	if (adev->virtualization.is_virtual)
+		amdgpu_pci_remove(pdev);
+}
+
 static int amdgpu_pmops_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	return amdgpu_suspend_kms(drm_dev, true, true);
+
+	return amdgpu_device_suspend(drm_dev, true, true);
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	return amdgpu_resume_kms(drm_dev, true, true);
+
+	/* GPU comes up enabled by the bios on resume */
+	if (amdgpu_device_is_px(drm_dev)) {
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+	}
+
+	return amdgpu_device_resume(drm_dev, true, true);
 }
 
 static int amdgpu_pmops_freeze(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	return amdgpu_suspend_kms(drm_dev, false, true);
+
+	return amdgpu_device_suspend(drm_dev, false, true);
 }
 
 static int amdgpu_pmops_thaw(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	return amdgpu_resume_kms(drm_dev, false, true);
+
+	return amdgpu_device_resume(drm_dev, false, true);
+}
+
+static int amdgpu_pmops_poweroff(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return amdgpu_device_suspend(drm_dev, true, true);
+}
+
+static int amdgpu_pmops_restore(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return amdgpu_device_resume(drm_dev, false, true);
 }
 
 static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -426,7 +559,7 @@
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 
-	ret = amdgpu_suspend_kms(drm_dev, false, false);
+	ret = amdgpu_device_suspend(drm_dev, false, false);
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_ignore_hotplug(pdev);
@@ -459,7 +592,7 @@
 		return ret;
 	pci_set_master(pdev);
 
-	ret = amdgpu_resume_kms(drm_dev, false, false);
+	ret = amdgpu_device_resume(drm_dev, false, false);
 	drm_kms_helper_poll_enable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
@@ -513,8 +646,8 @@
 	.resume = amdgpu_pmops_resume,
 	.freeze = amdgpu_pmops_freeze,
 	.thaw = amdgpu_pmops_thaw,
-	.poweroff = amdgpu_pmops_freeze,
-	.restore = amdgpu_pmops_resume,
+	.poweroff = amdgpu_pmops_poweroff,
+	.restore = amdgpu_pmops_restore,
 	.runtime_suspend = amdgpu_pmops_runtime_suspend,
 	.runtime_resume = amdgpu_pmops_runtime_resume,
 	.runtime_idle = amdgpu_pmops_runtime_idle,
@@ -596,6 +729,7 @@
 	.id_table = pciidlist,
 	.probe = amdgpu_pci_probe,
 	.remove = amdgpu_pci_remove,
+	.shutdown = amdgpu_pci_shutdown,
 	.driver.pm = &amdgpu_pm_ops,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index bf033b5..107fbb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -25,6 +25,7 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -47,8 +48,35 @@
 	struct amdgpu_device *adev;
 };
 
+static int
+amdgpufb_open(struct fb_info *info, int user)
+{
+	struct amdgpu_fbdev *rfbdev = info->par;
+	struct amdgpu_device *adev = rfbdev->adev;
+	int ret = pm_runtime_get_sync(adev->ddev->dev);
+
+	if (ret < 0 && ret != -EACCES) {
+		pm_runtime_mark_last_busy(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev->ddev->dev);
+		return ret;
+	}
+	return 0;
+}
+
+static int
+amdgpufb_release(struct fb_info *info, int user)
+{
+	struct amdgpu_fbdev *rfbdev = info->par;
+	struct amdgpu_device *adev = rfbdev->adev;
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+	return 0;
+}
+
 static struct fb_ops amdgpufb_ops = {
 	.owner = THIS_MODULE,
+	.fb_open = amdgpufb_open,
+	.fb_release = amdgpufb_release,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = drm_fb_helper_cfb_fillrect,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index 503d540..e73728d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -31,14 +31,6 @@
 #define AMDGPU_GWS_SHIFT	PAGE_SHIFT
 #define AMDGPU_OA_SHIFT		PAGE_SHIFT
 
-#define AMDGPU_PL_GDS		TTM_PL_PRIV0
-#define AMDGPU_PL_GWS		TTM_PL_PRIV1
-#define AMDGPU_PL_OA		TTM_PL_PRIV2
-
-#define AMDGPU_PL_FLAG_GDS		TTM_PL_FLAG_PRIV0
-#define AMDGPU_PL_FLAG_GWS		TTM_PL_FLAG_PRIV1
-#define AMDGPU_PL_FLAG_OA		TTM_PL_FLAG_PRIV2
-
 struct amdgpu_ring;
 struct amdgpu_bo;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 31a6763..c93a92a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -186,10 +186,8 @@
 			 "AMDGPU i2c hw bus %s", name);
 		i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
 		ret = i2c_add_adapter(&i2c->adapter);
-		if (ret) {
-			DRM_ERROR("Failed to register hw i2c %s\n", name);
+		if (ret)
 			goto out_free;
-		}
 	} else {
 		/* set the amdgpu bit adapter */
 		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index a31d7ef..4127e7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -124,7 +124,8 @@
 	bool skip_preamble, need_ctx_switch;
 	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
-	uint64_t ctx;
+	uint64_t fence_ctx;
+	uint32_t status = 0, alloc_size;
 
 	unsigned i;
 	int r = 0;
@@ -135,14 +136,14 @@
 	/* ring tests don't use a job */
 	if (job) {
 		vm = job->vm;
-		ctx = job->ctx;
+		fence_ctx = job->fence_ctx;
 	} else {
 		vm = NULL;
-		ctx = 0;
+		fence_ctx = 0;
 	}
 
 	if (!ring->ready) {
-		dev_err(adev->dev, "couldn't schedule ib\n");
+		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
 		return -EINVAL;
 	}
 
@@ -151,7 +152,10 @@
 		return -EINVAL;
 	}
 
-	r = amdgpu_ring_alloc(ring, 256 * num_ibs);
+	alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
+		num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+
+	r = amdgpu_ring_alloc(ring, alloc_size);
 	if (r) {
 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
@@ -174,13 +178,22 @@
 	/* always set cond_exec_polling to CONTINUE */
 	*ring->cond_exe_cpu_addr = 1;
 
-	skip_preamble = ring->current_ctx == ctx;
-	need_ctx_switch = ring->current_ctx != ctx;
+	skip_preamble = ring->current_ctx == fence_ctx;
+	need_ctx_switch = ring->current_ctx != fence_ctx;
+	if (job && ring->funcs->emit_cntxcntl) {
+		if (need_ctx_switch)
+			status |= AMDGPU_HAVE_CTX_SWITCH;
+		status |= job->preamble_status;
+		amdgpu_ring_emit_cntxcntl(ring, status);
+	}
+
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
 
 		/* drop preamble IBs if we don't have a context switch */
-		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
+		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
+			skip_preamble &&
+			!(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
 			continue;
 
 		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
@@ -209,7 +222,9 @@
 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
-	ring->current_ctx = ctx;
+	ring->current_ctx = fence_ctx;
+	if (ring->funcs->emit_switch_buffer)
+		amdgpu_ring_emit_switch_buffer(ring);
 	amdgpu_ring_commit(ring);
 	return 0;
 }
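
Ring space is no longer a flat 256 dwords per IB; it is sized from two
per-ring callbacks, a fixed per-frame overhead plus a per-IB cost. The numbers
below are made up purely to illustrate the shape of the computation (the real
values come from ring->funcs):

	#include <stdio.h>

	static unsigned ib_alloc_size(unsigned dma_frame_size,
				      unsigned emit_ib_size, unsigned num_ibs)
	{
		return dma_frame_size + num_ibs * emit_ib_size;
	}

	int main(void)
	{
		/* hypothetical ring: 64-dword frame overhead, 8 dwords per IB */
		printf("%u dwords\n", ib_alloc_size(64, 8, 3));
		return 0;
	}
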
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 534fc04..3ab4c65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -40,32 +40,15 @@
 
 	/* Allocate ring buffer */
 	if (adev->irq.ih.ring_obj == NULL) {
-		r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
-				     PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0,
-				     NULL, NULL, &adev->irq.ih.ring_obj);
+		r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
+					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+					    &adev->irq.ih.ring_obj,
+					    &adev->irq.ih.gpu_addr,
+					    (void **)&adev->irq.ih.ring);
 		if (r) {
 			DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
 			return r;
 		}
-		r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
-		if (unlikely(r != 0))
-			return r;
-		r = amdgpu_bo_pin(adev->irq.ih.ring_obj,
-				  AMDGPU_GEM_DOMAIN_GTT,
-				  &adev->irq.ih.gpu_addr);
-		if (r) {
-			amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
-			DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r);
-			return r;
-		}
-		r = amdgpu_bo_kmap(adev->irq.ih.ring_obj,
-				   (void **)&adev->irq.ih.ring);
-		amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
-		if (r) {
-			DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r);
-			return r;
-		}
 	}
 	return 0;
 }
@@ -136,8 +119,6 @@
  */
 void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
 {
-	int r;
-
 	if (adev->irq.ih.use_bus_addr) {
 		if (adev->irq.ih.ring) {
 			/* add 8 bytes for the rptr/wptr shadows and
@@ -149,17 +130,9 @@
 			adev->irq.ih.ring = NULL;
 		}
 	} else {
-		if (adev->irq.ih.ring_obj) {
-			r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
-			if (likely(r == 0)) {
-				amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
-				amdgpu_bo_unpin(adev->irq.ih.ring_obj);
-				amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
-			}
-			amdgpu_bo_unref(&adev->irq.ih.ring_obj);
-			adev->irq.ih.ring = NULL;
-			adev->irq.ih.ring_obj = NULL;
-		}
+		amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
+				      &adev->irq.ih.gpu_addr,
+				      (void **)&adev->irq.ih.ring);
 		amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
 		amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 7ef0935..f016464 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -70,6 +70,7 @@
 	/* gen irq stuff */
 	struct irq_domain		*domain; /* GPU irq controller domain */
 	unsigned			virq[AMDGPU_MAX_IRQ_SRC_ID];
+	uint32_t                        srbm_soft_reset;
 };
 
 void amdgpu_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 6674d40..8c58079 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -91,7 +91,7 @@
 		amdgpu_ib_free(job->adev, &job->ibs[i], f);
 }
 
-void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
 	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
@@ -124,7 +124,7 @@
 		return r;
 
 	job->owner = owner;
-	job->ctx = entity->fence_context;
+	job->fence_ctx = entity->fence_context;
 	*f = fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
 	amd_sched_entity_push_job(&job->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d942654..c2c7fb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -292,14 +292,14 @@
 			type = AMD_IP_BLOCK_TYPE_UVD;
 			ring_mask = adev->uvd.ring.ready ? 1 : 0;
 			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-			ib_size_alignment = 8;
+			ib_size_alignment = 16;
 			break;
 		case AMDGPU_HW_IP_VCE:
 			type = AMD_IP_BLOCK_TYPE_VCE;
-			for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
+			for (i = 0; i < adev->vce.num_rings; i++)
 				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
 			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-			ib_size_alignment = 8;
+			ib_size_alignment = 1;
 			break;
 		default:
 			return -EINVAL;
@@ -373,6 +373,9 @@
 	case AMDGPU_INFO_NUM_BYTES_MOVED:
 		ui64 = atomic64_read(&adev->num_bytes_moved);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_NUM_EVICTIONS:
+		ui64 = atomic64_read(&adev->num_evictions);
+		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VRAM_USAGE:
 		ui64 = atomic64_read(&adev->vram_usage);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
@@ -539,12 +542,16 @@
 		return r;
 
 	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
-	if (unlikely(!fpriv))
-		return -ENOMEM;
+	if (unlikely(!fpriv)) {
+		r = -ENOMEM;
+		goto out_suspend;
+	}
 
 	r = amdgpu_vm_init(adev, &fpriv->vm);
-	if (r)
-		goto error_free;
+	if (r) {
+		kfree(fpriv);
+		goto out_suspend;
+	}
 
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init(&fpriv->bo_list_handles);
@@ -553,12 +560,9 @@
 
 	file_priv->driver_priv = fpriv;
 
+out_suspend:
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
-	return 0;
-
-error_free:
-	kfree(fpriv);
 
 	return r;
 }
@@ -597,6 +601,9 @@
 
 	kfree(fpriv);
 	file_priv->driver_priv = NULL;
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
 }
 
 /**
@@ -611,6 +618,7 @@
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
 				struct drm_file *file_priv)
 {
+	pm_runtime_get_sync(dev->dev);
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6b1d7d3..7b0eff7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -39,6 +39,8 @@
 #include <drm/drm_plane_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
+#include <linux/hrtimer.h>
+#include "amdgpu_irq.h"
 
 struct amdgpu_bo;
 struct amdgpu_device;
@@ -339,6 +341,8 @@
 	int			num_dig; /* number of dig blocks */
 	int			disp_priority;
 	const struct amdgpu_display_funcs *funcs;
+	struct hrtimer vblank_timer;
+	enum amdgpu_interrupt_state vsync_timer_enabled;
 };
 
 #define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -587,10 +591,10 @@
 void amdgpu_print_display_setup(struct drm_device *dev);
 int amdgpu_modeset_create_props(struct amdgpu_device *adev);
 int amdgpu_crtc_set_config(struct drm_mode_set *set);
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
-			  struct drm_framebuffer *fb,
-			  struct drm_pending_vblank_event *event,
-			  uint32_t page_flip_flags);
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event,
+				 uint32_t page_flip_flags, uint32_t target);
 extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6f0873c..428aa00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -38,20 +38,17 @@
 #include "amdgpu_trace.h"
 
 
-int amdgpu_ttm_init(struct amdgpu_device *adev);
-void amdgpu_ttm_fini(struct amdgpu_device *adev);
 
 static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
 						struct ttm_mem_reg *mem)
 {
-	u64 ret = 0;
-	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
-		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
-			   adev->mc.visible_vram_size ?
-			   adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
-			   mem->size;
-	}
-	return ret;
+	if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
+		return 0;
+
+	return ((mem->start << PAGE_SHIFT) + mem->size) >
+		adev->mc.visible_vram_size ?
+		adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
+		mem->size;
 }
 
 static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
@@ -99,6 +96,11 @@
 
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
+	if (!list_empty(&bo->shadow_list)) {
+		mutex_lock(&bo->adev->shadow_list_lock);
+		list_del_init(&bo->shadow_list);
+		mutex_unlock(&bo->adev->shadow_list_lock);
+	}
 	kfree(bo->metadata);
 	kfree(bo);
 }
@@ -112,84 +114,93 @@
 
 static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 				      struct ttm_placement *placement,
-				      struct ttm_place *placements,
+				      struct ttm_place *places,
 				      u32 domain, u64 flags)
 {
-	u32 c = 0, i;
-
-	placement->placement = placements;
-	placement->busy_placement = placements;
+	u32 c = 0;
 
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+
 		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
-			adev->mc.visible_vram_size < adev->mc.real_vram_size) {
-			placements[c].fpfn =
-				adev->mc.visible_vram_size >> PAGE_SHIFT;
-			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
+		    !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+		    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+			places[c].fpfn = visible_pfn;
+			places[c].lpfn = 0;
+			places[c].flags = TTM_PL_FLAG_WC |
+				TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
+				TTM_PL_FLAG_TOPDOWN;
+			c++;
 		}
-		placements[c].fpfn = 0;
-		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 			TTM_PL_FLAG_VRAM;
-		if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
-			placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
+		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+			places[c].lpfn = visible_pfn;
+		else
+			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+		c++;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
-		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
-			placements[c].fpfn = 0;
-			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_TT;
+		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+			places[c].flags |= TTM_PL_FLAG_WC |
 				TTM_PL_FLAG_UNCACHED;
-		} else {
-			placements[c].fpfn = 0;
-			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
-		}
+		else
+			places[c].flags |= TTM_PL_FLAG_CACHED;
+		c++;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
-		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
-			placements[c].fpfn = 0;
-			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_SYSTEM;
+		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+			places[c].flags |= TTM_PL_FLAG_WC |
 				TTM_PL_FLAG_UNCACHED;
-		} else {
-			placements[c].fpfn = 0;
-			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
-		}
+		else
+			places[c].flags |= TTM_PL_FLAG_CACHED;
+		c++;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
-		placements[c].fpfn = 0;
-		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-			AMDGPU_PL_FLAG_GDS;
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+		c++;
 	}
+
 	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
-		placements[c].fpfn = 0;
-		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-			AMDGPU_PL_FLAG_GWS;
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+		c++;
 	}
+
 	if (domain & AMDGPU_GEM_DOMAIN_OA) {
-		placements[c].fpfn = 0;
-		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-			AMDGPU_PL_FLAG_OA;
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+		c++;
 	}
 
 	if (!c) {
-		placements[c].fpfn = 0;
-		placements[c++].flags = TTM_PL_MASK_CACHING |
-			TTM_PL_FLAG_SYSTEM;
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		c++;
 	}
-	placement->num_placement = c;
-	placement->num_busy_placement = c;
 
-	for (i = 0; i < c; i++) {
-		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
-			(placements[i].flags & TTM_PL_FLAG_VRAM) &&
-			!placements[i].fpfn)
-			placements[i].lpfn =
-				adev->mc.visible_vram_size >> PAGE_SHIFT;
-		else
-			placements[i].lpfn = 0;
-	}
+	placement->num_placement = c;
+	placement->placement = places;
+
+	placement->num_busy_placement = c;
+	placement->busy_placement = places;
 }
 
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
@@ -211,6 +222,98 @@
 	bo->placement.busy_placement = bo->placements;
 }
 
+/**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+			    unsigned long size, int align,
+			    u32 domain, struct amdgpu_bo **bo_ptr,
+			    u64 *gpu_addr, void **cpu_addr)
+{
+	int r;
+
+	r = amdgpu_bo_create(adev, size, align, true, domain,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, NULL, bo_ptr);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
+		return r;
+	}
+
+	r = amdgpu_bo_reserve(*bo_ptr, false);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
+		goto error_free;
+	}
+
+	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+	if (r) {
+		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+		goto error_unreserve;
+	}
+
+	if (cpu_addr) {
+		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+		if (r) {
+			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
+			goto error_unreserve;
+		}
+	}
+
+	amdgpu_bo_unreserve(*bo_ptr);
+
+	return 0;
+
+error_unreserve:
+	amdgpu_bo_unreserve(*bo_ptr);
+
+error_free:
+	amdgpu_bo_unref(bo_ptr);
+
+	return r;
+}
+
+/**
+ * amdgpu_bo_free_kernel - free BO for kernel use
+ *
+ * @bo: amdgpu BO to free
+ * @gpu_addr: GPU address to clear, may be NULL
+ * @cpu_addr: CPU mapping to unmap and clear, may be NULL
+ *
+ * Unmaps and unpins a BO that was allocated for kernel internal use.
+ */
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+			   void **cpu_addr)
+{
+	if (*bo == NULL)
+		return;
+
+	if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
+		if (cpu_addr)
+			amdgpu_bo_kunmap(*bo);
+
+		amdgpu_bo_unpin(*bo);
+		amdgpu_bo_unreserve(*bo);
+	}
+	amdgpu_bo_unref(bo);
+
+	if (gpu_addr)
+		*gpu_addr = 0;
+
+	if (cpu_addr)
+		*cpu_addr = NULL;
+}
+
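
amdgpu_bo_create_kernel() folds the old create/reserve/pin/kmap sequence
(compare the amdgpu_ih.c hunk earlier in this patch) into one call, with
amdgpu_bo_free_kernel() as the symmetric teardown. A hedged usage sketch,
error handling trimmed:

	struct amdgpu_bo *bo;
	u64 gpu_addr;
	void *cpu_ptr;
	int r;

	r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &bo, &gpu_addr, &cpu_ptr);
	if (r)
		return r;

	/* ... use cpu_ptr / gpu_addr ... */

	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
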
 int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 				unsigned long size, int byte_align,
 				bool kernel, u32 domain, u64 flags,
@@ -250,6 +353,7 @@
 	}
 	bo->adev = adev;
 	INIT_LIST_HEAD(&bo->list);
+	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
 	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
 					 AMDGPU_GEM_DOMAIN_GTT |
@@ -277,11 +381,79 @@
 	if (unlikely(r != 0)) {
 		return r;
 	}
+
+	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+		struct fence *fence;
+
+		if (adev->mman.buffer_funcs_ring == NULL ||
+		   !adev->mman.buffer_funcs_ring->ready) {
+			r = -EBUSY;
+			goto fail_free;
+		}
+
+		r = amdgpu_bo_reserve(bo, false);
+		if (unlikely(r != 0))
+			goto fail_free;
+
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		if (unlikely(r != 0))
+			goto fail_unreserve;
+
+		amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+		amdgpu_bo_fence(bo, fence, false);
+		amdgpu_bo_unreserve(bo);
+		fence_put(bo->tbo.moving);
+		bo->tbo.moving = fence_get(fence);
+		fence_put(fence);
+	}
 	*bo_ptr = bo;
 
 	trace_amdgpu_bo_create(bo);
 
 	return 0;
+
+fail_unreserve:
+	amdgpu_bo_unreserve(bo);
+fail_free:
+	amdgpu_bo_unref(&bo);
+	return r;
+}
+
+static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+				   unsigned long size, int byte_align,
+				   struct amdgpu_bo *bo)
+{
+	struct ttm_placement placement = {0};
+	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+	int r;
+
+	if (bo->shadow)
+		return 0;
+
+	bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
+	memset(&placements, 0,
+	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+	amdgpu_ttm_placement_init(adev, &placement,
+				  placements, AMDGPU_GEM_DOMAIN_GTT,
+				  AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+
+	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
+					AMDGPU_GEM_DOMAIN_GTT,
+					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+					NULL, &placement,
+					bo->tbo.resv,
+					&bo->shadow);
+	if (!r) {
+		bo->shadow->parent = amdgpu_bo_ref(bo);
+		mutex_lock(&adev->shadow_list_lock);
+		list_add_tail(&bo->shadow_list, &adev->shadow_list);
+		mutex_unlock(&adev->shadow_list_lock);
+	}
+
+	return r;
 }
 
 int amdgpu_bo_create(struct amdgpu_device *adev,
@@ -293,6 +465,7 @@
 {
 	struct ttm_placement placement = {0};
 	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+	int r;
 
 	memset(&placements, 0,
 	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
@@ -300,9 +473,83 @@
 	amdgpu_ttm_placement_init(adev, &placement,
 				  placements, domain, flags);
 
-	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
-					   domain, flags, sg, &placement,
-					   resv, bo_ptr);
+	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+					domain, flags, sg, &placement,
+					resv, bo_ptr);
+	if (r)
+		return r;
+
+	if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
+		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+		if (r)
+			amdgpu_bo_unref(bo_ptr);
+	}
+
+	return r;
+}
+
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring,
+			       struct amdgpu_bo *bo,
+			       struct reservation_object *resv,
+			       struct fence **fence,
+			       bool direct)
+{
+	struct amdgpu_bo *shadow = bo->shadow;
+	uint64_t bo_addr, shadow_addr;
+	int r;
+
+	if (!shadow)
+		return -EINVAL;
+
+	bo_addr = amdgpu_bo_gpu_offset(bo);
+	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+	r = reservation_object_reserve_shared(bo->tbo.resv);
+	if (r)
+		goto err;
+
+	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
+			       amdgpu_bo_size(bo), resv, fence,
+			       direct);
+	if (!r)
+		amdgpu_bo_fence(bo, *fence, true);
+
+err:
+	return r;
+}
+
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring,
+				  struct amdgpu_bo *bo,
+				  struct reservation_object *resv,
+				  struct fence **fence,
+				  bool direct)
+{
+	struct amdgpu_bo *shadow = bo->shadow;
+	uint64_t bo_addr, shadow_addr;
+	int r;
+
+	if (!shadow)
+		return -EINVAL;
+
+	bo_addr = amdgpu_bo_gpu_offset(bo);
+	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+	r = reservation_object_reserve_shared(bo->tbo.resv);
+	if (r)
+		goto err;
+
+	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+			       amdgpu_bo_size(bo), resv, fence,
+			       direct);
+	if (!r)
+		amdgpu_bo_fence(bo, *fence, true);
+
+err:
+	return r;
 }
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -380,16 +627,17 @@
 		return -EINVAL;
 
 	if (bo->pin_count) {
+		uint32_t mem_type = bo->tbo.mem.mem_type;
+
+		if (domain != amdgpu_mem_type_to_domain(mem_type))
+			return -EINVAL;
+
 		bo->pin_count++;
 		if (gpu_addr)
 			*gpu_addr = amdgpu_bo_gpu_offset(bo);
 
 		if (max_offset != 0) {
-			u64 domain_start;
-			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
-				domain_start = bo->adev->mc.vram_start;
-			else
-				domain_start = bo->adev->mc.gtt_start;
+			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
 			WARN_ON_ONCE(max_offset <
 				     (amdgpu_bo_gpu_offset(bo) - domain_start));
 		}
@@ -401,7 +649,8 @@
 		/* force to pin into visible video ram */
 		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
 		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
-		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
+		    (!max_offset || max_offset >
+		     bo->adev->mc.visible_vram_size)) {
 			if (WARN_ON_ONCE(min_offset >
 					 bo->adev->mc.visible_vram_size))
 				return -EINVAL;
@@ -420,19 +669,28 @@
 	}
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-	if (likely(r == 0)) {
-		bo->pin_count = 1;
-		if (gpu_addr != NULL)
-			*gpu_addr = amdgpu_bo_gpu_offset(bo);
-		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
-			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-				bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
-		} else
-			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
-	} else {
+	if (unlikely(r)) {
 		dev_err(bo->adev->dev, "%p pin failed\n", bo);
+		goto error;
 	}
+	r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+	if (unlikely(r)) {
+		dev_err(bo->adev->dev, "%p bind failed\n", bo);
+		goto error;
+	}
+
+	bo->pin_count = 1;
+	if (gpu_addr != NULL)
+		*gpu_addr = amdgpu_bo_gpu_offset(bo);
+	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+		bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+			bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+		bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+	}
+
+error:
 	return r;
 }
 
@@ -457,16 +715,20 @@
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-	if (likely(r == 0)) {
-		if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
-			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-				bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
-		} else
-			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
-	} else {
+	if (unlikely(r)) {
 		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+		goto error;
 	}
+
+	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+		bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+			bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+		bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+	}
+
+error:
 	return r;
 }
 
@@ -637,7 +899,8 @@
 	for (i = 0; i < abo->placement.num_placement; i++) {
 		/* Force into visible VRAM */
 		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
-		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
+		    (!abo->placements[i].lpfn ||
+		     abo->placements[i].lpfn > lpfn))
 			abo->placements[i].lpfn = lpfn;
 	}
 	r = ttm_bo_validate(bo, &abo->placement, false, false);
@@ -674,3 +937,24 @@
 	else
 		reservation_object_add_excl_fence(resv, fence);
 }
+
+/**
+ * amdgpu_bo_gpu_offset - return GPU offset of bo
+ * @bo:	amdgpu object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: the object should either be pinned or reserved when calling this
+ * function; the WARN_ON_ONCE checks below catch violations for debugging.
+ */
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+{
+	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
+		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
+	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+		     !bo->pin_count);
+	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+
+	return bo->tbo.offset;
+}
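
For illustration, a minimal usage sketch of the new kernel-BO helpers above
(the function names and the one-page size are hypothetical; error handling
is reduced to the essentials):

	/* Allocate one pinned, CPU-mapped page in GTT for internal use. */
	static int example_alloc_scratch(struct amdgpu_device *adev,
					 struct amdgpu_bo **bo,
					 u64 *gpu_addr, void **cpu_addr)
	{
		return amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_GTT, bo,
					       gpu_addr, cpu_addr);
	}

	/* Unmaps, unpins and drops the reference; resets both addresses. */
	static void example_free_scratch(struct amdgpu_bo **bo,
					 u64 *gpu_addr, void **cpu_addr)
	{
		amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
	}
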
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index bdb01d9..8255034 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -31,6 +31,8 @@
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
 
+#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
+
 /**
  * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
  * @mem_type:	ttm memory type
@@ -85,21 +87,6 @@
 	ttm_bo_unreserve(&bo->tbo);
 }
 
-/**
- * amdgpu_bo_gpu_offset - return GPU offset of bo
- * @bo:	amdgpu object for which we query the offset
- *
- * Returns current GPU offset of the object.
- *
- * Note: object should either be pinned or reserved when calling this
- * function, it might be useful to add check for this for debugging.
- */
-static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
-{
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-	return bo->tbo.offset;
-}
-
 static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
 {
 	return bo->tbo.num_pages << PAGE_SHIFT;
@@ -139,6 +126,12 @@
 				struct ttm_placement *placement,
 			        struct reservation_object *resv,
 				struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+			    unsigned long size, int align,
+			    u32 domain, struct amdgpu_bo **bo_ptr,
+			    u64 *gpu_addr, void **cpu_addr);
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+			   void **cpu_addr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
@@ -165,6 +158,19 @@
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared);
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring,
+			       struct amdgpu_bo *bo,
+			       struct reservation_object *resv,
+			       struct fence **fence, bool direct);
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring,
+				  struct amdgpu_bo *bo,
+				  struct reservation_object *resv,
+				  struct fence **fence,
+				  bool direct);
 
 /*
  * sub allocation
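
A rough sketch of how the shadow helpers declared above could be driven
around a GPU reset (the reset flow and ring choice are assumptions; the
returned fences still have to be waited on by the caller):

	/* Copy the BO into its GTT shadow before the reset ... */
	static int example_backup(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo, struct fence **fence)
	{
		return amdgpu_bo_backup_to_shadow(adev, ring, bo,
						  bo->tbo.resv, fence, false);
	}

	/* ... and copy it back afterwards. direct=true bypasses the
	 * scheduler, which is useful while it is still stopped.
	 */
	static int example_restore(struct amdgpu_device *adev,
				   struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo, struct fence **fence)
	{
		return amdgpu_bo_restore_from_shadow(adev, ring, bo,
						     bo->tbo.resv, fence, true);
	}
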
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index d153149..8e67c12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -25,6 +25,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 #include "atombios_encoders.h"
+#include "amdgpu_pll.h"
 #include <asm/div64.h>
 #include <linux/gcd.h>
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 5cc7052..d4ec3cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1103,54 +1103,46 @@
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->pp_enabled)
+	if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+		/* enable/disable UVD */
+		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_powergate_uvd(adev, !enable);
-	else {
-		if (adev->pm.funcs->powergate_uvd) {
+		mutex_unlock(&adev->pm.mutex);
+	} else {
+		if (enable) {
 			mutex_lock(&adev->pm.mutex);
-			/* enable/disable UVD */
-			amdgpu_dpm_powergate_uvd(adev, !enable);
+			adev->pm.dpm.uvd_active = true;
+			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
 			mutex_unlock(&adev->pm.mutex);
 		} else {
-			if (enable) {
-				mutex_lock(&adev->pm.mutex);
-				adev->pm.dpm.uvd_active = true;
-				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
-				mutex_unlock(&adev->pm.mutex);
-			} else {
-				mutex_lock(&adev->pm.mutex);
-				adev->pm.dpm.uvd_active = false;
-				mutex_unlock(&adev->pm.mutex);
-			}
-			amdgpu_pm_compute_clocks(adev);
+			mutex_lock(&adev->pm.mutex);
+			adev->pm.dpm.uvd_active = false;
+			mutex_unlock(&adev->pm.mutex);
 		}
-
+		amdgpu_pm_compute_clocks(adev);
 	}
 }
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->pp_enabled)
+	if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+		/* enable/disable VCE */
+		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_powergate_vce(adev, !enable);
-	else {
-		if (adev->pm.funcs->powergate_vce) {
+		mutex_unlock(&adev->pm.mutex);
+	} else {
+		if (enable) {
 			mutex_lock(&adev->pm.mutex);
-			amdgpu_dpm_powergate_vce(adev, !enable);
+			adev->pm.dpm.vce_active = true;
+			/* XXX select vce level based on ring/task */
+			adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
 			mutex_unlock(&adev->pm.mutex);
 		} else {
-			if (enable) {
-				mutex_lock(&adev->pm.mutex);
-				adev->pm.dpm.vce_active = true;
-				/* XXX select vce level based on ring/task */
-				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
-				mutex_unlock(&adev->pm.mutex);
-			} else {
-				mutex_lock(&adev->pm.mutex);
-				adev->pm.dpm.vce_active = false;
-				mutex_unlock(&adev->pm.mutex);
-			}
-			amdgpu_pm_compute_clocks(adev);
+			mutex_lock(&adev->pm.mutex);
+			adev->pm.dpm.vce_active = false;
+			mutex_unlock(&adev->pm.mutex);
 		}
+		amdgpu_pm_compute_clocks(adev);
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index c5738a22..1e7f160 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -30,6 +30,7 @@
 #include "amdgpu_pm.h"
 #include <drm/amdgpu_drm.h>
 #include "amdgpu_powerplay.h"
+#include "si_dpm.h"
 #include "cik_dpm.h"
 #include "vi_dpm.h"
 
@@ -52,8 +53,6 @@
 		pp_init->chip_family = adev->family;
 		pp_init->chip_id = adev->asic_type;
 		pp_init->device = amdgpu_cgs_create_device(adev);
-		pp_init->powercontainment_enabled = amdgpu_powercontainment;
-
 		ret = amd_powerplay_init(pp_init, amd_pp);
 		kfree(pp_init);
 #endif
@@ -61,6 +60,15 @@
 		amd_pp->pp_handle = (void *)adev;
 
 		switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+		case CHIP_TAHITI:
+		case CHIP_PITCAIRN:
+		case CHIP_VERDE:
+		case CHIP_OLAND:
+		case CHIP_HAINAN:
+			amd_pp->ip_funcs = &si_dpm_ip_funcs;
+		break;
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 		case CHIP_BONAIRE:
 		case CHIP_HAWAII:
@@ -106,11 +114,10 @@
 		break;
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-		adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
-		break;
+	case CHIP_TOPAZ:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
-		adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
+		adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
 		break;
 	/* These chips don't have powerplay implementations */
 	case CHIP_BONAIRE:
@@ -118,7 +125,6 @@
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 	case CHIP_KAVERI:
-	case CHIP_TOPAZ:
 	default:
 		adev->pp_enabled = false;
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 85aeb0a..777f11b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -222,33 +222,16 @@
 
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {
-		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0,
-				     NULL, NULL, &ring->ring_obj);
+		r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_GTT,
+					    &ring->ring_obj,
+					    &ring->gpu_addr,
+					    (void **)&ring->ring);
 		if (r) {
 			dev_err(adev->dev, "(%d) ring create failed\n", r);
 			return r;
 		}
-		r = amdgpu_bo_reserve(ring->ring_obj, false);
-		if (unlikely(r != 0))
-			return r;
-		r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
-					&ring->gpu_addr);
-		if (r) {
-			amdgpu_bo_unreserve(ring->ring_obj);
-			dev_err(adev->dev, "(%d) ring pin failed\n", r);
-			return r;
-		}
-		r = amdgpu_bo_kmap(ring->ring_obj,
-				       (void **)&ring->ring);
-
 		memset((void *)ring->ring, 0, ring->ring_size);
-
-		amdgpu_bo_unreserve(ring->ring_obj);
-		if (r) {
-			dev_err(adev->dev, "(%d) ring map failed\n", r);
-			return r;
-		}
 	}
 	ring->ptr_mask = (ring->ring_size / 4) - 1;
 	ring->max_dw = max_dw;
@@ -269,28 +252,17 @@
  */
 void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
-	int r;
-	struct amdgpu_bo *ring_obj;
-
-	ring_obj = ring->ring_obj;
 	ring->ready = false;
-	ring->ring = NULL;
-	ring->ring_obj = NULL;
 
 	amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
 	amdgpu_wb_free(ring->adev, ring->fence_offs);
 	amdgpu_wb_free(ring->adev, ring->rptr_offs);
 	amdgpu_wb_free(ring->adev, ring->wptr_offs);
 
-	if (ring_obj) {
-		r = amdgpu_bo_reserve(ring_obj, false);
-		if (likely(r == 0)) {
-			amdgpu_bo_kunmap(ring_obj);
-			amdgpu_bo_unpin(ring_obj);
-			amdgpu_bo_unreserve(ring_obj);
-		}
-		amdgpu_bo_unref(&ring_obj);
-	}
+	amdgpu_bo_free_kernel(&ring->ring_obj,
+			      &ring->gpu_addr,
+			      (void **)&ring->ring);
+
 	amdgpu_debugfs_ring_fini(ring);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 05a53f4..b827c75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -111,7 +111,7 @@
 		amdgpu_bo_kunmap(gtt_obj[i]);
 
 		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-				       size, NULL, &fence);
+				       size, NULL, &fence, false);
 
 		if (r) {
 			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -156,7 +156,7 @@
 		amdgpu_bo_kunmap(vram_obj);
 
 		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
-				       size, NULL, &fence);
+				       size, NULL, &fence, false);
 
 		if (r) {
 			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9b61c8b..dfb1223 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -34,6 +34,7 @@
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_memory.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/seq_file.h>
@@ -74,7 +75,7 @@
 	ttm_mem_global_release(ref->object);
 }
 
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 {
 	struct drm_global_reference *global_ref;
 	struct amdgpu_ring *ring;
@@ -88,10 +89,10 @@
 	global_ref->init = &amdgpu_ttm_mem_global_init;
 	global_ref->release = &amdgpu_ttm_mem_global_release;
 	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
+	if (r) {
 		DRM_ERROR("Failed setting up TTM memory accounting "
 			  "subsystem.\n");
-		return r;
+		goto error_mem;
 	}
 
 	adev->mman.bo_global_ref.mem_glob =
@@ -102,26 +103,30 @@
 	global_ref->init = &ttm_bo_global_init;
 	global_ref->release = &ttm_bo_global_release;
 	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
+	if (r) {
 		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&adev->mman.mem_global_ref);
-		return r;
+		goto error_bo;
 	}
 
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
 	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
 				  rq, amdgpu_sched_jobs);
-	if (r != 0) {
+	if (r) {
 		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
-		drm_global_item_unref(&adev->mman.mem_global_ref);
-		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
-		return r;
+		goto error_entity;
 	}
 
 	adev->mman.mem_global_referenced = true;
 
 	return 0;
+
+error_entity:
+	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+error_bo:
+	drm_global_item_unref(&adev->mman.mem_global_ref);
+error_mem:
+	return r;
 }
 
 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
@@ -196,6 +201,7 @@
 		.lpfn = 0,
 		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
 	};
+	unsigned i;
 
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
 		placement->placement = &placements;
@@ -207,10 +213,25 @@
 	rbo = container_of(bo, struct amdgpu_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (rbo->adev->mman.buffer_funcs_ring->ready == false)
+		if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
 			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
-		else
+		} else {
 			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
+			for (i = 0; i < rbo->placement.num_placement; ++i) {
+				if (!(rbo->placements[i].flags &
+				      TTM_PL_FLAG_TT))
+					continue;
+
+				if (rbo->placements[i].lpfn)
+					continue;
+
+				/* set an upper limit to force direct
+				 * allocation of address space for the BO.
+				 */
+				rbo->placements[i].lpfn =
+					rbo->adev->mc.gtt_size >> PAGE_SHIFT;
+			}
+		}
 		break;
 	case TTM_PL_TT:
 	default:
@@ -255,22 +276,26 @@
 	new_start = new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
-	case TTM_PL_VRAM:
-		old_start += adev->mc.vram_start;
-		break;
 	case TTM_PL_TT:
-		old_start += adev->mc.gtt_start;
+		r = amdgpu_ttm_bind(bo->ttm, old_mem);
+		if (r)
+			return r;
+
+	case TTM_PL_VRAM:
+		old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
 		break;
 	default:
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
 		return -EINVAL;
 	}
 	switch (new_mem->mem_type) {
-	case TTM_PL_VRAM:
-		new_start += adev->mc.vram_start;
-		break;
 	case TTM_PL_TT:
-		new_start += adev->mc.gtt_start;
+		r = amdgpu_ttm_bind(bo->ttm, new_mem);
+		if (r)
+			return r;
+
+	case TTM_PL_VRAM:
+		new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
 		break;
 	default:
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -285,7 +310,7 @@
 
 	r = amdgpu_copy_buffer(ring, old_start, new_start,
 			       new_mem->num_pages * PAGE_SIZE, /* bytes */
-			       bo->resv, &fence);
+			       bo->resv, &fence, false);
 	if (r)
 		return r;
 
@@ -314,7 +339,7 @@
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
-	placements.lpfn = 0;
+	placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait_gpu);
@@ -335,7 +360,7 @@
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
@@ -361,14 +386,14 @@
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
-	placements.lpfn = 0;
+	placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -435,8 +460,7 @@
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, interruptible,
-				       no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
 		if (r) {
 			return r;
 		}
@@ -524,6 +548,7 @@
 	spinlock_t              guptasklock;
 	struct list_head        guptasks;
 	atomic_t		mmu_invalidations;
+	struct list_head        list;
 };
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -641,7 +666,6 @@
 				   struct ttm_mem_reg *bo_mem)
 {
 	struct amdgpu_ttm_tt *gtt = (void*)ttm;
-	uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
 	int r;
 
 	if (gtt->userptr) {
@@ -651,7 +675,7 @@
 			return r;
 		}
 	}
-	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
 		     ttm->num_pages, bo_mem, ttm);
@@ -662,14 +686,62 @@
 	    bo_mem->mem_type == AMDGPU_PL_OA)
 		return -EINVAL;
 
+	return 0;
+}
+
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+	return gtt && !list_empty(&gtt->list);
+}
+
+int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	uint32_t flags;
+	int r;
+
+	if (!ttm || amdgpu_ttm_is_bound(ttm))
+		return 0;
+
+	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
 	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
 
 	if (r) {
-		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
-			  ttm->num_pages, (unsigned)gtt->offset);
+		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+			  ttm->num_pages, gtt->offset);
 		return r;
 	}
+	spin_lock(&gtt->adev->gtt_list_lock);
+	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+	spin_unlock(&gtt->adev->gtt_list_lock);
+	return 0;
+}
+
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+{
+	struct amdgpu_ttm_tt *gtt, *tmp;
+	struct ttm_mem_reg bo_mem;
+	uint32_t flags;
+	int r;
+
+	bo_mem.mem_type = TTM_PL_TT;
+	spin_lock(&adev->gtt_list_lock);
+	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
+		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
+		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
+				     flags);
+		if (r) {
+			spin_unlock(&adev->gtt_list_lock);
+			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+				  gtt->ttm.ttm.num_pages, gtt->offset);
+			return r;
+		}
+	}
+	spin_unlock(&adev->gtt_list_lock);
 	return 0;
 }
 
@@ -677,6 +749,9 @@
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
+	if (!amdgpu_ttm_is_bound(ttm))
+		return 0;
+
 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
 	if (gtt->adev->gart.ready)
 		amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
@@ -684,6 +759,10 @@
 	if (gtt->userptr)
 		amdgpu_ttm_tt_unpin_userptr(ttm);
 
+	spin_lock(&gtt->adev->gtt_list_lock);
+	list_del_init(&gtt->list);
+	spin_unlock(&gtt->adev->gtt_list_lock);
+
 	return 0;
 }
 
@@ -720,6 +799,7 @@
 		kfree(gtt);
 		return NULL;
 	}
+	INIT_LIST_HEAD(&gtt->list);
 	return &gtt->ttm.ttm;
 }
 
@@ -950,6 +1030,8 @@
 	struct list_head *res = lru->lru[tbo->mem.mem_type];
 
 	lru->lru[tbo->mem.mem_type] = &tbo->lru;
+	while ((++lru)->lru[tbo->mem.mem_type] == res)
+		lru->lru[tbo->mem.mem_type] = &tbo->lru;
 
 	return res;
 }
@@ -960,6 +1042,8 @@
 	struct list_head *res = lru->swap_lru;
 
 	lru->swap_lru = &tbo->swap;
+	while ((++lru)->swap_lru == res)
+		lru->swap_lru = &tbo->swap;
 
 	return res;
 }
@@ -987,10 +1071,6 @@
 	unsigned i, j;
 	int r;
 
-	r = amdgpu_ttm_global_init(adev);
-	if (r) {
-		return r;
-	}
 	/* No other user of the address space, so set it to 0 */
 	r = ttm_bo_device_init(&adev->mman.bdev,
 			       adev->mman.bo_global_ref.ref.object,
@@ -1011,6 +1091,10 @@
 		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
 	}
 
+	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+		adev->mman.guard.lru[j] = NULL;
+	adev->mman.guard.swap_lru = NULL;
+
 	adev->mman.initialized = true;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 				adev->mc.real_vram_size >> PAGE_SHIFT);
@@ -1151,7 +1235,7 @@
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct fence **fence)
+		       struct fence **fence, bool direct_submit)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
@@ -1195,8 +1279,79 @@
 
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
+	if (direct_submit) {
+		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
+				       NULL, NULL, fence);
+		if (r) {
+			DRM_ERROR("Error scheduling IBs (%d)\n", r);
+			goto error_free;
+		}
+		job->fence = fence_get(*fence);
+		amdgpu_job_free(job);
+	} else {
+		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+		if (r)
+			goto error_free;
+	}
+
+	return r;
+
+error_free:
+	amdgpu_job_free(job);
+	return r;
+}
+
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+		uint32_t src_data,
+		struct reservation_object *resv,
+		struct fence **fence)
+{
+	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_job *job;
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+	uint32_t max_bytes, byte_count;
+	uint64_t dst_offset;
+	unsigned int num_loops, num_dw;
+	unsigned int i;
+	int r;
+
+	byte_count = bo->tbo.num_pages << PAGE_SHIFT;
+	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
+
+	/* for IB padding */
+	while (num_dw & 0x7)
+		num_dw++;
+
+	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+	if (r)
+		return r;
+
+	if (resv) {
+		r = amdgpu_sync_resv(adev, &job->sync, resv,
+				AMDGPU_FENCE_OWNER_UNDEFINED);
+		if (r) {
+			DRM_ERROR("sync failed (%d).\n", r);
+			goto error_free;
+		}
+	}
+
+	dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
+	for (i = 0; i < num_loops; i++) {
+		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+				dst_offset, cur_size_in_bytes);
+
+		dst_offset += cur_size_in_bytes;
+		byte_count -= cur_size_in_bytes;
+	}
+
+	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+	WARN_ON(job->ibs[0].length_dw > num_dw);
 	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+			AMDGPU_FENCE_OWNER_UNDEFINED, fence);
 	if (r)
 		goto error_free;
 
@@ -1387,3 +1542,8 @@
 
 #endif
 }
+
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
+{
+	return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
+}
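
The new gtt_list bookkeeping above exists so GART mappings can be
replayed after a GPU reset; a sketch of the expected call site (the
exact reset sequence is an assumption here):

	/* After the ASIC reset has reinitialized the GART table memory,
	 * rebind everything that was bound before the reset.
	 */
	static int example_post_reset(struct amdgpu_device *adev)
	{
		int r = amdgpu_ttm_recover_gart(adev);

		if (r)
			DRM_ERROR("GART recovery failed (%d)\n", r);
		return r;
	}
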
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
new file mode 100644
index 0000000..3ee825f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_TTM_H__
+#define __AMDGPU_TTM_H__
+
+#include "gpu_scheduler.h"
+
+#define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
+#define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
+#define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)
+
+#define AMDGPU_PL_FLAG_GDS		(TTM_PL_FLAG_PRIV << 0)
+#define AMDGPU_PL_FLAG_GWS		(TTM_PL_FLAG_PRIV << 1)
+#define AMDGPU_PL_FLAG_OA		(TTM_PL_FLAG_PRIV << 2)
+
+#define AMDGPU_TTM_LRU_SIZE	20
+
+struct amdgpu_mman_lru {
+	struct list_head		*lru[TTM_NUM_MEM_TYPES];
+	struct list_head		*swap_lru;
+};
+
+struct amdgpu_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct drm_global_reference	mem_global_ref;
+	struct ttm_bo_device		bdev;
+	bool				mem_global_referenced;
+	bool				initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry			*vram;
+	struct dentry			*gtt;
+#endif
+
+	/* buffer handling */
+	const struct amdgpu_buffer_funcs	*buffer_funcs;
+	struct amdgpu_ring			*buffer_funcs_ring;
+	/* Scheduler entity for buffer moves */
+	struct amd_sched_entity			entity;
+
+	/* custom LRU management */
+	struct amdgpu_mman_lru			log2_size[AMDGPU_TTM_LRU_SIZE];
+	/* guard for log2_size array, don't add anything in between */
+	struct amdgpu_mman_lru			guard;
+};
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+		       uint64_t src_offset,
+		       uint64_t dst_offset,
+		       uint32_t byte_count,
+		       struct reservation_object *resv,
+		       struct fence **fence, bool direct_submit);
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+			uint32_t src_data,
+			struct reservation_object *resv,
+			struct fence **fence);
+
+int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
+int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+#endif
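
To illustrate the new direct_submit parameter declared above (a sketch;
the wrapper name is hypothetical):

	static int example_copy(struct amdgpu_ring *ring, uint64_t src,
				uint64_t dst, uint32_t size,
				struct reservation_object *resv,
				struct fence **fence, bool in_reset)
	{
		/* Direct submission emits the IB straight to the ring,
		 * bypassing the scheduler entity; only safe when the
		 * caller owns the ring, e.g. during reset handling.
		 */
		return amdgpu_copy_buffer(ring, src, dst, size, resv, fence,
					  in_reset /* direct_submit */);
	}
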
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5cc95f1..7a05f79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,35 +247,28 @@
 	const struct common_firmware_header *header = NULL;
 
 	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+			       AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
-		err = -ENOMEM;
 		goto failed;
 	}
 
 	err = amdgpu_bo_reserve(*bo, false);
 	if (err) {
-		amdgpu_bo_unref(bo);
 		dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
-		goto failed;
+		goto failed_reserve;
 	}
 
 	err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
 	if (err) {
-		amdgpu_bo_unreserve(*bo);
-		amdgpu_bo_unref(bo);
 		dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
-		goto failed;
+		goto failed_pin;
 	}
 
 	err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
-		amdgpu_bo_unpin(*bo);
-		amdgpu_bo_unreserve(*bo);
-		amdgpu_bo_unref(bo);
-		goto failed;
+		goto failed_kmap;
 	}
 
 	amdgpu_bo_unreserve(*bo);
@@ -290,10 +283,16 @@
 			fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 		}
 	}
+	return 0;
 
+failed_kmap:
+	amdgpu_bo_unpin(*bo);
+failed_pin:
+	amdgpu_bo_unreserve(*bo);
+failed_reserve:
+	amdgpu_bo_unref(bo);
 failed:
-	if (err)
-		adev->firmware.smu_load = false;
+	adev->firmware.smu_load = false;
 
 	return err;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b11f4e8..25dd58a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -201,39 +201,14 @@
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
 		  +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
 		  +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
-	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, NULL, &adev->uvd.vcpu_bo);
+	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
+				    &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 		return r;
 	}
 
-	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-	if (r) {
-		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
-		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
-		return r;
-	}
-
-	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
-			  &adev->uvd.gpu_addr);
-	if (r) {
-		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
-		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
-		return r;
-	}
-
-	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
-	if (r) {
-		dev_err(adev->dev, "(%d) UVD map failed\n", r);
-		return r;
-	}
-
-	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-
 	ring = &adev->uvd.ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 	r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
@@ -274,22 +249,13 @@
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
-	int r;
-
 	kfree(adev->uvd.saved_bo);
 
 	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
 
-	if (adev->uvd.vcpu_bo) {
-		r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-		if (!r) {
-			amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
-			amdgpu_bo_unpin(adev->uvd.vcpu_bo);
-			amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-		}
-
-		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
-	}
+	amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
+			      &adev->uvd.gpu_addr,
+			      (void **)&adev->uvd.cpu_addr);
 
 	amdgpu_ring_fini(&adev->uvd.ring);
 
@@ -323,7 +289,7 @@
 	if (!adev->uvd.saved_bo)
 		return -ENOMEM;
 
-	memcpy(adev->uvd.saved_bo, ptr, size);
+	memcpy_fromio(adev->uvd.saved_bo, ptr, size);
 
 	return 0;
 }
@@ -340,7 +306,7 @@
 	ptr = adev->uvd.cpu_addr;
 
 	if (adev->uvd.saved_bo != NULL) {
-		memcpy(ptr, adev->uvd.saved_bo, size);
+		memcpy_toio(ptr, adev->uvd.saved_bo, size);
 		kfree(adev->uvd.saved_bo);
 		adev->uvd.saved_bo = NULL;
 	} else {
@@ -349,11 +315,11 @@
 
 		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-		memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
-			(adev->uvd.fw->size) - offset);
+		memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
+			    le32_to_cpu(hdr->ucode_size_bytes));
 		size -= le32_to_cpu(hdr->ucode_size_bytes);
 		ptr += le32_to_cpu(hdr->ucode_size_bytes);
-		memset(ptr, 0, size);
+		memset_io(ptr, 0, size);
 	}
 
 	return 0;
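
The memcpy -> memcpy_toio/memcpy_fromio/memset_io conversions above
matter because the UVD VCPU BO lives in VRAM: its kernel mapping goes
through the PCI BAR and is I/O memory, which plain memcpy()/memset()
are not guaranteed to handle on all architectures. The pattern in
isolation (a sketch with hypothetical arguments):

	static void example_load_fw(void __iomem *vcpu, const void *fw,
				    size_t fw_size, size_t tail)
	{
		memcpy_toio(vcpu, fw, fw_size);		/* host -> VRAM */
		memset_io(vcpu + fw_size, 0, tail);	/* clear the rest */
	}
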
@@ -843,6 +809,7 @@
 				return r;
 			break;
 		case mmUVD_ENGINE_CNTL:
+		case mmUVD_NO_OP:
 			break;
 		default:
 			DRM_ERROR("Invalid reg 0x%X!\n", reg);
@@ -915,6 +882,10 @@
 		return -EINVAL;
 	}
 
+	r = amdgpu_cs_sysvm_access_required(parser);
+	if (r)
+		return r;
+
 	ctx.parser = parser;
 	ctx.buf_sizes = buf_sizes;
 	ctx.ib_idx = ib_idx;
@@ -981,8 +952,10 @@
 	ib->ptr[3] = addr >> 32;
 	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
 	ib->ptr[5] = 0;
-	for (i = 6; i < 16; ++i)
-		ib->ptr[i] = PACKET2(0);
+	for (i = 6; i < 16; i += 2) {
+		ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0);
+		ib->ptr[i+1] = 0;
+	}
 	ib->length_dw = 16;
 
 	if (direct) {
@@ -1114,15 +1087,9 @@
 {
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, uvd.idle_work.work);
-	unsigned i, fences, handles = 0;
+	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
-	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
-
-	for (i = 0; i < adev->uvd.max_handles; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			++handles;
-
-	if (fences == 0 && handles == 0) {
+	if (fences == 0) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, false);
 		} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 05865ce..2c9ea9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -282,8 +282,8 @@
 
 	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
 	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
-		(adev->vce.fw->size) - offset);
+	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+		    adev->vce.fw->size - offset);
 
 	amdgpu_bo_kunmap(adev->vce.vcpu_bo);
 
@@ -634,7 +634,11 @@
 	uint32_t allocated = 0;
 	uint32_t tmp, handle = 0;
 	uint32_t *size = &tmp;
-	int i, r = 0, idx = 0;
+	int i, r, idx = 0;
+
+	r = amdgpu_cs_sysvm_access_required(p);
+	if (r)
+		return r;
 
 	while (idx < ib->length_dw) {
 		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
@@ -799,6 +803,18 @@
 	amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		4; /* amdgpu_vce_ring_emit_ib */
+}
+
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
+}
+
 /**
  * amdgpu_vce_ring_test_ring - test if VCE ring is working
  *
@@ -850,8 +866,8 @@
 	struct fence *fence = NULL;
 	long r;
 
-	/* skip vce ring1 ib test for now, since it's not reliable */
-	if (ring == &ring->adev->vce.ring[1])
+	/* skip vce ring1/2 ib test for now, since it's not reliable */
+	if (ring != &ring->adev->vce.ring[0])
 		return 0;
 
 	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 63f83d0..12729d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -42,5 +42,7 @@
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
 void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8e642fc..bd5af32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -51,19 +51,22 @@
  * SI supports 16.
  */
 
-/* Special value that no flush is necessary */
-#define AMDGPU_VM_NO_FLUSH (~0ll)
-
 /* Local structure. Encapsulate some VM table update parameters to reduce
  * the number of function parameters
  */
-struct amdgpu_vm_update_params {
+struct amdgpu_pte_update_params {
+	/* amdgpu device we do this update for */
+	struct amdgpu_device *adev;
 	/* address where to copy page table entries from */
 	uint64_t src;
-	/* DMA addresses to use for mapping */
-	dma_addr_t *pages_addr;
 	/* indirect buffer to fill with commands */
 	struct amdgpu_ib *ib;
+	/* Function which actually does the update */
+	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
+		     uint64_t addr, unsigned count, uint32_t incr,
+		     uint32_t flags);
+	/* indicate update pt or its shadow */
+	bool shadow;
 };
 
 /**
@@ -467,10 +470,9 @@
 }
 
 /**
- * amdgpu_vm_update_pages - helper to call the right asic function
+ * amdgpu_vm_do_set_ptes - helper to call the right asic function
  *
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -480,32 +482,44 @@
  * Traces the parameters and calls the right asic functions
  * to setup the page table using the DMA.
  */
-static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
-				   struct amdgpu_vm_update_params
-					*vm_update_params,
+static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
+				  uint64_t pe, uint64_t addr,
+				  unsigned count, uint32_t incr,
+				  uint32_t flags)
+{
+	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+
+	if (count < 3) {
+		amdgpu_vm_write_pte(params->adev, params->ib, pe,
+				    addr | flags, count, incr);
+
+	} else {
+		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
+				      count, incr, flags);
+	}
+}
+
+/**
+ * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
 				   uint64_t pe, uint64_t addr,
 				   unsigned count, uint32_t incr,
 				   uint32_t flags)
 {
 	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 
-	if (vm_update_params->src) {
-		amdgpu_vm_copy_pte(adev, vm_update_params->ib,
-			pe, (vm_update_params->src + (addr >> 12) * 8), count);
-
-	} else if (vm_update_params->pages_addr) {
-		amdgpu_vm_write_pte(adev, vm_update_params->ib,
-			vm_update_params->pages_addr,
-			pe, addr, count, incr, flags);
-
-	} else if (count < 3) {
-		amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
-				    count, incr, flags);
-
-	} else {
-		amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
-				      count, incr, flags);
-	}
+	amdgpu_vm_copy_pte(params->adev, params->ib, pe,
+			   (params->src + (addr >> 12) * 8), count);
 }
 
 /**
@@ -523,12 +537,11 @@
 	struct amdgpu_ring *ring;
 	struct fence *fence = NULL;
 	struct amdgpu_job *job;
-	struct amdgpu_vm_update_params vm_update_params;
+	struct amdgpu_pte_update_params params;
 	unsigned entries;
 	uint64_t addr;
 	int r;
 
-	memset(&vm_update_params, 0, sizeof(vm_update_params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -546,9 +559,10 @@
 	if (r)
 		goto error;
 
-	vm_update_params.ib = &job->ibs[0];
-	amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
-			       0, 0);
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
+	params.ib = &job->ibs[0];
+	amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
 	WARN_ON(job->ibs[0].length_dw > 64);
@@ -577,55 +591,41 @@
  * Look up the physical address of the page that the pte resolves
  * to and return the pointer for the page table entry.
  */
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 {
 	uint64_t result;
 
-	if (pages_addr) {
-		/* page table offset */
-		result = pages_addr[addr >> PAGE_SHIFT];
+	/* page table offset */
+	result = pages_addr[addr >> PAGE_SHIFT];
 
-		/* in case cpu page size != gpu page size*/
-		result |= addr & (~PAGE_MASK);
-
-	} else {
-		/* No mapping required */
-		result = addr;
-	}
+	/* in case cpu page size != gpu page size */
+	result |= addr & (~PAGE_MASK);
 
 	result &= 0xFFFFFFFFFFFFF000ULL;
 
 	return result;
 }
 
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm)
+static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
+					 struct amdgpu_vm *vm,
+					 bool shadow)
 {
 	struct amdgpu_ring *ring;
-	struct amdgpu_bo *pd = vm->page_directory;
-	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
+	struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+		vm->page_directory;
+	uint64_t pd_addr;
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_vm_update_params vm_update_params;
+	struct amdgpu_pte_update_params params;
 	struct fence *fence = NULL;
 
 	int r;
 
-	memset(&vm_update_params, 0, sizeof(vm_update_params));
+	if (!pd)
+		return 0;
+	pd_addr = amdgpu_bo_gpu_offset(pd);
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	/* padding, etc. */
@@ -638,7 +638,9 @@
 	if (r)
 		return r;
 
-	vm_update_params.ib = &job->ibs[0];
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
+	params.ib = &job->ibs[0];
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -649,19 +651,25 @@
 			continue;
 
 		pt = amdgpu_bo_gpu_offset(bo);
-		if (vm->page_tables[pt_idx].addr == pt)
-			continue;
-		vm->page_tables[pt_idx].addr = pt;
+		if (!shadow) {
+			if (vm->page_tables[pt_idx].addr == pt)
+				continue;
+			vm->page_tables[pt_idx].addr = pt;
+		} else {
+			if (vm->page_tables[pt_idx].shadow_addr == pt)
+				continue;
+			vm->page_tables[pt_idx].shadow_addr = pt;
+		}
 
 		pde = pd_addr + pt_idx * 8;
 		if (((last_pde + 8 * count) != pde) ||
-		    ((last_pt + incr * count) != pt)) {
+		    ((last_pt + incr * count) != pt) ||
+		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, &vm_update_params,
-						       last_pde, last_pt,
-						       count, incr,
-						       AMDGPU_PTE_VALID);
+				amdgpu_vm_do_set_ptes(&params, last_pde,
+						      last_pt, count, incr,
+						      AMDGPU_PTE_VALID);
 			}
 
 			count = 1;
@@ -673,15 +681,14 @@
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, &vm_update_params,
-					last_pde, last_pt,
-					count, incr, AMDGPU_PTE_VALID);
+		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
+				      count, incr, AMDGPU_PTE_VALID);
 
-	if (vm_update_params.ib->length_dw != 0) {
-		amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+	if (params.ib->length_dw != 0) {
+		amdgpu_ring_pad_ib(ring, params.ib);
 		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
 				 AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(vm_update_params.ib->length_dw > ndw);
+		WARN_ON(params.ib->length_dw > ndw);
 		r = amdgpu_job_submit(job, ring, &vm->entity,
 				      AMDGPU_FENCE_OWNER_VM, &fence);
 		if (r)
@@ -703,21 +710,135 @@
 	return r;
 }
 
-/**
- * amdgpu_vm_frag_ptes - add fragment information to PTEs
+/*
+ * amdgpu_vm_update_page_directory - make sure that page directory is valid
  *
  * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
- * @pe_start: first PTE to handle
- * @pe_end: last PTE to handle
- * @addr: addr those PTEs should point to
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm)
+{
+	int r;
+
+	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+	if (r)
+		return r;
+	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
+}
+
+/**
+ * amdgpu_vm_update_ptes - make sure that page tables are valid
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to, the next dst inside the function
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end.
+ */
+static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+				  struct amdgpu_vm *vm,
+				  uint64_t start, uint64_t end,
+				  uint64_t dst, uint32_t flags)
+{
+	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
+
+	uint64_t cur_pe_start, cur_nptes, cur_dst;
+	uint64_t addr; /* next GPU address to be updated */
+	uint64_t pt_idx;
+	struct amdgpu_bo *pt;
+	unsigned nptes; /* next number of ptes to be updated */
+	uint64_t next_pe_start;
+
+	/* initialize the variables */
+	addr = start;
+	pt_idx = addr >> amdgpu_vm_block_size;
+	pt = vm->page_tables[pt_idx].entry.robj;
+	if (params->shadow) {
+		if (!pt->shadow)
+			return;
+		pt = vm->page_tables[pt_idx].entry.robj->shadow;
+	}
+	if ((addr & ~mask) == (end & ~mask))
+		nptes = end - addr;
+	else
+		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+	cur_pe_start = amdgpu_bo_gpu_offset(pt);
+	cur_pe_start += (addr & mask) * 8;
+	cur_nptes = nptes;
+	cur_dst = dst;
+
+	/* for next ptb */
+	addr += nptes;
+	dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+
+	/* walk over the address space and update the page tables */
+	while (addr < end) {
+		pt_idx = addr >> amdgpu_vm_block_size;
+		pt = vm->page_tables[pt_idx].entry.robj;
+		if (params->shadow) {
+			if (!pt->shadow)
+				return;
+			pt = vm->page_tables[pt_idx].entry.robj->shadow;
+		}
+
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+		next_pe_start = amdgpu_bo_gpu_offset(pt);
+		next_pe_start += (addr & mask) * 8;
+
+		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
+		    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
+			/* The next ptb is consecutive to the current one.
+			 * Don't call the update function now; the two ptbs
+			 * will be updated together later.
+			 */
+			cur_nptes += nptes;
+		} else {
+			params->func(params, cur_pe_start, cur_dst, cur_nptes,
+				     AMDGPU_GPU_PAGE_SIZE, flags);
+
+			cur_pe_start = next_pe_start;
+			cur_nptes = nptes;
+			cur_dst = dst;
+		}
+
+		/* for next ptb */
+		addr += nptes;
+		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+	}
+
+	params->func(params, cur_pe_start, cur_dst, cur_nptes,
+		     AMDGPU_GPU_PAGE_SIZE, flags);
+}
+
+/*
+ * amdgpu_vm_frag_ptes - add fragment information to PTEs
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @vm: requested vm
+ * @start: first PTE to handle
+ * @end: last PTE to handle
+ * @dst: addr those PTEs should point to
  * @flags: hw mapping flags
  */
-static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
-				struct amdgpu_vm_update_params
-					*vm_update_params,
-				uint64_t pe_start, uint64_t pe_end,
-				uint64_t addr, uint32_t flags)
+static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
+				struct amdgpu_vm *vm,
+				uint64_t start, uint64_t end,
+				uint64_t dst, uint32_t flags)
 {
 	/**
 	 * The MC L1 TLB supports variable sized pages, based on a fragment
@@ -738,139 +859,44 @@
 	 * allocation size to the fragment size.
 	 */
 
-	/* SI and newer are optimized for 64KB */
-	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
-	uint64_t frag_align = 0x80;
+	const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
 
-	uint64_t frag_start = ALIGN(pe_start, frag_align);
-	uint64_t frag_end = pe_end & ~(frag_align - 1);
+	uint64_t frag_start = ALIGN(start, frag_align);
+	uint64_t frag_end = end & ~(frag_align - 1);
 
-	unsigned count;
-
-	/* Abort early if there isn't anything to do */
-	if (pe_start == pe_end)
-		return;
+	uint32_t frag;
 
 	/* system pages are non-contiguous */
-	if (vm_update_params->src || vm_update_params->pages_addr ||
-		!(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
+	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
+	    (frag_start >= frag_end)) {
 
-		count = (pe_end - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
-				       addr, count, AMDGPU_GPU_PAGE_SIZE,
-				       flags);
+		amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
 		return;
 	}
 
+	/* use more than 64KB fragment size if possible */
+	frag = lower_32_bits(frag_start | frag_end);
+	frag = likely(frag) ? __ffs(frag) : 31;
+
 	/* handle the 4K area at the beginning */
-	if (pe_start != frag_start) {
-		count = (frag_start - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
-				       count, AMDGPU_GPU_PAGE_SIZE, flags);
-		addr += AMDGPU_GPU_PAGE_SIZE * count;
+	if (start != frag_start) {
+		amdgpu_vm_update_ptes(params, vm, start, frag_start,
+				      dst, flags);
+		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
 	}
 
 	/* handle the area in the middle */
-	count = (frag_end - frag_start) / 8;
-	amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
-			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
+	amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
+			      flags | AMDGPU_PTE_FRAG(frag));
 
 	/* handle the 4K area at the end */
-	if (frag_end != pe_end) {
-		addr += AMDGPU_GPU_PAGE_SIZE * count;
-		count = (pe_end - frag_end) / 8;
-		amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
-				       count, AMDGPU_GPU_PAGE_SIZE, flags);
+	if (frag_end != end) {
+		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
+		amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
 	}
 }
 
 /**
- * amdgpu_vm_update_ptes - make sure that page tables are valid
- *
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @dst: destination address to map to, the next dst inside the function
- * @flags: mapping flags
- *
- * Update the page tables in the range @start - @end.
- */
-static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-				  struct amdgpu_vm_update_params
-					*vm_update_params,
-				  struct amdgpu_vm *vm,
-				  uint64_t start, uint64_t end,
-				  uint64_t dst, uint32_t flags)
-{
-	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
-
-	uint64_t cur_pe_start, cur_pe_end, cur_dst;
-	uint64_t addr; /* next GPU address to be updated */
-	uint64_t pt_idx;
-	struct amdgpu_bo *pt;
-	unsigned nptes; /* next number of ptes to be updated */
-	uint64_t next_pe_start;
-
-	/* initialize the variables */
-	addr = start;
-	pt_idx = addr >> amdgpu_vm_block_size;
-	pt = vm->page_tables[pt_idx].entry.robj;
-
-	if ((addr & ~mask) == (end & ~mask))
-		nptes = end - addr;
-	else
-		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
-
-	cur_pe_start = amdgpu_bo_gpu_offset(pt);
-	cur_pe_start += (addr & mask) * 8;
-	cur_pe_end = cur_pe_start + 8 * nptes;
-	cur_dst = dst;
-
-	/* for next ptb*/
-	addr += nptes;
-	dst += nptes * AMDGPU_GPU_PAGE_SIZE;
-
-	/* walk over the address space and update the page tables */
-	while (addr < end) {
-		pt_idx = addr >> amdgpu_vm_block_size;
-		pt = vm->page_tables[pt_idx].entry.robj;
-
-		if ((addr & ~mask) == (end & ~mask))
-			nptes = end - addr;
-		else
-			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
-
-		next_pe_start = amdgpu_bo_gpu_offset(pt);
-		next_pe_start += (addr & mask) * 8;
-
-		if (cur_pe_end == next_pe_start) {
-			/* The next ptb is consecutive to current ptb.
-			 * Don't call amdgpu_vm_frag_ptes now.
-			 * Will update two ptbs together in future.
-			*/
-			cur_pe_end += 8 * nptes;
-		} else {
-			amdgpu_vm_frag_ptes(adev, vm_update_params,
-					    cur_pe_start, cur_pe_end,
-					    cur_dst, flags);
-
-			cur_pe_start = next_pe_start;
-			cur_pe_end = next_pe_start + 8 * nptes;
-			cur_dst = dst;
-		}
-
-		/* for next ptb*/
-		addr += nptes;
-		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
-	}
-
-	amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
-			    cur_pe_end, cur_dst, flags);
-}
-
-/**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
@@ -900,14 +926,19 @@
 	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned nptes, ncmds, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_vm_update_params vm_update_params;
+	struct amdgpu_pte_update_params params;
 	struct fence *f = NULL;
 	int r;
 
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-	memset(&vm_update_params, 0, sizeof(vm_update_params));
-	vm_update_params.src = src;
-	vm_update_params.pages_addr = pages_addr;
+
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
+	params.src = src;
 
 	/* sync to everything on unmapping */
 	if (!(flags & AMDGPU_PTE_VALID))
@@ -924,30 +955,52 @@
 	/* padding, etc. */
 	ndw = 64;
 
-	if (vm_update_params.src) {
+	if (src) {
 		/* only copy commands needed */
 		ndw += ncmds * 7;
 
-	} else if (vm_update_params.pages_addr) {
-		/* header for write data commands */
-		ndw += ncmds * 4;
+		params.func = amdgpu_vm_do_copy_ptes;
 
-		/* body of write data command */
+	} else if (pages_addr) {
+		/* copy commands needed */
+		ndw += ncmds * 7;
+
+		/* and also PTEs */
 		ndw += nptes * 2;
 
+		params.func = amdgpu_vm_do_copy_ptes;
+
 	} else {
 		/* set page commands needed */
 		ndw += ncmds * 10;
 
 		/* two extra commands for begin/end of fragment */
 		ndw += 2 * 10;
+
+		params.func = amdgpu_vm_do_set_ptes;
 	}
 
 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 	if (r)
 		return r;
 
-	vm_update_params.ib = &job->ibs[0];
+	params.ib = &job->ibs[0];
+
+	if (!src && pages_addr) {
+		uint64_t *pte;
+		unsigned i;
+
+		/* Put the PTEs at the end of the IB. */
+		i = ndw - nptes * 2;
+	pte = (uint64_t *)&(job->ibs->ptr[i]);
+		params.src = job->ibs->gpu_addr + i * 4;
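+		/* params.src points at the GPU address of the PTE array,
+		 * so the copy commands can read it straight from the IB
+		 */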
+
+		for (i = 0; i < nptes; ++i) {
+			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
+						    AMDGPU_GPU_PAGE_SIZE);
+			pte[i] |= flags;
+		}
+	}
 
 	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
 	if (r)
@@ -962,11 +1015,13 @@
 	if (r)
 		goto error_free;
 
-	amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
-			      last + 1, addr, flags);
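+	/* update the shadow copy of the page tables first, then the real
+	 * ones, so a consistent backup exists for GPU reset recovery
+	 */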
+	params.shadow = true;
+	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
+	params.shadow = false;
+	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
 
-	amdgpu_ring_pad_ib(ring, vm_update_params.ib);
-	WARN_ON(vm_update_params.ib->length_dw > ndw);
+	amdgpu_ring_pad_ib(ring, params.ib);
+	WARN_ON(params.ib->length_dw > ndw);
 	r = amdgpu_job_submit(job, ring, &vm->entity,
 			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
@@ -1062,28 +1117,32 @@
  *
  * @adev: amdgpu_device pointer
  * @bo_va: requested BO and VM object
- * @mem: ttm mem
+ * @clear: if true clear the entries
  *
  * Fill in the page table entries for @bo_va.
  * Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem)
+			bool clear)
 {
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
 	uint32_t gtt_flags, flags;
+	struct ttm_mem_reg *mem;
 	struct fence *exclusive;
 	uint64_t addr;
 	int r;
 
-	if (mem) {
+	if (clear) {
+		mem = NULL;
+		addr = 0;
+		exclusive = NULL;
+	} else {
 		struct ttm_dma_tt *ttm;
 
+		mem = &bo_va->bo->tbo.mem;
 		addr = (u64)mem->start << PAGE_SHIFT;
 		switch (mem->mem_type) {
 		case TTM_PL_TT:
@@ -1101,13 +1160,11 @@
 		}
 
 		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
-	} else {
-		addr = 0;
-		exclusive = NULL;
 	}
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-	gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
+	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+		adev == bo_va->bo->adev) ? flags : 0;
 
 	spin_lock(&vm->status_lock);
 	if (!list_empty(&bo_va->vm_status))
@@ -1134,7 +1191,7 @@
 	spin_lock(&vm->status_lock);
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	list_del_init(&bo_va->vm_status);
-	if (!mem)
+	if (clear)
 		list_add(&bo_va->vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
@@ -1197,7 +1254,7 @@
 			struct amdgpu_bo_va, vm_status);
 		spin_unlock(&vm->status_lock);
 
-		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, true);
 		if (r)
 			return r;
 
@@ -1342,7 +1399,8 @@
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+				     AMDGPU_GEM_CREATE_SHADOW,
 				     NULL, resv, &pt);
 		if (r)
 			goto error_free;
@@ -1541,7 +1599,8 @@
 
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+			     AMDGPU_GEM_CREATE_SHADOW,
 			     NULL, NULL, &vm->page_directory);
 	if (r)
 		goto error_free_sched_entity;
@@ -1597,10 +1656,16 @@
 		kfree(mapping);
 	}
 
-	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
+	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
+		if (vm->page_tables[i].entry.robj &&
+		    vm->page_tables[i].entry.robj->shadow)
+			amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow);
 		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
+	}
 	drm_free_large(vm->page_tables);
 
+	if (vm->page_directory->shadow)
+		amdgpu_bo_unref(&vm->page_directory->shadow);
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 49a39b1..f7d236f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -497,7 +497,13 @@
 			 * SetPixelClock provides the dividers
 			 */
 			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
-			args.v6.ucPpll = ATOM_EXT_PLL1;
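+			/* SI parts drive the display engine clock from
+			 * PPLL0 instead of the external PLL
+			 */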
+			if (adev->asic_type == CHIP_TAHITI ||
+			    adev->asic_type == CHIP_PITCAIRN ||
+			    adev->asic_type == CHIP_VERDE ||
+			    adev->asic_type == CHIP_OLAND)
+				args.v6.ucPpll = ATOM_PPLL0;
+			else
+				args.v6.ucPpll = ATOM_EXT_PLL1;
 			break;
 		default:
 			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 7f85c2c..f81068b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -88,7 +88,6 @@
 
 	/* timeout */
 	if (args.v2.ucReplyStatus == 1) {
-		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
 		r = -ETIMEDOUT;
 		goto done;
 	}
@@ -339,22 +338,21 @@
 {
 	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
 	u8 msg[DP_DPCD_SIZE];
-	int ret, i;
+	int ret;
 
-	for (i = 0; i < 7; i++) {
-		ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg,
-				       DP_DPCD_SIZE);
-		if (ret == DP_DPCD_SIZE) {
-			memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+	ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV,
+			       msg, DP_DPCD_SIZE);
+	if (ret == DP_DPCD_SIZE) {
+		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-			DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
-				      dig_connector->dpcd);
+		DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+			      dig_connector->dpcd);
 
-			amdgpu_atombios_dp_probe_oui(amdgpu_connector);
+		amdgpu_atombios_dp_probe_oui(amdgpu_connector);
 
-			return 0;
-		}
+		return 0;
 	}
+
 	dig_connector->dpcd[0] = 0;
 	return -EINVAL;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index bc56c8a..b374653 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -27,6 +27,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
 
 #define TARGET_HW_I2C_CLOCK 50
 
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index a5c94b4..1d8c375 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5396,7 +5396,7 @@
 	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
 		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
 
-	ci_dpm_powergate_uvd(adev, false);
+	ci_dpm_powergate_uvd(adev, true);
 
 	if (!amdgpu_ci_is_smc_running(adev))
 		return;
@@ -5874,7 +5874,10 @@
 	pi->pcie_dpm_key_disabled = 0;
 	pi->thermal_sclk_dpm_enabled = 0;
 
-	pi->caps_sclk_ds = true;
+	if (amdgpu_sclk_deep_sleep_en)
+		pi->caps_sclk_ds = true;
+	else
+		pi->caps_sclk_ds = false;
 
 	pi->mclk_strobe_mode_threshold = 40000;
 	pi->mclk_stutter_mode_threshold = 40000;
@@ -6033,7 +6036,7 @@
 
 	pi->caps_dynamic_ac_timing = true;
 
-	pi->uvd_power_gated = false;
+	pi->uvd_power_gated = true;
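+	/* assume UVD starts powered down; it is ungated on first use */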
 
 	/* make sure dc limits are valid */
 	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
@@ -6176,8 +6179,6 @@
 	if (ret)
 		return ret;
 
-	ci_dpm_powergate_uvd(adev, true);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4efc901..825de80 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -67,6 +67,7 @@
 
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_powerplay.h"
+#include "dce_virtual.h"
 
 /*
  * Indirect registers accessor
@@ -1708,6 +1709,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1776,6 +1845,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 5,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1844,6 +1981,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1912,6 +2117,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1980,32 +2253,128 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
 int cik_set_ip_blocks(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_BONAIRE:
-		adev->ip_blocks = bonaire_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
-		break;
-	case CHIP_HAWAII:
-		adev->ip_blocks = hawaii_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
-		break;
-	case CHIP_KAVERI:
-		adev->ip_blocks = kaveri_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
-		break;
-	case CHIP_KABINI:
-		adev->ip_blocks = kabini_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
-		break;
-	case CHIP_MULLINS:
-		adev->ip_blocks = mullins_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
-		break;
-	default:
-		/* FIXME: not supported yet */
-		return -EINVAL;
+	if (adev->enable_virtual_display) {
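+		/* use the virtual DCE IP block in place of the real display hw */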
+		switch (adev->asic_type) {
+		case CHIP_BONAIRE:
+			adev->ip_blocks = bonaire_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
+			break;
+		case CHIP_HAWAII:
+			adev->ip_blocks = hawaii_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
+			break;
+		case CHIP_KAVERI:
+			adev->ip_blocks = kaveri_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
+			break;
+		case CHIP_KABINI:
+			adev->ip_blocks = kabini_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
+			break;
+		case CHIP_MULLINS:
+			adev->ip_blocks = mullins_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
+			break;
+		default:
+			/* FIXME: not supported yet */
+			return -EINVAL;
+		}
+	} else {
+		switch (adev->asic_type) {
+		case CHIP_BONAIRE:
+			adev->ip_blocks = bonaire_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
+			break;
+		case CHIP_HAWAII:
+			adev->ip_blocks = hawaii_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
+			break;
+		case CHIP_KAVERI:
+			adev->ip_blocks = kaveri_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
+			break;
+		case CHIP_KABINI:
+			adev->ip_blocks = kabini_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
+			break;
+		case CHIP_MULLINS:
+			adev->ip_blocks = mullins_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
+			break;
+		default:
+			/* FIXME: not supported yet */
+			return -EINVAL;
+		}
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ee64669..e6d7bf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -694,24 +694,16 @@
 				 uint64_t pe, uint64_t src,
 				 unsigned count)
 {
-	while (count) {
-		unsigned bytes = count * 8;
-		if (bytes > 0x1FFFF8)
-			bytes = 0x1FFFF8;
+	unsigned bytes = count * 8;
 
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
-			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-		ib->ptr[ib->length_dw++] = bytes;
-		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-		ib->ptr[ib->length_dw++] = lower_32_bits(src);
-		ib->ptr[ib->length_dw++] = upper_32_bits(src);
-		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-		pe += bytes;
-		src += bytes;
-		count -= bytes / 8;
-	}
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+	ib->ptr[ib->length_dw++] = bytes;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 }
 
 /**
@@ -719,39 +711,27 @@
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: value to write into the page entries
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
-				  const dma_addr_t *pages_addr, uint64_t pe,
-				  uint64_t addr, unsigned count,
-				  uint32_t incr, uint32_t flags)
+static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+				  uint64_t value, unsigned count,
+				  uint32_t incr)
 {
-	uint64_t value;
-	unsigned ndw;
+	unsigned ndw = count * 2;
 
-	while (count) {
-		ndw = count * 2;
-		if (ndw > 0xFFFFE)
-			ndw = 0xFFFFE;
-
-		/* for non-physically contiguous pages (system) */
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
-			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-		ib->ptr[ib->length_dw++] = pe;
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = ndw;
-		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-			value = amdgpu_vm_map_gart(pages_addr, addr);
-			addr += incr;
-			value |= flags;
-			ib->ptr[ib->length_dw++] = value;
-			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		}
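+	/* one SDMA linear write packet: header, dst addr lo/hi and a
+	 * dword count, followed by ndw dwords carrying the 64-bit PTEs
+	 */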
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = ndw;
+	for (; ndw > 0; ndw -= 2) {
+		ib->ptr[ib->length_dw++] = lower_32_bits(value);
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		value += incr;
 	}
 }
 
@@ -767,40 +747,21 @@
  *
  * Update the page tables using sDMA (CIK).
  */
-static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
-				    uint64_t pe,
+static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 				    uint64_t addr, unsigned count,
 				    uint32_t incr, uint32_t flags)
 {
-	uint64_t value;
-	unsigned ndw;
-
-	while (count) {
-		ndw = count;
-		if (ndw > 0x7FFFF)
-			ndw = 0x7FFFF;
-
-		if (flags & AMDGPU_PTE_VALID)
-			value = addr;
-		else
-			value = 0;
-
-		/* for physically contiguous pages (vram) */
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
-		ib->ptr[ib->length_dw++] = pe; /* dst addr */
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = flags; /* mask */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = value; /* value */
-		ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		ib->ptr[ib->length_dw++] = incr; /* increment size */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
-		pe += ndw * 8;
-		addr += ndw * incr;
-		count -= ndw;
-	}
+	/* for physically contiguous pages (vram) */
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = flags; /* mask */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = incr; /* increment size */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = count; /* number of entries */
 }
 
 /**
@@ -886,6 +847,22 @@
 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
+static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 4; /* cik_sdma_ring_emit_ib */
+}
+
+static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		6 + /* cik_sdma_ring_emit_hdp_flush */
+		3 + /* cik_sdma_ring_emit_hdp_invalidate */
+		6 + /* cik_sdma_ring_emit_pipeline_sync */
+		12 + /* cik_sdma_ring_emit_vm_flush */
+		9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+}
+
 static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 				 bool enable)
 {
@@ -1259,6 +1236,8 @@
 	.test_ib = cik_sdma_ring_test_ib,
 	.insert_nop = cik_sdma_ring_insert_nop,
 	.pad_ib = cik_sdma_ring_pad_ib,
+	.get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
+	.get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 2a11413..f80a083 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -44,6 +44,7 @@
 
 static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
 static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
+static void cz_dpm_fini(struct amdgpu_device *adev);
 
 static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
 {
@@ -350,6 +351,8 @@
 
 		ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
 		if (ps == NULL) {
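+			/* unwind the ps_priv allocations made so far */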
+			for (j = 0; j < i; j++)
+				kfree(adev->pm.dpm.ps[j].ps_priv);
 			kfree(adev->pm.dpm.ps);
 			return -ENOMEM;
 		}
@@ -409,11 +412,11 @@
 
 	ret = amdgpu_get_platform_caps(adev);
 	if (ret)
-		return ret;
+		goto err;
 
 	ret = amdgpu_parse_extended_power_table(adev);
 	if (ret)
-		return ret;
+		goto err;
 
 	pi->sram_end = SMC_RAM_END;
 
@@ -435,7 +438,11 @@
 		pi->caps_td_ramping = true;
 		pi->caps_tcp_ramping = true;
 	}
-	pi->caps_sclk_ds = true;
+	if (amdgpu_sclk_deep_sleep_en)
+		pi->caps_sclk_ds = true;
+	else
+		pi->caps_sclk_ds = false;
+
 	pi->voting_clients = 0x00c00033;
 	pi->auto_thermal_throttling_enabled = true;
 	pi->bapm_enabled = false;
@@ -463,23 +470,26 @@
 
 	ret = cz_parse_sys_info_table(adev);
 	if (ret)
-		return ret;
+		goto err;
 
 	cz_patch_voltage_values(adev);
 	cz_construct_boot_state(adev);
 
 	ret = cz_parse_power_table(adev);
 	if (ret)
-		return ret;
+		goto err;
 
 	ret = cz_process_firmware_header(adev);
 	if (ret)
-		return ret;
+		goto err;
 
 	pi->dpm_enabled = true;
 	pi->uvd_dynamic_pg = false;
 
 	return 0;
+err:
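+	/* cz_dpm_fini() unwinds everything the setup above allocated */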
+	cz_dpm_fini(adev);
+	return ret;
 }
 
 static void cz_dpm_fini(struct amdgpu_device *adev)
@@ -668,17 +678,12 @@
 	struct cz_power_info *pi = cz_get_pi(adev);
 
 	pi->active_process_mask = 0;
-
 }
 
 static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
 							void **table)
 {
-	int ret = 0;
-
-	ret = cz_smu_download_pptable(adev, table);
-
-	return ret;
+	return cz_smu_download_pptable(adev, table);
 }
 
 static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
@@ -818,9 +823,9 @@
 	pi->sclk_dpm.hard_min_clk = 0;
 	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
 	level = cz_get_argument(adev);
-	if (level < table->count)
+	if (level < table->count) {
 		clock = table->entries[level].clk;
-	else {
+	} else {
 		DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
 		clock = table->entries[table->count - 1].clk;
 	}
@@ -846,9 +851,9 @@
 	pi->uvd_dpm.hard_min_clk = 0;
 	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
 	level = cz_get_argument(adev);
-	if (level < table->count)
+	if (level < table->count) {
 		clock = table->entries[level].vclk;
-	else {
+	} else {
 		DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
 		clock = table->entries[table->count - 1].vclk;
 	}
@@ -874,9 +879,9 @@
 	pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
 	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
 	level = cz_get_argument(adev);
-	if (level < table->count)
+	if (level < table->count) {
 		clock = table->entries[level].ecclk;
-	else {
+	} else {
 		/* future BIOS would fix this error */
 		DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
 		clock = table->entries[table->count - 1].ecclk;
@@ -903,9 +908,9 @@
 	pi->acp_dpm.hard_min_clk = 0;
 	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
 	level = cz_get_argument(adev);
-	if (level < table->count)
+	if (level < table->count) {
 		clock = table->entries[level].clk;
-	else {
+	} else {
 		DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
 		clock = table->entries[table->count - 1].clk;
 	}
@@ -930,7 +935,6 @@
 	struct cz_power_info *pi = cz_get_pi(adev);
 
 	pi->low_sclk_interrupt_threshold = 0;
-
 }
 
 static void cz_dpm_setup_asic(struct amdgpu_device *adev)
@@ -1203,7 +1207,7 @@
 	int ret;
 
 	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
-			pi->caps_td_ramping || pi->caps_tcp_ramping) {
+	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
 		if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
 			ret = cz_disable_cgpg(adev);
 			if (ret) {
@@ -1277,7 +1281,7 @@
 	ps->force_high = false;
 	ps->need_dfs_bypass = true;
 	pi->video_start = new_rps->dclk || new_rps->vclk ||
-				new_rps->evclk || new_rps->ecclk;
+			  new_rps->evclk || new_rps->ecclk;
 
 	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
 			ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
@@ -1335,7 +1339,6 @@
 	}
 
 	cz_reset_acp_boot_level(adev);
-
 	cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
 
 	return 0;
@@ -1665,7 +1668,6 @@
 	struct amdgpu_ps *ps = &pi->requested_rps;
 
 	cz_update_current_ps(adev, ps);
-
 }
 
 static int cz_dpm_force_highest(struct amdgpu_device *adev)
@@ -2108,29 +2110,58 @@
 			/* disable clockgating so we can properly shut down the block */
 			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 							    AMD_CG_STATE_UNGATE);
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
+				return;
+			}
+
 			/* shutdown the UVD block */
 			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 							    AMD_PG_STATE_GATE);
-			/* XXX: check for errors */
+
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n");
+				return;
+			}
 		}
 		cz_update_uvd_dpm(adev, gate);
-		if (pi->caps_uvd_pg)
+		if (pi->caps_uvd_pg) {
 			/* power off the UVD block */
-			cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+			ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n");
+				return;
+			}
+		}
 	} else {
 		if (pi->caps_uvd_pg) {
 			/* power on the UVD block */
 			if (pi->uvd_dynamic_pg)
-				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
+				ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
 			else
-				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+				ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n");
+				return;
+			}
+
 			/* re-init the UVD block */
 			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 							    AMD_PG_STATE_UNGATE);
+
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n");
+				return;
+			}
+
 			/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
 			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 							    AMD_CG_STATE_GATE);
-			/* XXX: check for errors */
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n");
+				return;
+			}
 		}
 		cz_update_uvd_dpm(adev, gate);
 	}
@@ -2168,7 +2199,6 @@
 	/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
 	if (pi->caps_stable_power_state) {
 		pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;
-
 	} else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
 		/* leave it as set by user */
 		/*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index ac7fee7..95887e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -29,6 +29,8 @@
 #include "cz_smumgr.h"
 #include "smu_ucode_xfer_cz.h"
 #include "amdgpu_ucode.h"
+#include "cz_dpm.h"
+#include "vi_dpm.h"
 
 #include "smu/smu_8_0_d.h"
 #include "smu/smu_8_0_sh_mask.h"
@@ -48,7 +50,7 @@
 	return priv;
 }
 
-int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
+static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
 {
 	int i;
 	u32 content = 0, tmp;
@@ -140,7 +142,7 @@
 	return 0;
 }
 
-int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
 						u32 value, u32 limit)
 {
 	int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index c1b04e9..bc5bb4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -646,8 +646,8 @@
 
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
-				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
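+			/* mode 0: updates may take effect anywhere in the vblank interval */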
+			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
+				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
 				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
@@ -712,6 +712,45 @@
 	WREG32(mmVGA_RENDER_CONTROL, tmp);
 }
 
+static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
+{
+	int num_crtc = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_FIJI:
+	case CHIP_TONGA:
+		num_crtc = 6;
+		break;
+	default:
+		num_crtc = 0;
+	}
+	return num_crtc;
+}
+
+void dce_v10_0_disable_dce(struct amdgpu_device *adev)
+{
+	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
+	if (amdgpu_atombios_has_dce_engine_info(adev)) {
+		u32 tmp;
+		int crtc_enabled, i;
+
+		dce_v10_0_set_vga_render_state(adev, false);
+
+		/* Disable the CRTCs */
+		for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
+			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+									 CRTC_CONTROL, CRTC_MASTER_EN);
+			if (crtc_enabled) {
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+		}
+	}
+}
+
 static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
@@ -2071,6 +2110,7 @@
 	u32 tmp, viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
+	char *format_name;
 
 	/* no fb bound */
 	if (!atomic && !crtc->primary->fb) {
@@ -2182,8 +2222,9 @@
 		bypass_lut = true;
 		break;
 	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			drm_get_format_name(target_fb->pixel_format));
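+		/* drm_get_format_name() now kmallocs the string, so free it */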
+		format_name = drm_get_format_name(target_fb->pixel_format);
+		DRM_ERROR("Unsupported screen format %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
@@ -2275,8 +2316,8 @@
 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* set pageflip to happen only at start of vblank interval (front porch) */
-	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -2698,7 +2739,7 @@
 	.gamma_set = dce_v10_0_crtc_gamma_set,
 	.set_config = amdgpu_crtc_set_config,
 	.destroy = dce_v10_0_crtc_destroy,
-	.page_flip = amdgpu_crtc_page_flip,
+	.page_flip_target = amdgpu_crtc_page_flip_target,
 };
 
 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2962,10 +3003,11 @@
 	dce_v10_0_set_display_funcs(adev);
 	dce_v10_0_set_irq_funcs(adev);
 
+	adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
+
 	switch (adev->asic_type) {
 	case CHIP_FIJI:
 	case CHIP_TONGA:
-		adev->mode_info.num_crtc = 6; /* XXX 7??? */
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 7;
 		break;
@@ -3141,11 +3183,26 @@
 	return 0;
 }
 
+static int dce_v10_0_check_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (dce_v10_0_is_display_hung(adev))
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
+	else
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
+
+	return 0;
+}
+
 static int dce_v10_0_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0, tmp;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
+		return 0;
+
 	if (dce_v10_0_is_display_hung(adev))
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
 
@@ -3512,6 +3569,7 @@
 	.resume = dce_v10_0_resume,
 	.is_idle = dce_v10_0_is_idle,
 	.wait_for_idle = dce_v10_0_wait_for_idle,
+	.check_soft_reset = dce_v10_0_check_soft_reset,
 	.soft_reset = dce_v10_0_soft_reset,
 	.set_clockgating_state = dce_v10_0_set_clockgating_state,
 	.set_powergating_state = dce_v10_0_set_powergating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index 1bfa48d..e3dc04d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -26,4 +26,6 @@
 
 extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
 
+void dce_v10_0_disable_dce(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index d4bf133..b93eba0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -673,6 +673,53 @@
 	WREG32(mmVGA_RENDER_CONTROL, tmp);
 }
 
+static int dce_v11_0_get_num_crtc(struct amdgpu_device *adev)
+{
+	int num_crtc = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_CARRIZO:
+		num_crtc = 3;
+		break;
+	case CHIP_STONEY:
+		num_crtc = 2;
+		break;
+	case CHIP_POLARIS10:
+		num_crtc = 6;
+		break;
+	case CHIP_POLARIS11:
+		num_crtc = 5;
+		break;
+	default:
+		num_crtc = 0;
+	}
+	return num_crtc;
+}
+
+void dce_v11_0_disable_dce(struct amdgpu_device *adev)
+{
+	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
+	if (amdgpu_atombios_has_dce_engine_info(adev)) {
+		u32 tmp;
+		int crtc_enabled, i;
+
+		dce_v11_0_set_vga_render_state(adev, false);
+
+		/* Disable the CRTCs */
+		for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
+			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+									 CRTC_CONTROL, CRTC_MASTER_EN);
+			if (crtc_enabled) {
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+		}
+	}
+}
+
 static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
@@ -2046,6 +2093,7 @@
 	u32 tmp, viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
+	char *format_name;
 
 	/* no fb bound */
 	if (!atomic && !crtc->primary->fb) {
@@ -2157,8 +2205,9 @@
 		bypass_lut = true;
 		break;
 	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			drm_get_format_name(target_fb->pixel_format));
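+		/* drm_get_format_name() now kmallocs the string, so free it */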
+		format_name = drm_get_format_name(target_fb->pixel_format);
+		DRM_ERROR("Unsupported screen format %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
@@ -2250,8 +2299,8 @@
 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* set pageflip to happen only at start of vblank interval (front porch) */
-	WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -2708,7 +2757,7 @@
 	.gamma_set = dce_v11_0_crtc_gamma_set,
 	.set_config = amdgpu_crtc_set_config,
 	.destroy = dce_v11_0_crtc_destroy,
-	.page_flip = amdgpu_crtc_page_flip,
+	.page_flip_target = amdgpu_crtc_page_flip_target,
 };
 
 static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2999,24 +3048,22 @@
 	dce_v11_0_set_display_funcs(adev);
 	dce_v11_0_set_irq_funcs(adev);
 
+	adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
+
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
-		adev->mode_info.num_crtc = 3;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 9;
 		break;
 	case CHIP_STONEY:
-		adev->mode_info.num_crtc = 2;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 9;
 		break;
 	case CHIP_POLARIS10:
-		adev->mode_info.num_crtc = 6;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;
 		break;
 	case CHIP_POLARIS11:
-		adev->mode_info.num_crtc = 5;
 		adev->mode_info.num_hpd = 5;
 		adev->mode_info.num_dig = 5;
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 84e4618..1f58a65 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -26,4 +26,6 @@
 
 extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
 
+void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
new file mode 100644
index 0000000..d3512f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -0,0 +1,3160 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "atombios_crtc.h"
+#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#include "si/si_reg.h"
+#include "si/sid.h"
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static const u32 crtc_offsets[6] =
+{
+	SI_CRTC0_REGISTER_OFFSET,
+	SI_CRTC1_REGISTER_OFFSET,
+	SI_CRTC2_REGISTER_OFFSET,
+	SI_CRTC3_REGISTER_OFFSET,
+	SI_CRTC4_REGISTER_OFFSET,
+	SI_CRTC5_REGISTER_OFFSET
+};
+
+static const uint32_t dig_offsets[] = {
+	SI_CRTC0_REGISTER_OFFSET,
+	SI_CRTC1_REGISTER_OFFSET,
+	SI_CRTC2_REGISTER_OFFSET,
+	SI_CRTC3_REGISTER_OFFSET,
+	SI_CRTC4_REGISTER_OFFSET,
+	SI_CRTC5_REGISTER_OFFSET,
+	(0x13830 - 0x7030) >> 2,
+};
+
+static const struct {
+	uint32_t	reg;
+	uint32_t	vblank;
+	uint32_t	vline;
+	uint32_t	hpd;
+
+} interrupt_status_offsets[6] = { {
+	.reg = DISP_INTERRUPT_STATUS,
+	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
+}, {
+	.reg = DISP_INTERRUPT_STATUS_CONTINUE,
+	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
+}, {
+	.reg = DISP_INTERRUPT_STATUS_CONTINUE2,
+	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
+}, {
+	.reg = DISP_INTERRUPT_STATUS_CONTINUE3,
+	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
+}, {
+	.reg = DISP_INTERRUPT_STATUS_CONTINUE4,
+	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
+}, {
+	.reg = DISP_INTERRUPT_STATUS_CONTINUE5,
+	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
+} };
+
+static const uint32_t hpd_int_control_offsets[6] = {
+	DC_HPD1_INT_CONTROL,
+	DC_HPD2_INT_CONTROL,
+	DC_HPD3_INT_CONTROL,
+	DC_HPD4_INT_CONTROL,
+	DC_HPD5_INT_CONTROL,
+	DC_HPD6_INT_CONTROL,
+};
+
+static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
+				     u32 block_offset, u32 reg)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
+	return 0;
+}
+
+static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
+				      u32 block_offset, u32 reg, u32 v)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
+}
+
+static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
+{
+	return RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) &
+		EVERGREEN_CRTC_V_BLANK;
+}
+
+static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
+{
+	u32 pos1, pos2;
+
+	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	return pos1 != pos2;
+}
+
+/**
+ * dce_v6_0_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+	unsigned i = 0;
+
+	if (crtc >= adev->mode_info.num_crtc)
+		return;
+
+	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+		return;
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (dce_v6_0_is_in_vblank(adev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!dce_v6_0_is_counter_moving(adev, crtc))
+				break;
+		}
+	}
+
+	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!dce_v6_0_is_counter_moving(adev, crtc))
+				break;
+		}
+	}
+}
+
+static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+	if (crtc >= adev->mode_info.num_crtc)
+		return 0;
+	else
+		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
+/**
+ * dce_v6_0_page_flip - pageflip callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to flip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ * @async: flip at hsync instead of waiting for vblank
+ *
+ * Does the actual pageflip (evergreen+) by programming the new
+ * primary surface address; the double buffered update then takes
+ * effect at the next vblank (or at the next hsync for async flips).
+ */
+static void dce_v6_0_page_flip(struct amdgpu_device *adev,
+			       int crtc_id, u64 crtc_base, bool async)
+{
+	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+	/* flip at hsync for async, default is vsync */
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+	       EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+	/* update the scanout addresses */
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* post the write */
+	RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
+}
+
+static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+					u32 *vbl, u32 *position)
+{
+	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+		return -EINVAL;
+	*vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+	*position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	return 0;
+}
+
+/**
+ * dce_v6_0_hpd_sense - hpd sense callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
+			       enum amdgpu_hpd_id hpd)
+{
+	bool connected = false;
+
+	switch (hpd) {
+	case AMDGPU_HPD_1:
+		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case AMDGPU_HPD_2:
+		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case AMDGPU_HPD_3:
+		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case AMDGPU_HPD_4:
+		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case AMDGPU_HPD_5:
+		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case AMDGPU_HPD_6:
+		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+
+	return connected;
+}
+
+/**
+ * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
+				      enum amdgpu_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = dce_v6_0_hpd_sense(adev, hpd);
+
+	switch (hpd) {
+	case AMDGPU_HPD_1:
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		break;
+	case AMDGPU_HPD_2:
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		break;
+	case AMDGPU_HPD_3:
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		break;
+	case AMDGPU_HPD_4:
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		break;
+	case AMDGPU_HPD_5:
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+		break;
+	case AMDGPU_HPD_6:
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * dce_v6_0_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS to avoid
+			 * breaking the aux dp channel on iMacs; this helps
+			 * (but does not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * and also avoids interrupt storms during dpms.
+			 */
+			continue;
+		}
+		switch (amdgpu_connector->hpd.hpd) {
+		case AMDGPU_HPD_1:
+			WREG32(DC_HPD1_CONTROL, tmp);
+			break;
+		case AMDGPU_HPD_2:
+			WREG32(DC_HPD2_CONTROL, tmp);
+			break;
+		case AMDGPU_HPD_3:
+			WREG32(DC_HPD3_CONTROL, tmp);
+			break;
+		case AMDGPU_HPD_4:
+			WREG32(DC_HPD4_CONTROL, tmp);
+			break;
+		case AMDGPU_HPD_5:
+			WREG32(DC_HPD5_CONTROL, tmp);
+			break;
+		case AMDGPU_HPD_6:
+			WREG32(DC_HPD6_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+	}
+}
+
+/**
+ * dce_v6_0_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+		switch (amdgpu_connector->hpd.hpd) {
+		case AMDGPU_HPD_1:
+			WREG32(DC_HPD1_CONTROL, 0);
+			break;
+		case AMDGPU_HPD_2:
+			WREG32(DC_HPD2_CONTROL, 0);
+			break;
+		case AMDGPU_HPD_3:
+			WREG32(DC_HPD3_CONTROL, 0);
+			break;
+		case AMDGPU_HPD_4:
+			WREG32(DC_HPD4_CONTROL, 0);
+			break;
+		case AMDGPU_HPD_5:
+			WREG32(DC_HPD5_CONTROL, 0);
+			break;
+		case AMDGPU_HPD_6:
+			WREG32(DC_HPD6_CONTROL, 0);
+			break;
+		default:
+			break;
+		}
+		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+	}
+}
+
+static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+	return SI_DC_GPIO_HPD_A;
+}
+
+static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
+{
+	DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no imp!!!!!\n");
+
+	return true;
+}
+
+static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
+{
+	if (crtc >= adev->mode_info.num_crtc)
+		return 0;
+	else
+		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
+				    struct amdgpu_mode_mc_save *save)
+{
+	u32 crtc_enabled, tmp, frame_count;
+	int i, j;
+
+	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+	/* disable VGA render */
+	WREG32(VGA_RENDER_CONTROL, 0);
+
+	/* blank the display controllers */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+
+			if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+				dce_v6_0_vblank_wait(adev, i);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+			/* wait for the next frame */
+			frame_count = evergreen_get_vblank_counter(adev, i);
+			for (j = 0; j < adev->usec_timeout; j++) {
+				if (evergreen_get_vblank_counter(adev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+
+			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			save->crtc_enabled[i] = false;
+		} else {
+			save->crtc_enabled[i] = false;
+		}
+	}
+}
+
+static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
+				      struct amdgpu_mode_mc_save *save)
+{
+	u32 tmp;
+	int i, j;
+
+	/* update crtc base addresses */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(adev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(adev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)adev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)adev->mc.vram_start);
+	}
+
+	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
+
+	/* unlock regs and wait for update */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+			if ((tmp & 0x7) != 3) {
+				tmp &= ~0x7;
+				tmp |= 0x3;
+				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (tmp & 1) {
+				tmp &= ~1;
+				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+			for (j = 0; j < adev->usec_timeout; j++) {
+				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+					break;
+				udelay(1);
+			}
+		}
+	}
+
+	/* Unlock vga access */
+	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+	mdelay(1);
+	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
+					  bool render)
+{
+	if (!render)
+		WREG32(R_000300_VGA_RENDER_CONTROL,
+		       RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+}
+
+static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+		bpc = amdgpu_connector_get_monitor_bpc(connector);
+		dither = amdgpu_connector->dither;
+	}
+
+	/* LVDS FMT is set up by atom */
+	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == AMDGPU_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN);
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == AMDGPU_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+/**
+ * si_get_number_of_dram_channels - get the number of dram channels
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the number of video ram channels (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the number of dram channels
+ */
+static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	case 4:
+		return 3;
+	case 5:
+		return 6;
+	case 6:
+		return 10;
+	case 7:
+		return 12;
+	case 8:
+		return 16;
+	}
+}
+
+struct dce6_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk;          /* bandwidth per dram data pin in kHz */
+	u32 sclk;          /* engine clock in kHz */
+	u32 disp_clk;      /* display clock in kHz */
+	u32 src_width;     /* viewport width */
+	u32 active_time;   /* active display time in ns */
+	u32 blank_time;    /* blank time in ns */
+	bool interlaced;    /* mode is interlaced */
+	fixed20_12 vsc;    /* vertical scale ratio */
+	u32 num_heads;     /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size;       /* line buffer allocated to pipe */
+	u32 vtaps;         /* vertical scaler taps */
+};
+
+/**
+ * dce_v6_0_dram_bandwidth - get the dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the raw dram bandwidth (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate raw DRAM Bandwidth */
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
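+
+/* e.g. (illustrative numbers, not taken from hardware documentation):
+ * with wm->yclk = 800000 kHz and wm->dram_channels = 2, the formula
+ * above gives (2 channels * 4 bytes) * 800 MHz * 0.7 = 4480 MBytes/s.
+ */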
+
+/**
+ * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dram bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth for display in MBytes/s
+ */
+static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_data_return_bandwidth - get the data return bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the data return bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the data return bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the display Data return Bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
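+
+/* e.g. (illustrative numbers): wm->sclk = 800000 kHz yields
+ * 32 bytes/clk * 800 MHz * 0.8 = 20480 MBytes/s of return bandwidth.
+ */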
+
+/**
+ * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dmif bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the dmif bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, bandwidth;
+	fixed20_12 a, b;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(32);
+	b.full = dfixed_mul(a, disp_clk);
+
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
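+
+/* e.g. (illustrative numbers): wm->disp_clk = 148500 kHz (1080p60)
+ * yields 32 * 148.5 MHz * 0.8 ~= 3801 MBytes/s of request bandwidth.
+ */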
+
+/**
+ * dce_v6_0_available_bandwidth - get the min available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the min available bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the min available bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
+	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
+	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+/**
+ * dce_v6_0_average_bandwidth - get the average available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the average available bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the average available bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
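+
+/* e.g. (illustrative numbers): src_width = 1920, 4 bytes per pixel,
+ * vsc = 1 and active_time + blank_time = 14800 ns give
+ * 1920 * 4 bytes / 14.8 us ~= 519 MBytes/s of average bandwidth.
+ */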
+
+/**
+ * dce_v6_0_latency_watermark - get the latency watermark
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the latency watermark (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the latency watermark in ns
+ */
+static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	u32 tmp, dmif_size = 12288;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+
+	b.full = dfixed_const(mc_latency + 512);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(b, c);
+
+	c.full = dfixed_const(dmif_size);
+	b.full = dfixed_div(c, b);
+
+	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(c, b);
+	c.full = dfixed_const(wm->bytes_per_pixel);
+	b.full = dfixed_mul(b, c);
+
+	lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+}
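+
+/* In short: the watermark is the memory latency (mc + dc pipe) plus the
+ * time the other heads need to return their chunks and cursor lines; if
+ * the line buffer cannot refill a line within the active period, the
+ * shortfall is added on top.
+ */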
+
+/**
+ * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
+ * average and available dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * dram bandwidth (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	return dce_v6_0_average_bandwidth(wm) <=
+		(dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads);
+}
+
+/**
+ * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
+ * average and available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * available bandwidth (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
+{
+	return dce_v6_0_average_bandwidth(wm) <=
+		(dce_v6_0_available_bandwidth(wm) / wm->num_heads);
+}
+
+/**
+ * dce_v6_0_check_latency_hiding - check latency hiding
+ *
+ * @wm: watermark calculation data
+ *
+ * Check latency hiding (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	return dce_v6_0_latency_watermark(wm) <= latency_hiding;
+}
+
+/**
+ * dce_v6_0_program_watermarks - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @lb_size: line buffer size
+ * @num_heads: number of display controllers in use
+ *
+ * Calculate and program the display watermarks for the
+ * selected display controller (SI).
+ */
+static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
+					struct amdgpu_crtc *amdgpu_crtc,
+					u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
+	struct dce6_wm_params wm_low, wm_high;
+	u32 dram_channels;
+	u32 pixel_period;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 priority_a_mark = 0, priority_b_mark = 0;
+	u32 priority_a_cnt = PRIORITY_OFF;
+	u32 priority_b_cnt = PRIORITY_OFF;
+	u32 tmp, arb_control3;
+	fixed20_12 a, b, c;
+
+	if (amdgpu_crtc->base.enabled && num_heads && mode) {
+		pixel_period = 1000000 / (u32)mode->clock;
+		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		priority_a_cnt = 0;
+		priority_b_cnt = 0;
+
+		dram_channels = si_get_number_of_dram_channels(adev);
+
+		/* watermark for high clocks */
+		if (adev->pm.dpm_enabled) {
+			wm_high.yclk =
+				amdgpu_dpm_get_mclk(adev, false) * 10;
+			wm_high.sclk =
+				amdgpu_dpm_get_sclk(adev, false) * 10;
+		} else {
+			wm_high.yclk = adev->pm.current_mclk * 10;
+			wm_high.sclk = adev->pm.current_sclk * 10;
+		}
+
+		wm_high.disp_clk = mode->clock;
+		wm_high.src_width = mode->crtc_hdisplay;
+		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.blank_time = line_time - wm_high.active_time;
+		wm_high.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm_high.interlaced = true;
+		wm_high.vsc = amdgpu_crtc->vsc;
+		wm_high.vtaps = 1;
+		if (amdgpu_crtc->rmx_type != RMX_OFF)
+			wm_high.vtaps = 2;
+		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm_high.lb_size = lb_size;
+		wm_high.dram_channels = dram_channels;
+		wm_high.num_heads = num_heads;
+
+		/* watermark for low clocks */
+		if (adev->pm.dpm_enabled) {
+			wm_low.yclk =
+				amdgpu_dpm_get_mclk(adev, true) * 10;
+			wm_low.sclk =
+				amdgpu_dpm_get_sclk(adev, true) * 10;
+		} else {
+			wm_low.yclk = adev->pm.current_mclk * 10;
+			wm_low.sclk = adev->pm.current_sclk * 10;
+		}
+
+		wm_low.disp_clk = mode->clock;
+		wm_low.src_width = mode->crtc_hdisplay;
+		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.blank_time = line_time - wm_low.active_time;
+		wm_low.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm_low.interlaced = true;
+		wm_low.vsc = amdgpu_crtc->vsc;
+		wm_low.vtaps = 1;
+		if (amdgpu_crtc->rmx_type != RMX_OFF)
+			wm_low.vtaps = 2;
+		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm_low.lb_size = lb_size;
+		wm_low.dram_channels = dram_channels;
+		wm_low.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
+		/* set for low clocks */
+		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+		    !dce_v6_0_check_latency_hiding(&wm_high) ||
+		    (adev->mode_info.disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+		    !dce_v6_0_check_latency_hiding(&wm_low) ||
+		    (adev->mode_info.disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_a);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_a_mark = dfixed_trunc(c);
+		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_b);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_b_mark = dfixed_trunc(c);
+		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+	}
+
+	/* select wm A */
+	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+	tmp = arb_control3;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
+
+	/* write the priority marks */
+	WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
+	WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
+
+	/* save values for DPM */
+	amdgpu_crtc->line_time = line_time;
+	amdgpu_crtc->wm_high = latency_watermark_a;
+}
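+
+/* e.g. (illustrative numbers): latency_watermark_a = 1000 ns at a
+ * 148500 kHz pixel clock with hsc = 1 covers 1000 ns * 148.5 MHz / 1000
+ * ~= 148 pixels, so priority_a_mark is programmed as 148 / 16 ~= 9.
+ */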
+
+/* watermark setup */
+static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
+				   struct amdgpu_crtc *amdgpu_crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *other_mode)
+{
+	u32 tmp, buffer_alloc, i;
+	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
+	/*
+	 * Line Buffer Setup
+	 * There are 3 line buffers, each one shared by 2 display controllers.
+	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning is done via one of four
+	 * preset allocations specified in bits 21:20:
+	 *  0 - half lb
+	 *  2 - whole lb, other crtc must be disabled
+	 */
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
+	if (amdgpu_crtc->base.enabled && mode) {
+		if (other_mode) {
+			tmp = 0; /* 1/2 */
+			buffer_alloc = 1;
+		} else {
+			tmp = 2; /* whole */
+			buffer_alloc = 2;
+		}
+	} else {
+		tmp = 0;
+		buffer_alloc = 0;
+	}
+
+	WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
+	       DC_LB_MEMORY_CONFIG(tmp));
+
+	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
+			break;
+		udelay(1);
+	}
+
+	if (amdgpu_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		default:
+			return 4096 * 2;
+		case 2:
+			return 8192 * 2;
+		}
+	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
+}
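+
+/* e.g.: a single enabled head on a crtc pair gets the whole line buffer
+ * and reports 8192 * 2 = 16384 entries; two enabled heads on the pair
+ * get half each, i.e. 4096 * 2 = 8192 entries.
+ */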
+
+/**
+ * dce_v6_0_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (SI).
+ */
+static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	if (!adev->mode_info.mode_config_initialized)
+		return;
+
+	amdgpu_update_display_priority(adev);
+
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		if (adev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
+	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
+		mode0 = &adev->mode_info.crtcs[i]->base.mode;
+		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
+		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
+		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
+		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
+		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
+	}
+}
+
+/*
+static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
+{
+	int i;
+	u32 offset, tmp;
+
+	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+		offset = adev->mode_info.audio.pin[i].offset;
+		tmp = RREG32_AUDIO_ENDPT(offset,
+				      AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+			adev->mode_info.audio.pin[i].connected = false;
+		else
+			adev->mode_info.audio.pin[i].connected = true;
+	}
+
+}
+
+static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
+{
+	int i;
+
+	dce_v6_0_audio_get_connected_pins(adev);
+
+	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+		if (adev->mode_info.audio.pin[i].connected)
+			return &adev->mode_info.audio.pin[i];
+	}
+	DRM_ERROR("No connected audio pins found!\n");
+	return NULL;
+}
+
+static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+{
+	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	u32 offset;
+
+	if (!dig || !dig->afmt || !dig->afmt->pin)
+		return;
+
+	offset = dig->afmt->offset;
+
+	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+
+}
+
+static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
+						struct drm_display_mode *mode)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
+
+}
+*/
+static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
+				  struct amdgpu_audio_pin *pin,
+				  bool enable)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
+}
+
+static const u32 pin_offsets[7] =
+{
+	(0x1780 - 0x1780),
+	(0x1786 - 0x1780),
+	(0x178c - 0x1780),
+	(0x1792 - 0x1780),
+	(0x1798 - 0x1780),
+	(0x179d - 0x1780),
+	(0x17a4 - 0x1780),
+};
+
+static int dce_v6_0_audio_init(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
+static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
+{
+}
+
+/*
+static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+	DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
+}
+*/
+/*
+ * build a HDMI Video Info Frame
+ */
+/*
+static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
+					       void *buffer, size_t size)
+{
+	DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
+}
+*/
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
+				  struct drm_display_mode *mode)
+{
+	DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
+}
+
+static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	/* silently return if we are already in the requested state */
+	if (enable && dig->afmt->enabled)
+		return;
+	if (!enable && !dig->afmt->enabled)
+		return;
+
+	if (!enable && dig->afmt->pin) {
+		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
+		dig->afmt->pin = NULL;
+	}
+
+	dig->afmt->enabled = enable;
+
+	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
+		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
+}
+
+static void dce_v6_0_afmt_init(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->mode_info.num_dig; i++)
+		adev->mode_info.afmt[i] = NULL;
+
+	/* DCE6 has audio blocks tied to DIG encoders */
+	for (i = 0; i < adev->mode_info.num_dig; i++) {
+		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
+		if (adev->mode_info.afmt[i]) {
+			adev->mode_info.afmt[i]->offset = dig_offsets[i];
+			adev->mode_info.afmt[i]->id = i;
+		}
+	}
+}
+
+static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->mode_info.num_dig; i++) {
+		kfree(adev->mode_info.afmt[i]);
+		adev->mode_info.afmt[i] = NULL;
+	}
+}
+
+static const u32 vga_control_regs[6] =
+{
+	AVIVO_D1VGA_CONTROL,
+	AVIVO_D2VGA_CONTROL,
+	EVERGREEN_D3VGA_CONTROL,
+	EVERGREEN_D4VGA_CONTROL,
+	EVERGREEN_D5VGA_CONTROL,
+	EVERGREEN_D6VGA_CONTROL,
+};
+
+static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	u32 vga_control;
+
+	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
+	if (enable)
+		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
+	else
+		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
+}
+
+static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (enable)
+		WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
+	else
+		WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
+}
+
+static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
+				     struct drm_framebuffer *fb,
+				     int x, int y, int atomic)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_framebuffer *amdgpu_fb;
+	struct drm_framebuffer *target_fb;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *rbo;
+	uint64_t fb_location, tiling_flags;
+	uint32_t fb_format, fb_pitch_pixels, pipe_config;
+	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+	u32 viewport_w, viewport_h;
+	int r;
+	bool bypass_lut = false;
+
+	/* no fb bound */
+	if (!atomic && !crtc->primary->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic) {
+		amdgpu_fb = to_amdgpu_framebuffer(fb);
+		target_fb = fb;
+	} else {
+		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+		target_fb = crtc->primary->fb;
+	}
+
+	/* If atomic, assume fb object is pinned & idle & fenced and
+	 * just update base pointers
+	 */
+	obj = amdgpu_fb->obj;
+	rbo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+
+	if (atomic) {
+		fb_location = amdgpu_bo_gpu_offset(rbo);
+	} else {
+		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		if (unlikely(r != 0)) {
+			amdgpu_bo_unreserve(rbo);
+			return -EINVAL;
+		}
+	}
+
+	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
+	amdgpu_bo_unreserve(rbo);
+
+	switch (target_fb->pixel_format) {
+	case DRM_FORMAT_C8:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+		break;
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_ARGB4444:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_ARGB1555:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_BGRA5551:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_RGB565:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+		bypass_lut = true;
+		break;
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_BGRA1010102:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+		bypass_lut = true;
+		break;
+	default:
+		DRM_ERROR("Unsupported screen format %s\n",
+			  drm_get_format_name(target_fb->pixel_format));
+		return -EINVAL;
+	}
+
+	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
+		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
+
+		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+		fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+		fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1)
+		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+
+	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+	fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
+
+	dce_v6_0_vga_enable(crtc, false);
+
+	/* Make sure surface address is updated at vertical blank rather than
+	 * horizontal blank
+	 */
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+	WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
+
+	/*
+	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
+	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
+	 * retain the full precision throughout the pipeline.
+	 */
+	WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
+		 (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+		 ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+	if (bypass_lut)
+		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+	WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+
+	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+	WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+
+	dce_v6_0_grph_enable(crtc, true);
+
+	WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+	       target_fb->height);
+	x &= ~3;
+	y &= ~1;
+	WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
+	       (x << 16) | y);
+	viewport_w = crtc->mode.hdisplay;
+	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+
+	WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+	       (viewport_w << 16) | viewport_h);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
+
+	if (!atomic && fb && fb != crtc->primary->fb) {
+		amdgpu_fb = to_amdgpu_framebuffer(fb);
+		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		amdgpu_bo_unpin(rbo);
+		amdgpu_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	dce_v6_0_bandwidth_update(adev);
+
+	return 0;
+}
+
+static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
+		       EVERGREEN_INTERLEAVE_EN);
+	else
+		WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
+}
+
+static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
+
+	WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+	WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
+	       NI_GRPH_PRESCALE_BYPASS);
+	WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
+	       NI_OVL_PRESCALE_BYPASS);
+	WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+	WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+
+	WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+
+	WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+		       (amdgpu_crtc->lut_r[i] << 20) |
+		       (amdgpu_crtc->lut_g[i] << 10) |
+		       (amdgpu_crtc->lut_b[i] << 0));
+	}
+
+	WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+	WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+	WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+	WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_OUTPUT_CSC_GRPH_MODE(0) |
+		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
+	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
+}
+
+static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+	switch (amdgpu_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		return dig->linkb ? 1 : 0;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		return dig->linkb ? 3 : 2;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		return dig->linkb ? 5 : 4;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		return 6;
+	default:
+		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+		return 0;
+	}
+}
+
+/**
+ * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ */
+static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	u32 pll_in_use;
+	int pll;
+
+	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
+		if (adev->clock.dp_extclk)
+			/* skip PPLL programming if using ext clock */
+			return ATOM_PPLL_INVALID;
+		else
+			return ATOM_PPLL0;
+	} else {
+		/* use the same PPLL for all monitors with the same clock */
+		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
+		if (pll != ATOM_PPLL_INVALID)
+			return pll;
+	}
+
+	/* pick whichever of PPLL1 and PPLL2 is free */
+	pll_in_use = amdgpu_pll_get_use_mask(crtc);
+	if (!(pll_in_use & (1 << ATOM_PPLL2)))
+		return ATOM_PPLL2;
+	if (!(pll_in_use & (1 << ATOM_PPLL1)))
+		return ATOM_PPLL1;
+	DRM_ERROR("unable to allocate a PPLL\n");
+	return ATOM_PPLL_INVALID;
+}
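+
+/* e.g.: with an external DP clock every DP head returns ATOM_PPLL_INVALID
+ * (nothing to program); otherwise all DP heads share PPLL0, leaving
+ * PPLL1/PPLL2 for non-DP heads, so a third non-DP head cannot get a PLL.
+ */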
+
+static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	uint32_t cur_lock;
+
+	cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
+	if (lock)
+		cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+	else
+		cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+	WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+}
+
+static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+
+	WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+		   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+}
+
+static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+
+	WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	       upper_32_bits(amdgpu_crtc->cursor_addr));
+	WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	       lower_32_bits(amdgpu_crtc->cursor_addr));
+
+	WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+		   EVERGREEN_CURSOR_EN |
+		   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+}
+
+static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
+				       int x, int y)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	int xorigin = 0, yorigin = 0;
+	int w = amdgpu_crtc->cursor_width;
+
+	/* avivo cursors are offset into the total surface */
+	x += crtc->x;
+	y += crtc->y;
+	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+	if (x < 0) {
+		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+		x = 0;
+	}
+	if (y < 0) {
+		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+		y = 0;
+	}
+
+	WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+	WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+
+	amdgpu_crtc->cursor_x = x;
+	amdgpu_crtc->cursor_y = y;
+	return 0;
+}
+
+static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
+				     int x, int y)
+{
+	int ret;
+
+	dce_v6_0_lock_cursor(crtc, true);
+	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
+	dce_v6_0_lock_cursor(crtc, false);
+
+	return ret;
+}
+
+static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
+				     struct drm_file *file_priv,
+				     uint32_t handle,
+				     uint32_t width,
+				     uint32_t height,
+				     int32_t hot_x,
+				     int32_t hot_y)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *aobj;
+	int ret;
+
+	if (!handle) {
+		/* turn off cursor */
+		dce_v6_0_hide_cursor(crtc);
+		obj = NULL;
+		goto unpin;
+	}
+
+	if ((width > amdgpu_crtc->max_cursor_width) ||
+	    (height > amdgpu_crtc->max_cursor_height)) {
+		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
+		return -ENOENT;
+	}
+
+	aobj = gem_to_amdgpu_bo(obj);
+	ret = amdgpu_bo_reserve(aobj, false);
+	if (ret != 0) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
+	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+	amdgpu_bo_unreserve(aobj);
+	if (ret) {
+		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
+	amdgpu_crtc->cursor_width = width;
+	amdgpu_crtc->cursor_height = height;
+
+	dce_v6_0_lock_cursor(crtc, true);
+
+	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	    hot_y != amdgpu_crtc->cursor_hot_y) {
+		int x, y;
+
+		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+		dce_v6_0_cursor_move_locked(crtc, x, y);
+
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
+	}
+
+	dce_v6_0_show_cursor(crtc);
+	dce_v6_0_lock_cursor(crtc, false);
+
+unpin:
+	if (amdgpu_crtc->cursor_bo) {
+		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+		ret = amdgpu_bo_reserve(aobj, false);
+		if (likely(ret == 0)) {
+			amdgpu_bo_unpin(aobj);
+			amdgpu_bo_unreserve(aobj);
+		}
+		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+	}
+
+	amdgpu_crtc->cursor_bo = obj;
+	return 0;
+}
+
+static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	if (amdgpu_crtc->cursor_bo) {
+		dce_v6_0_lock_cursor(crtc, true);
+
+		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+					    amdgpu_crtc->cursor_y);
+
+		dce_v6_0_show_cursor(crtc);
+
+		dce_v6_0_lock_cursor(crtc, false);
+	}
+}
+
+static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				   u16 *blue, uint32_t size)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	int i;
+
+	/* userspace palettes are always correct as is */
+	for (i = 0; i < size; i++) {
+		amdgpu_crtc->lut_r[i] = red[i] >> 6;
+		amdgpu_crtc->lut_g[i] = green[i] >> 6;
+		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+	}
+	dce_v6_0_crtc_load_lut(crtc);
+
+	return 0;
+}
+
+static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
+	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
+	.cursor_move = dce_v6_0_crtc_cursor_move,
+	.gamma_set = dce_v6_0_crtc_gamma_set,
+	.set_config = amdgpu_crtc_set_config,
+	.destroy = dce_v6_0_crtc_destroy,
+	.page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	unsigned type;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		amdgpu_crtc->enabled = true;
+		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
+		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
+		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+		dce_v6_0_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+		if (amdgpu_crtc->enabled)
+			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
+		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
+		amdgpu_crtc->enabled = false;
+		break;
+	}
+	/* adjust pm to dpms */
+	amdgpu_pm_compute_clocks(adev);
+}
+
+static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
+{
+	/* disable crtc pair power gating before programming */
+	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
+	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
+	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
+{
+	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
+}
+
+static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_atom_ss ss;
+	int i;
+
+	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (crtc->primary->fb) {
+		int r;
+		struct amdgpu_framebuffer *amdgpu_fb;
+		struct amdgpu_bo *rbo;
+
+		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(rbo, false);
+		if (unlikely(r)) {
+			DRM_ERROR("failed to reserve rbo before unpin\n");
+		} else {
+			amdgpu_bo_unpin(rbo);
+			amdgpu_bo_unreserve(rbo);
+		}
+	}
+	/* disable the GRPH */
+	dce_v6_0_grph_enable(crtc, false);
+
+	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
+
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		if (adev->mode_info.crtcs[i] &&
+		    adev->mode_info.crtcs[i]->enabled &&
+		    i != amdgpu_crtc->crtc_id &&
+		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
+			/* another crtc is using this pll;
+			 * don't turn it off
+			 */
+			goto done;
+		}
+	}
+
+	switch (amdgpu_crtc->pll_id) {
+	case ATOM_PPLL1:
+	case ATOM_PPLL2:
+		/* disable the ppll */
+		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+		break;
+	default:
+		break;
+	}
+done:
+	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+	amdgpu_crtc->adjusted_clock = 0;
+	amdgpu_crtc->encoder = NULL;
+	amdgpu_crtc->connector = NULL;
+}
+
+static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode,
+				  int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	if (!amdgpu_crtc->adjusted_clock)
+		return -EINVAL;
+
+	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
+	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
+	amdgpu_atombios_crtc_scaler_setup(crtc);
+	dce_v6_0_cursor_reset(crtc);
+	/* update the hw version for dpm */
+	amdgpu_crtc->hw_mode = *adjusted_mode;
+
+	return 0;
+}
+
+static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			amdgpu_crtc->encoder = encoder;
+			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+			break;
+		}
+	}
+	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+		amdgpu_crtc->encoder = NULL;
+		amdgpu_crtc->connector = NULL;
+		return false;
+	}
+	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+		return false;
+	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
+		return false;
+	/* pick pll */
+	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
+	/* if we can't get a PPLL for a non-DP encoder, fail */
+	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
+	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+		return false;
+
+	return true;
+}
+
+static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				  struct drm_framebuffer *old_fb)
+{
+	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 int x, int y, enum mode_set_atomic state)
+{
+	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
+	.dpms = dce_v6_0_crtc_dpms,
+	.mode_fixup = dce_v6_0_crtc_mode_fixup,
+	.mode_set = dce_v6_0_crtc_mode_set,
+	.mode_set_base = dce_v6_0_crtc_set_base,
+	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
+	.prepare = dce_v6_0_crtc_prepare,
+	.commit = dce_v6_0_crtc_commit,
+	.load_lut = dce_v6_0_crtc_load_lut,
+	.disable = dce_v6_0_crtc_disable,
+};
+
+static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
+{
+	struct amdgpu_crtc *amdgpu_crtc;
+	int i;
+
+	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+	if (amdgpu_crtc == NULL)
+		return -ENOMEM;
+
+	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+	amdgpu_crtc->crtc_id = index;
+	adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
+	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
+	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+
+	for (i = 0; i < 256; i++) {
+		amdgpu_crtc->lut_r[i] = i << 2;
+		amdgpu_crtc->lut_g[i] = i << 2;
+		amdgpu_crtc->lut_b[i] = i << 2;
+	}
+
+	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
+
+	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+	amdgpu_crtc->adjusted_clock = 0;
+	amdgpu_crtc->encoder = NULL;
+	amdgpu_crtc->connector = NULL;
+	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
+
+	return 0;
+}
+
+static int dce_v6_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
+	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
+
+	dce_v6_0_set_display_funcs(adev);
+	dce_v6_0_set_irq_funcs(adev);
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+		adev->mode_info.num_crtc = 6;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 6;
+		break;
+	case CHIP_OLAND:
+		adev->mode_info.num_crtc = 2;
+		adev->mode_info.num_hpd = 2;
+		adev->mode_info.num_dig = 2;
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dce_v6_0_sw_init(void *handle)
+{
+	int r, i;
+	bool ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
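+	/* one vblank/vline interrupt source per CRTC; hardware src_ids start at 1 */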
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+		if (r)
+			return r;
+	}
+
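+	/* pageflip interrupt src_ids 8, 10, ..., 18: one per CRTC, spaced two apart */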
+	for (i = 8; i < 20; i += 2) {
+		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+		if (r)
+			return r;
+	}
+
+	/* HPD hotplug */
+	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+	if (r)
+		return r;
+
+	adev->mode_info.mode_config_initialized = true;
+
+	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+	adev->ddev->mode_config.async_page_flip = true;
+
+	adev->ddev->mode_config.max_width = 16384;
+	adev->ddev->mode_config.max_height = 16384;
+
+	adev->ddev->mode_config.preferred_depth = 24;
+	adev->ddev->mode_config.prefer_shadow = 1;
+
+	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+	r = amdgpu_modeset_create_props(adev);
+	if (r)
+		return r;
+
+	/* allocate crtcs */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		r = dce_v6_0_crtc_init(adev, i);
+		if (r)
+			return r;
+	}
+
+	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
+	if (ret)
+		amdgpu_print_display_setup(adev->ddev);
+	else
+		return -EINVAL;
+
+	/* setup afmt */
+	dce_v6_0_afmt_init(adev);
+
+	r = dce_v6_0_audio_init(adev);
+	if (r)
+		return r;
+
+	drm_kms_helper_poll_init(adev->ddev);
+
+	return r;
+}
+
+static int dce_v6_0_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	kfree(adev->mode_info.bios_hardcoded_edid);
+
+	drm_kms_helper_poll_fini(adev->ddev);
+
+	dce_v6_0_audio_fini(adev);
+
+	dce_v6_0_afmt_fini(adev);
+
+	drm_mode_config_cleanup(adev->ddev);
+	adev->mode_info.mode_config_initialized = false;
+
+	return 0;
+}
+
+static int dce_v6_0_hw_init(void *handle)
+{
+	int i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* init dig PHYs, disp eng pll */
+	amdgpu_atombios_encoder_init_dig(adev);
+	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+
+	/* initialize hpd */
+	dce_v6_0_hpd_init(adev);
+
+	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+	}
+
+	dce_v6_0_pageflip_interrupt_init(adev);
+
+	return 0;
+}
+
+static int dce_v6_0_hw_fini(void *handle)
+{
+	int i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	dce_v6_0_hpd_fini(adev);
+
+	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+	}
+
+	dce_v6_0_pageflip_interrupt_fini(adev);
+
+	return 0;
+}
+
+static int dce_v6_0_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
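+	/* save the BIOS scratch registers so the VBIOS state survives suspend */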
+	amdgpu_atombios_scratch_regs_save(adev);
+
+	return dce_v6_0_hw_fini(handle);
+}
+
+static int dce_v6_0_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	ret = dce_v6_0_hw_init(handle);
+
+	amdgpu_atombios_scratch_regs_restore(adev);
+
+	/* turn on the BL */
+	if (adev->mode_info.bl_encoder) {
+		u8 bl_level = amdgpu_display_backlight_get_level(adev,
+								  adev->mode_info.bl_encoder);
+		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
+						    bl_level);
+	}
+
+	return ret;
+}
+
+static bool dce_v6_0_is_idle(void *handle)
+{
+	return true;
+}
+
+static int dce_v6_0_wait_for_idle(void *handle)
+{
+	return 0;
+}
+
+static int dce_v6_0_soft_reset(void *handle)
+{
+	DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
+	return 0;
+}
+
+static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+						     int crtc,
+						     enum amdgpu_interrupt_state state)
+{
+	u32 reg_block, interrupt_mask;
+
+	if (crtc >= adev->mode_info.num_crtc) {
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+
+	switch (crtc) {
+	case 0:
+		reg_block = SI_CRTC0_REGISTER_OFFSET;
+		break;
+	case 1:
+		reg_block = SI_CRTC1_REGISTER_OFFSET;
+		break;
+	case 2:
+		reg_block = SI_CRTC2_REGISTER_OFFSET;
+		break;
+	case 3:
+		reg_block = SI_CRTC3_REGISTER_OFFSET;
+		break;
+	case 4:
+		reg_block = SI_CRTC4_REGISTER_OFFSET;
+		break;
+	case 5:
+		reg_block = SI_CRTC5_REGISTER_OFFSET;
+		break;
+	default:
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		interrupt_mask = RREG32(INT_MASK + reg_block);
+		interrupt_mask &= ~VBLANK_INT_MASK;
+		WREG32(INT_MASK + reg_block, interrupt_mask);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		interrupt_mask = RREG32(INT_MASK + reg_block);
+		interrupt_mask |= VBLANK_INT_MASK;
+		WREG32(INT_MASK + reg_block, interrupt_mask);
+		break;
+	default:
+		break;
+	}
+}
+
+static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
+						    int crtc,
+						    enum amdgpu_interrupt_state state)
+{
+
+}
+
+static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+					    struct amdgpu_irq_src *src,
+					    unsigned type,
+					    enum amdgpu_interrupt_state state)
+{
+	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+	switch (type) {
+	case AMDGPU_HPD_1:
+		dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+		break;
+	case AMDGPU_HPD_2:
+		dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+		break;
+	case AMDGPU_HPD_3:
+		dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+		break;
+	case AMDGPU_HPD_4:
+		dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+		break;
+	case AMDGPU_HPD_5:
+		dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+		break;
+	case AMDGPU_HPD_6:
+		dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+		break;
+	default:
+		DRM_DEBUG("invalid hdp %d\n", type);
+		return 0;
+	}
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+					     struct amdgpu_irq_src *src,
+					     unsigned type,
+					     enum amdgpu_interrupt_state state)
+{
+	switch (type) {
+	case AMDGPU_CRTC_IRQ_VBLANK1:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VBLANK2:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VBLANK3:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VBLANK4:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VBLANK5:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VBLANK6:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE1:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE2:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE3:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE4:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE5:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE6:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
+			     struct amdgpu_irq_src *source,
+			     struct amdgpu_iv_entry *entry)
+{
+	unsigned crtc = entry->src_id - 1;
+	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
+	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+	switch (entry->src_data) {
+	case 0: /* vblank */
+		if (disp_int & interrupt_status_offsets[crtc].vblank)
+			WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+		else
+			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+		if (amdgpu_irq_enabled(adev, source, irq_type)) {
+			drm_handle_vblank(adev->ddev, crtc);
+		}
+		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+		break;
+	case 1: /* vline */
+		if (disp_int & interrupt_status_offsets[crtc].vline)
+			WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+		else
+			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+		break;
+	default:
+		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+		break;
+	}
+
+	return 0;
+}
+
+static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+						 struct amdgpu_irq_src *src,
+						 unsigned type,
+						 enum amdgpu_interrupt_state state)
+{
+	u32 reg;
+
+	if (type >= adev->mode_info.num_crtc) {
+		DRM_ERROR("invalid pageflip crtc %d\n", type);
+		return -EINVAL;
+	}
+
+	reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
+	if (state == AMDGPU_IRQ_STATE_DISABLE)
+		WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+	else
+		WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+
+	return 0;
+}
+
+static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
+				 struct amdgpu_irq_src *source,
+				 struct amdgpu_iv_entry *entry)
+{
+	unsigned long flags;
+	unsigned crtc_id;
+	struct amdgpu_crtc *amdgpu_crtc;
+	struct amdgpu_flip_work *works;
+
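+	/* pageflip src_ids are spaced two apart, so crtc = (src_id - 8) / 2 */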
+	crtc_id = (entry->src_id - 8) >> 1;
+
+	if (crtc_id >= adev->mode_info.num_crtc) {
+		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+		return -EINVAL;
+	}
+	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+	if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
+	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+		WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
+		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+
+	/* the IRQ can fire during early init, before the crtc is set up */
+	if (amdgpu_crtc == NULL)
+		return 0;
+
+	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	works = amdgpu_crtc->pflip_works;
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+						"AMDGPU_FLIP_SUBMITTED(%d)\n",
+						amdgpu_crtc->pflip_status,
+						AMDGPU_FLIP_SUBMITTED);
+		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		return 0;
+	}
+
+	/* page flip completed. clean up */
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+	amdgpu_crtc->pflip_works = NULL;
+
+	/* wake up userspace */
+	if (works->event)
+		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	drm_crtc_vblank_put(&amdgpu_crtc->base);
+	schedule_work(&works->unpin_work);
+
+	return 0;
+}
+
+static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
+			    struct amdgpu_irq_src *source,
+			    struct amdgpu_iv_entry *entry)
+{
+	uint32_t disp_int, mask, int_control, tmp;
+	unsigned hpd;
+
+	if (entry->src_data > 6) {
+		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+		return 0;
+	}
+
+	hpd = entry->src_data;
+	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
+	mask = interrupt_status_offsets[hpd].hpd;
+	int_control = hpd_int_control_offsets[hpd];
+
+	if (disp_int & mask) {
+		tmp = RREG32(int_control);
+		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+		WREG32(int_control, tmp);
+		schedule_work(&adev->hotplug_work);
+		DRM_INFO("IH: HPD%d\n", hpd + 1);
+	}
+
+	return 0;
+}
+
+static int dce_v6_0_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int dce_v6_0_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+	.name = "dce_v6_0",
+	.early_init = dce_v6_0_early_init,
+	.late_init = NULL,
+	.sw_init = dce_v6_0_sw_init,
+	.sw_fini = dce_v6_0_sw_fini,
+	.hw_init = dce_v6_0_hw_init,
+	.hw_fini = dce_v6_0_hw_fini,
+	.suspend = dce_v6_0_suspend,
+	.resume = dce_v6_0_resume,
+	.is_idle = dce_v6_0_is_idle,
+	.wait_for_idle = dce_v6_0_wait_for_idle,
+	.soft_reset = dce_v6_0_soft_reset,
+	.set_clockgating_state = dce_v6_0_set_clockgating_state,
+	.set_powergating_state = dce_v6_0_set_powergating_state,
+};
+
+static void
+dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+			  struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
+
+	/* need to call this here rather than in prepare() since we need some crtc info */
+	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	/* the scaler setup clears this on some chips, so program it here */
+	dce_v6_0_set_interleave(encoder->crtc, mode);
+
+	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+		dce_v6_0_afmt_enable(encoder, true);
+		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
+	}
+}
+
+static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+	if ((amdgpu_encoder->active_device &
+	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
+	     ENCODER_OBJECT_ID_NONE)) {
+		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+		if (dig) {
+			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
+			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
+				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
+		}
+	}
+
+	amdgpu_atombios_scratch_regs_lock(adev, true);
+
+	if (connector) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+		/* select the clock/data port if it uses a router */
+		if (amdgpu_connector->router.cd_valid)
+			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
+
+		/* turn eDP panel on for mode set */
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			amdgpu_atombios_encoder_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_ON);
+	}
+
+	/* this is needed for the pll/ss setup to work correctly in some cases */
+	amdgpu_atombios_encoder_set_crtc_source(encoder);
+	/* set up the FMT blocks */
+	dce_v6_0_program_fmt(encoder);
+}
+
+static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	/* need to call this here as we need the crtc set up */
+	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+	amdgpu_atombios_scratch_regs_lock(adev, false);
+}
+
+static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig;
+
+	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	if (amdgpu_atombios_encoder_is_digital(encoder)) {
+		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+			dce_v6_0_afmt_enable(encoder, false);
+		dig = amdgpu_encoder->enc_priv;
+		dig->dig_encoder = -1;
+	}
+	amdgpu_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
+		      struct drm_display_mode *mode,
+		      struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
+				    const struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
+	.dpms = dce_v6_0_ext_dpms,
+	.mode_fixup = dce_v6_0_ext_mode_fixup,
+	.prepare = dce_v6_0_ext_prepare,
+	.mode_set = dce_v6_0_ext_mode_set,
+	.commit = dce_v6_0_ext_commit,
+	.disable = dce_v6_0_ext_disable,
+	/* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
+	.dpms = amdgpu_atombios_encoder_dpms,
+	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+	.prepare = dce_v6_0_encoder_prepare,
+	.mode_set = dce_v6_0_encoder_mode_set,
+	.commit = dce_v6_0_encoder_commit,
+	.disable = dce_v6_0_encoder_disable,
+	.detect = amdgpu_atombios_encoder_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
+	.dpms = amdgpu_atombios_encoder_dpms,
+	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+	.prepare = dce_v6_0_encoder_prepare,
+	.mode_set = dce_v6_0_encoder_mode_set,
+	.commit = dce_v6_0_encoder_commit,
+	.detect = amdgpu_atombios_encoder_dac_detect,
+};
+
+static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
+	kfree(amdgpu_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
+	.destroy = dce_v6_0_encoder_destroy,
+};
+
+static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
+				 uint32_t encoder_enum,
+				 uint32_t supported_device,
+				 u16 caps)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+		if (amdgpu_encoder->encoder_enum == encoder_enum) {
+			amdgpu_encoder->devices |= supported_device;
+			return;
+		}
+
+	}
+
+	/* add a new one */
+	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+	if (!amdgpu_encoder)
+		return;
+
+	encoder = &amdgpu_encoder->base;
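+	/* possible_crtcs is a bitmask with one bit per CRTC this encoder can drive */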
+	switch (adev->mode_info.num_crtc) {
+	case 1:
+		encoder->possible_crtcs = 0x1;
+		break;
+	case 2:
+	default:
+		encoder->possible_crtcs = 0x3;
+		break;
+	case 4:
+		encoder->possible_crtcs = 0xf;
+		break;
+	case 6:
+		encoder->possible_crtcs = 0x3f;
+		break;
+	}
+
+	amdgpu_encoder->enc_priv = NULL;
+
+	amdgpu_encoder->encoder_enum = encoder_enum;
+	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	amdgpu_encoder->devices = supported_device;
+	amdgpu_encoder->rmx_type = RMX_OFF;
+	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+	amdgpu_encoder->is_ext_encoder = false;
+	amdgpu_encoder->caps = caps;
+
+	switch (amdgpu_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+				 DRM_MODE_ENCODER_DAC, NULL);
+		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			amdgpu_encoder->rmx_type = RMX_FULL;
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_LVDS, NULL);
+			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
+		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_DAC, NULL);
+			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+		} else {
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_TMDS, NULL);
+			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+		}
+		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_SI170B:
+	case ENCODER_OBJECT_ID_CH7303:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+	case ENCODER_OBJECT_ID_TITFP513:
+	case ENCODER_OBJECT_ID_VT1623:
+	case ENCODER_OBJECT_ID_HDMI_SI1930:
+	case ENCODER_OBJECT_ID_TRAVIS:
+	case ENCODER_OBJECT_ID_NUTMEG:
+		/* these are handled by the primary encoders */
+		amdgpu_encoder->is_ext_encoder = true;
+		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_LVDS, NULL);
+		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_DAC, NULL);
+		else
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_TMDS, NULL);
+		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
+		break;
+	}
+}
+
+static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
+	.set_vga_render_state = &dce_v6_0_set_vga_render_state,
+	.bandwidth_update = &dce_v6_0_bandwidth_update,
+	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
+	.vblank_wait = &dce_v6_0_vblank_wait,
+	.is_display_hung = &dce_v6_0_is_display_hung,
+	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
+	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
+	.hpd_sense = &dce_v6_0_hpd_sense,
+	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
+	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
+	.page_flip = &dce_v6_0_page_flip,
+	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
+	.add_encoder = &dce_v6_0_encoder_add,
+	.add_connector = &amdgpu_connector_add,
+	.stop_mc_access = &dce_v6_0_stop_mc_access,
+	.resume_mc_access = &dce_v6_0_resume_mc_access,
+};
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
+{
+	if (adev->mode_info.funcs == NULL)
+		adev->mode_info.funcs = &dce_v6_0_display_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
+	.set = dce_v6_0_set_crtc_interrupt_state,
+	.process = dce_v6_0_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
+	.set = dce_v6_0_set_pageflip_interrupt_state,
+	.process = dce_v6_0_pageflip_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
+	.set = dce_v6_0_set_hpd_interrupt_state,
+	.process = dce_v6_0_hpd_irq,
+};
+
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
+
+	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
+
+	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
similarity index 71%
copy from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
copy to drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index d24b888..6a55281 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -20,16 +20,10 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
 
-#include "hwmgr.h"
+#ifndef __DCE_V6_0_H__
+#define __DCE_V6_0_H__
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-				struct pp_power_state *, void *, uint32_t));
+extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
 
 #endif
-
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 4fdfab1..abd5213 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -604,6 +604,52 @@
 	WREG32(mmVGA_RENDER_CONTROL, tmp);
 }
 
+static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
+{
+	int num_crtc = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+		num_crtc = 6;
+		break;
+	case CHIP_KAVERI:
+		num_crtc = 4;
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		num_crtc = 2;
+		break;
+	default:
+		num_crtc = 0;
+	}
+	return num_crtc;
+}
+
+void dce_v8_0_disable_dce(struct amdgpu_device *adev)
+{
+	/* disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
+	if (amdgpu_atombios_has_dce_engine_info(adev)) {
+		u32 tmp;
+		int crtc_enabled, i;
+
+		dce_v8_0_set_vga_render_state(adev, false);
+
+		/* disable the CRTCs */
+		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
+			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+						     CRTC_CONTROL, CRTC_MASTER_EN);
+			if (crtc_enabled) {
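+				/* take the CRTC update lock while clearing CRTC_MASTER_EN */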
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+		}
+	}
+}
+
 static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
@@ -1501,13 +1547,13 @@
 
 			if (sad->format == eld_reg_to_type[i][1]) {
 				if (sad->channels > max_channels) {
-				value = (sad->channels <<
-				 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
-				(sad->byte2 <<
-				 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
-				(sad->freq <<
-				 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
-				max_channels = sad->channels;
+					value = (sad->channels <<
+						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+					        (sad->byte2 <<
+						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+					        (sad->freq <<
+						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
+					max_channels = sad->channels;
 				}
 
 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
@@ -1613,7 +1659,7 @@
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	uint32_t offset = dig->afmt->offset;
 
-	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
+	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
 	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
 
 	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
@@ -1693,6 +1739,7 @@
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (!dig->afmt->enabled)
 		return;
+
 	offset = dig->afmt->offset;
 
 	/* hdmi deep color mode general control packets setup, if bpc > 8 */
@@ -1817,7 +1864,7 @@
 
 	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
 		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
-		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK); /* required for audio info values to be updated */
+		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
 
 	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
 		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
@@ -1826,13 +1873,12 @@
 	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
 		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
 
-	/* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
 	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
 	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
 	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
 	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
 
-	/* enable audio after to setting up hw */
+	/* enable audio after setting up hw */
 	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
 }
 
@@ -1952,6 +1998,7 @@
 	u32 viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
+	char *format_name;
 
 	/* no fb bound */
 	if (!atomic && !crtc->primary->fb) {
@@ -1999,7 +2046,7 @@
 	case DRM_FORMAT_XRGB4444:
 	case DRM_FORMAT_ARGB4444:
 		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
-			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
 #ifdef __BIG_ENDIAN
 		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
 #endif
@@ -2056,8 +2103,9 @@
 		bypass_lut = true;
 		break;
 	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			  drm_get_format_name(target_fb->pixel_format));
+		format_name = drm_get_format_name(target_fb->pixel_format);
+		DRM_ERROR("Unsupported screen format %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
@@ -2137,8 +2185,8 @@
 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* set pageflip to happen only at start of vblank interval (front porch) */
-	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -2552,7 +2600,7 @@
 	.gamma_set = dce_v8_0_crtc_gamma_set,
 	.set_config = amdgpu_crtc_set_config,
 	.destroy = dce_v8_0_crtc_destroy,
-	.page_flip = amdgpu_crtc_page_flip,
+	.page_flip_target = amdgpu_crtc_page_flip_target,
 };
 
 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2653,7 +2701,7 @@
 	case ATOM_PPLL2:
 		/* disable the ppll */
 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
-					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+                                                 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
 		break;
 	case ATOM_PPLL0:
 		/* disable the ppll */
@@ -2803,21 +2851,20 @@
 	dce_v8_0_set_display_funcs(adev);
 	dce_v8_0_set_irq_funcs(adev);
 
+	adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
+
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
-		adev->mode_info.num_crtc = 6;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;
 		break;
 	case CHIP_KAVERI:
-		adev->mode_info.num_crtc = 4;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 7;
 		break;
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
-		adev->mode_info.num_crtc = 2;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6; /* ? */
 		break;
@@ -3236,7 +3283,6 @@
 			drm_handle_vblank(adev->ddev, crtc);
 		}
 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
 		break;
 	case 1: /* vline */
 		if (disp_int & interrupt_status_offsets[crtc].vline)
@@ -3245,7 +3291,6 @@
 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
 		break;
 	default:
 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 7701685..7d0770c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -26,4 +26,6 @@
 
 extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
 
+void dce_v8_0_disable_dce(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
new file mode 100644
index 0000000..619b604
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#include "dce_v8_0.h"
+#endif
+#include "dce_v10_0.h"
+#include "dce_v11_0.h"
+#include "dce_virtual.h"
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+				  struct amdgpu_irq_src *source,
+				  struct amdgpu_iv_entry *entry);
+
+/**
+ * dce_virtual_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * No-op for the virtual display; there is no hardware vblank to wait on.
+ */
+static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+	return;
+}
+
+static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+	return 0;
+}
+
+static void dce_virtual_page_flip(struct amdgpu_device *adev,
+			      int crtc_id, u64 crtc_base, bool async)
+{
+	return;
+}
+
+static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+					u32 *vbl, u32 *position)
+{
+	*vbl = 0;
+	*position = 0;
+
+	return -EINVAL;
+}
+
+static bool dce_virtual_hpd_sense(struct amdgpu_device *adev,
+			       enum amdgpu_hpd_id hpd)
+{
+	return true;
+}
+
+static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev,
+				      enum amdgpu_hpd_id hpd)
+{
+	return;
+}
+
+static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
+static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
+{
+	return false;
+}
+
+void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
+			      struct amdgpu_mode_mc_save *save)
+{
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+#ifdef CONFIG_DRM_AMDGPU_CIK
+		dce_v8_0_disable_dce(adev);
+#endif
+		break;
+	case CHIP_FIJI:
+	case CHIP_TONGA:
+		dce_v10_0_disable_dce(adev);
+		break;
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+		dce_v11_0_disable_dce(adev);
+		break;
+	case CHIP_TOPAZ:
+		/* no DCE */
+		return;
+	default:
+		DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+	}
+}
+
+void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
+				struct amdgpu_mode_mc_save *save)
+{
+	return;
+}
+
+void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
+				    bool render)
+{
+	return;
+}
+
+/**
+ * dce_virtual_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * No-op for the virtual display; there are no hardware watermarks to program.
+ */
+static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
+{
+	return;
+}
+
+static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+				      u16 *green, u16 *blue, uint32_t size)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	int i;
+
+	/* userspace palettes are always correct as is */
+	for (i = 0; i < size; i++) {
+		amdgpu_crtc->lut_r[i] = red[i] >> 6;
+		amdgpu_crtc->lut_g[i] = green[i] >> 6;
+		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+	}
+
+	return 0;
+}
+
+static void dce_virtual_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
+	.cursor_set2 = NULL,
+	.cursor_move = NULL,
+	.gamma_set = dce_virtual_crtc_gamma_set,
+	.set_config = amdgpu_crtc_set_config,
+	.destroy = dce_virtual_crtc_destroy,
+	.page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	unsigned type;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		amdgpu_crtc->enabled = true;
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
+		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+		amdgpu_crtc->enabled = false;
+		break;
+	}
+}
+
+static void dce_virtual_crtc_prepare(struct drm_crtc *crtc)
+{
+	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
+{
+	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (crtc->primary->fb) {
+		int r;
+		struct amdgpu_framebuffer *amdgpu_fb;
+		struct amdgpu_bo *rbo;
+
+		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(rbo, false);
+		if (unlikely(r))
+			DRM_ERROR("failed to reserve rbo before unpin\n");
+		else {
+			amdgpu_bo_unpin(rbo);
+			amdgpu_bo_unreserve(rbo);
+		}
+	}
+
+	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+	amdgpu_crtc->encoder = NULL;
+	amdgpu_crtc->connector = NULL;
+}
+
+static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode,
+				  int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	/* update the stored hw mode for dpm */
+	amdgpu_crtc->hw_mode = *adjusted_mode;
+
+	return 0;
+}
+
+static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			amdgpu_crtc->encoder = encoder;
+			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+			break;
+		}
+	}
+	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+		amdgpu_crtc->encoder = NULL;
+		amdgpu_crtc->connector = NULL;
+		return false;
+	}
+
+	return true;
+}
+
+static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				  struct drm_framebuffer *old_fb)
+{
+	return 0;
+}
+
+static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc)
+{
+	return;
+}
+
+static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 int x, int y, enum mode_set_atomic state)
+{
+	return 0;
+}
+
+static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
+	.dpms = dce_virtual_crtc_dpms,
+	.mode_fixup = dce_virtual_crtc_mode_fixup,
+	.mode_set = dce_virtual_crtc_mode_set,
+	.mode_set_base = dce_virtual_crtc_set_base,
+	.mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
+	.prepare = dce_virtual_crtc_prepare,
+	.commit = dce_virtual_crtc_commit,
+	.load_lut = dce_virtual_crtc_load_lut,
+	.disable = dce_virtual_crtc_disable,
+};
+
+static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
+{
+	struct amdgpu_crtc *amdgpu_crtc;
+	int i;
+
+	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+	if (amdgpu_crtc == NULL)
+		return -ENOMEM;
+
+	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+	amdgpu_crtc->crtc_id = index;
+	adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+	for (i = 0; i < 256; i++) {
+		amdgpu_crtc->lut_r[i] = i << 2;
+		amdgpu_crtc->lut_g[i] = i << 2;
+		amdgpu_crtc->lut_b[i] = i << 2;
+	}
+
+	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+	amdgpu_crtc->encoder = NULL;
+	amdgpu_crtc->connector = NULL;
+	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
+
+	return 0;
+}
+
+static int dce_virtual_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
+	dce_virtual_set_display_funcs(adev);
+	dce_virtual_set_irq_funcs(adev);
+
+	adev->mode_info.num_crtc = 1;
+	adev->mode_info.num_hpd = 1;
+	adev->mode_info.num_dig = 1;
+	return 0;
+}
+
+static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+{
+	struct amdgpu_i2c_bus_rec ddc_bus;
+	struct amdgpu_router router;
+	struct amdgpu_hpd hpd;
+
+	/* look up gpio for ddc, hpd */
+	ddc_bus.valid = false;
+	hpd.hpd = AMDGPU_HPD_NONE;
+	/* needed for aux chan transactions */
+	ddc_bus.hpd = hpd.hpd;
+
+	memset(&router, 0, sizeof(router));
+	router.ddc_valid = false;
+	router.cd_valid = false;
+	amdgpu_display_add_connector(adev,
+				      0,
+				      ATOM_DEVICE_CRT1_SUPPORT,
+				      DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
+				      CONNECTOR_OBJECT_ID_VIRTUAL,
+				      &hpd,
+				      &router);
+
+	amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
+				   ATOM_DEVICE_CRT1_SUPPORT,
+				   0);
+
+	amdgpu_link_encoder_connector(adev->ddev);
+
+	return true;
+}
+
+static int dce_virtual_sw_init(void *handle)
+{
+	int r, i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq);
+	if (r)
+		return r;
+
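+	/* the virtual display has no hardware vblank counter */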
+	adev->ddev->max_vblank_count = 0;
+
+	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+	adev->ddev->mode_config.max_width = 16384;
+	adev->ddev->mode_config.max_height = 16384;
+
+	adev->ddev->mode_config.preferred_depth = 24;
+	adev->ddev->mode_config.prefer_shadow = 1;
+
+	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+	r = amdgpu_modeset_create_props(adev);
+	if (r)
+		return r;
+
+	/* allocate crtcs */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		r = dce_virtual_crtc_init(adev, i);
+		if (r)
+			return r;
+	}
+
+	dce_virtual_get_connector_info(adev);
+	amdgpu_print_display_setup(adev->ddev);
+
+	drm_kms_helper_poll_init(adev->ddev);
+
+	adev->mode_info.mode_config_initialized = true;
+	return 0;
+}
+
+static int dce_virtual_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	kfree(adev->mode_info.bios_hardcoded_edid);
+
+	drm_kms_helper_poll_fini(adev->ddev);
+
+	drm_mode_config_cleanup(adev->ddev);
+	adev->mode_info.mode_config_initialized = false;
+	return 0;
+}
+
+static int dce_virtual_hw_init(void *handle)
+{
+	return 0;
+}
+
+static int dce_virtual_hw_fini(void *handle)
+{
+	return 0;
+}
+
+static int dce_virtual_suspend(void *handle)
+{
+	return dce_virtual_hw_fini(handle);
+}
+
+static int dce_virtual_resume(void *handle)
+{
+	return dce_virtual_hw_init(handle);
+}
+
+static bool dce_virtual_is_idle(void *handle)
+{
+	return true;
+}
+
+static int dce_virtual_wait_for_idle(void *handle)
+{
+	return 0;
+}
+
+static int dce_virtual_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static int dce_virtual_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int dce_virtual_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs dce_virtual_ip_funcs = {
+	.name = "dce_virtual",
+	.early_init = dce_virtual_early_init,
+	.late_init = NULL,
+	.sw_init = dce_virtual_sw_init,
+	.sw_fini = dce_virtual_sw_fini,
+	.hw_init = dce_virtual_hw_init,
+	.hw_fini = dce_virtual_hw_fini,
+	.suspend = dce_virtual_suspend,
+	.resume = dce_virtual_resume,
+	.is_idle = dce_virtual_is_idle,
+	.wait_for_idle = dce_virtual_wait_for_idle,
+	.soft_reset = dce_virtual_soft_reset,
+	.set_clockgating_state = dce_virtual_set_clockgating_state,
+	.set_powergating_state = dce_virtual_set_powergating_state,
+};
+
+/* these are handled by the primary encoders */
+static void dce_virtual_encoder_prepare(struct drm_encoder *encoder)
+{
+	return;
+}
+
+static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
+{
+	return;
+}
+
+static void
+dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
+		      struct drm_display_mode *mode,
+		      struct drm_display_mode *adjusted_mode)
+{
+	return;
+}
+
+static void dce_virtual_encoder_disable(struct drm_encoder *encoder)
+{
+	return;
+}
+
+static void
+dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	return;
+}
+
+static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
+				    const struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	/* set the active encoder to connector routing */
+	amdgpu_encoder_set_active_device(encoder);
+
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = {
+	.dpms = dce_virtual_encoder_dpms,
+	.mode_fixup = dce_virtual_encoder_mode_fixup,
+	.prepare = dce_virtual_encoder_prepare,
+	.mode_set = dce_virtual_encoder_mode_set,
+	.commit = dce_virtual_encoder_commit,
+	.disable = dce_virtual_encoder_disable,
+};
+
+static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+	kfree(amdgpu_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
+	.destroy = dce_virtual_encoder_destroy,
+};
+
+static void dce_virtual_encoder_add(struct amdgpu_device *adev,
+				 uint32_t encoder_enum,
+				 uint32_t supported_device,
+				 u16 caps)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+		if (amdgpu_encoder->encoder_enum == encoder_enum) {
+			amdgpu_encoder->devices |= supported_device;
+			return;
+		}
+
+	}
+
+	/* add a new one */
+	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+	if (!amdgpu_encoder)
+		return;
+
+	encoder = &amdgpu_encoder->base;
+	encoder->possible_crtcs = 0x1;
+	amdgpu_encoder->enc_priv = NULL;
+	amdgpu_encoder->encoder_enum = encoder_enum;
+	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	amdgpu_encoder->devices = supported_device;
+	amdgpu_encoder->rmx_type = RMX_OFF;
+	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+	amdgpu_encoder->is_ext_encoder = false;
+	amdgpu_encoder->caps = caps;
+
+	drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL, NULL);
+	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+	DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+}
+
+static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
+	.set_vga_render_state = &dce_virtual_set_vga_render_state,
+	.bandwidth_update = &dce_virtual_bandwidth_update,
+	.vblank_get_counter = &dce_virtual_vblank_get_counter,
+	.vblank_wait = &dce_virtual_vblank_wait,
+	.is_display_hung = &dce_virtual_is_display_hung,
+	.backlight_set_level = NULL,
+	.backlight_get_level = NULL,
+	.hpd_sense = &dce_virtual_hpd_sense,
+	.hpd_set_polarity = &dce_virtual_hpd_set_polarity,
+	.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
+	.page_flip = &dce_virtual_page_flip,
+	.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
+	.add_encoder = &dce_virtual_encoder_add,
+	.add_connector = &amdgpu_connector_add,
+	.stop_mc_access = &dce_virtual_stop_mc_access,
+	.resume_mc_access = &dce_virtual_resume_mc_access,
+};
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
+{
+	if (adev->mode_info.funcs == NULL)
+		adev->mode_info.funcs = &dce_virtual_display_funcs;
+}
+
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+	struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info, vblank_timer);
+	struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device, mode_info);
+	unsigned crtc = 0;
+
+	drm_handle_vblank(adev->ddev, crtc);
+	dce_virtual_pageflip_irq(adev, NULL, NULL);
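+	/* re-arm the timer by hand and return HRTIMER_NORESTART instead of HRTIMER_RESTART */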
+	hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+	return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+						     int crtc,
+						     enum amdgpu_interrupt_state state)
+{
+	if (crtc >= adev->mode_info.num_crtc) {
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+
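+	/* there is no real vblank interrupt, so emulate one with a periodic hrtimer */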
+	if (state && !adev->mode_info.vsync_timer_enabled) {
+		DRM_DEBUG("Enable software vsync timer\n");
+		hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+		adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
+		hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+	} else if (!state && adev->mode_info.vsync_timer_enabled) {
+		DRM_DEBUG("Disable software vsync timer\n");
+		hrtimer_cancel(&adev->mode_info.vblank_timer);
+	}
+
+	adev->mode_info.vsync_timer_enabled = state;
+	DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+					  struct amdgpu_irq_src *source,
+					  unsigned type,
+					  enum amdgpu_interrupt_state state)
+{
+	switch (type) {
+	case AMDGPU_CRTC_IRQ_VBLANK1:
+		dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
+					  int crtc)
+{
+	if (crtc >= adev->mode_info.num_crtc) {
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+}
+
+static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
+			      struct amdgpu_irq_src *source,
+			      struct amdgpu_iv_entry *entry)
+{
+	unsigned crtc = 0;
+	unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
+
+	dce_virtual_crtc_vblank_int_ack(adev, crtc);
+
+	if (amdgpu_irq_enabled(adev, source, irq_type)) {
+		drm_handle_vblank(adev->ddev, crtc);
+	}
+	dce_virtual_pageflip_irq(adev, NULL, NULL);
+	DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+	return 0;
+}
+
+static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
+					    struct amdgpu_irq_src *src,
+					    unsigned type,
+					    enum amdgpu_interrupt_state state)
+{
+	if (type >= adev->mode_info.num_crtc) {
+		DRM_ERROR("invalid pageflip crtc %d\n", type);
+		return -EINVAL;
+	}
+	DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
+
+	return 0;
+}
+
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+				  struct amdgpu_irq_src *source,
+				  struct amdgpu_iv_entry *entry)
+{
+	unsigned long flags;
+	unsigned crtc_id = 0;
+	struct amdgpu_crtc *amdgpu_crtc;
+	struct amdgpu_flip_work *works;
+
+	if (crtc_id >= adev->mode_info.num_crtc) {
+		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+		return -EINVAL;
+	}
+	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+	/* the IRQ can fire during early init, before the crtc is set up */
+	if (amdgpu_crtc == NULL)
+		return 0;
+
+	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	works = amdgpu_crtc->pflip_works;
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+			"AMDGPU_FLIP_SUBMITTED(%d)\n",
+			amdgpu_crtc->pflip_status,
+			AMDGPU_FLIP_SUBMITTED);
+		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		return 0;
+	}
+
+	/* page flip completed. clean up */
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+	amdgpu_crtc->pflip_works = NULL;
+
+	/* wake up userspace */
+	if (works->event)
+		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	drm_crtc_vblank_put(&amdgpu_crtc->base);
+	schedule_work(&works->unpin_work);
+
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
+	.set = dce_virtual_set_crtc_irq_state,
+	.process = dce_virtual_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
+	.set = dce_virtual_set_pageflip_irq_state,
+	.process = dce_virtual_pageflip_irq,
+};
+
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
+
+	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
similarity index 68%
copy from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
copy to drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index d24b888..e239243 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2014 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,16 +20,12 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
 
-#include "hwmgr.h"
+#ifndef __DCE_VIRTUAL_H__
+#define __DCE_VIRTUAL_H__
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-				struct pp_power_state *, void *, uint32_t));
+extern const struct amd_ip_funcs dce_virtual_ip_funcs;
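+/* period of the emulated vblank in ns; 16666666 ns is roughly a 60 Hz refresh */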
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
new file mode 100644
index 0000000..410b29c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -0,0 +1,3233 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_ucode.h"
+#include "si/clearstate_si.h"
+#include "si/sid.h"
+
+#define GFX6_NUM_GFX_RINGS     1
+#define GFX6_NUM_COMPUTE_RINGS 2
+#define STATIC_PER_CU_PG_ENABLE                    (1 << 3)
+#define DYN_PER_CU_PG_ENABLE                       (1 << 2)
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+/* static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev); */
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
+
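+/*
+ * Register list used by the RLC for save/restore around power gating:
+ * each entry combines a select value (upper 16 bits) with a dword
+ * register offset (lower 16 bits) and is followed by a dword
+ * initialised to 0.
+ */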
+static const u32 verde_rlc_save_restore_register_list[] =
+{
+	(0x8000 << 16) | (0x98f4 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x98f4 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0xe80 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0xe80 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x89bc >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x89bc >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x8c1c >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x8c1c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x98f0 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xe7c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9148 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9148 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9150 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x897c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8d8c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac54 >> 2),
+	0x00000000,
+	0x3,
+	(0x9c00 << 16) | (0x98f8 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9910 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9914 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9918 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x991c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9920 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9924 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9928 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x992c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9930 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9934 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9938 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x993c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9940 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9944 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9948 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x994c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9950 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9954 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9958 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x995c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9960 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9964 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9968 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x996c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9970 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9974 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9978 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x997c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9980 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9984 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9988 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x998c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c00 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c14 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c04 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c08 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9b7c >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9b7c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0xe84 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0xe84 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x89c0 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x89c0 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x914c >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x914c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x8c20 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x8c20 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9354 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9354 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9060 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9364 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9100 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x913c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x90e0 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x90e4 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x90e8 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x90e0 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x90e4 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x90e8 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8bcc >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8b24 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88c4 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8e50 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c0c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8e58 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8e5c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9508 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x950c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9494 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac0c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac10 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac14 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xae00 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac08 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88d4 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88c8 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88cc >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x89b0 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8b10 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8a14 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9830 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9834 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9838 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9a10 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9874 >> 2),
+	0x00000000,
+	(0x8001 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8001 << 16) | (0x9874 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9874 >> 2),
+	0x00000000,
+	(0x8041 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8041 << 16) | (0x9874 >> 2),
+	0x00000000,
+	0x00000000
+};
+
+static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+	const char *chip_name;
+	char fw_name[30];
+	int err;
+	const struct gfx_firmware_header_v1_0 *cp_hdr;
+	const struct rlc_firmware_header_v1_0 *rlc_hdr;
+
+	DRM_DEBUG("\n");
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		chip_name = "tahiti";
+		break;
+	case CHIP_PITCAIRN:
+		chip_name = "pitcairn";
+		break;
+	case CHIP_VERDE:
+		chip_name = "verde";
+		break;
+	case CHIP_OLAND:
+		chip_name = "oland";
+		break;
+	case CHIP_HAINAN:
+		chip_name = "hainan";
+		break;
+	default:
+		BUG();
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+	if (err)
+		goto out;
+	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->gfx.me_fw);
+	if (err)
+		goto out;
+	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+	if (err)
+		goto out;
+	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+	if (err)
+		goto out;
+	rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+
+out:
+	if (err) {
+		printk(KERN_ERR
+		       "gfx6: Failed to load firmware \"%s\"\n",
+		       fw_name);
+		release_firmware(adev->gfx.pfp_fw);
+		adev->gfx.pfp_fw = NULL;
+		release_firmware(adev->gfx.me_fw);
+		adev->gfx.me_fw = NULL;
+		release_firmware(adev->gfx.ce_fw);
+		adev->gfx.ce_fw = NULL;
+		release_firmware(adev->gfx.rlc_fw);
+		adev->gfx.rlc_fw = NULL;
+	}
+	return err;
+}
+
+static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
+{
+	const u32 num_tile_mode_states = 32;
+	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
+	switch (adev->gfx.config.mem_row_size_in_kb) {
+	case 1:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+		break;
+	case 2:
+	default:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+		break;
+	case 4:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+		break;
+	}
+
+	if (adev->asic_type == CHIP_VERDE ||
+	    adev->asic_type == CHIP_OLAND ||
+	    adev->asic_type == CHIP_HAINAN) {
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:  /* non-AA compressed depth or any compressed stencil */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 1:  /* 2xAA/4xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 2:  /* 8xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 8:  /* 1D and 1D Array Surfaces */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 9:  /* Displayable maps. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 10:  /* Display 8bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 11:  /* Display 16bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 12:  /* Display 32bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 13:  /* Thin. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 14:  /* Thin 8 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 15:  /* Thin 16 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 16:  /* Thin 32 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 17:  /* Thin 64 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 21:  /* 8 bpp PRT. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 22:  /* 16 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 23:  /* 32 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 24:  /* 64 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 25:  /* 128 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 NUM_BANKS(ADDR_SURF_8_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+		}
+	} else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:  /* non-AA compressed depth or any compressed stencil */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 1:  /* 2xAA/4xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 2:  /* 8xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 8:  /* 1D and 1D Array Surfaces */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 9:  /* Displayable maps. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 10:  /* Display 8bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 11:  /* Display 16bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 12:  /* Display 32bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 13:  /* Thin. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 14:  /* Thin 8 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 15:  /* Thin 16 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 16:  /* Thin 32 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 17:  /* Thin 64 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 21:  /* 8 bpp PRT. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 22:  /* 16 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 23:  /* 32 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 24:  /* 64 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 25:  /* 128 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 NUM_BANKS(ADDR_SURF_8_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+		}
+	} else {
+		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+	}
+}
+
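+/*
+ * Program GRBM_GFX_INDEX so that subsequent indexed register accesses
+ * target the given SE/SH/instance; 0xffffffff for an argument selects
+ * broadcast writes.
+ */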
+static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+				  u32 sh_num, u32 instance)
+{
+	u32 data;
+
+	if (instance == 0xffffffff)
+		data = INSTANCE_BROADCAST_WRITES;
+	else
+		data = INSTANCE_INDEX(instance);
+
+	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+	else if (se_num == 0xffffffff)
+		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+	else if (sh_num == 0xffffffff)
+		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+	else
+		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+	WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 gfx_v6_0_create_bitmask(u32 bit_width)
+{
+	return (u32)(((u64)1 << bit_width) - 1);
+}
+
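+/*
+ * Return the mask of disabled render backends (fused off via
+ * CC_RB_BACKEND_DISABLE or user disabled via GC_USER_RB_BACKEND_DISABLE)
+ * for the currently selected SE/SH.
+ */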
+static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
+				    u32 max_rb_num_per_se,
+				    u32 sh_per_se)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_RB_BACKEND_DISABLE);
+	data &= BACKEND_DISABLE_MASK;
+	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+	data >>= BACKEND_DISABLE_SHIFT;
+
+	mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+	return data & mask;
+}
+
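+/*
+ * Collect the per-SE/SH disabled-RB masks, derive the global enabled-RB
+ * mask, and program PA_SC_RASTER_CONFIG for each SE accordingly.
+ */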
+static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
+			      u32 se_num, u32 sh_per_se,
+			      u32 max_rb_num_per_se)
+{
+	int i, j;
+	u32 data, mask;
+	u32 disabled_rbs = 0;
+	u32 enabled_rbs = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+			data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
+			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+		}
+	}
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	mask = 1;
+	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+		if (!(disabled_rbs & mask))
+			enabled_rbs |= mask;
+		mask <<= 1;
+	}
+
+	adev->gfx.config.backend_enable_mask = enabled_rbs;
+	adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < se_num; i++) {
+		gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
+		data = 0;
+		for (j = 0; j < sh_per_se; j++) {
+			switch (enabled_rbs & 3) {
+			case 1:
+				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+				break;
+			case 2:
+				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+				break;
+			case 3:
+			default:
+				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+				break;
+			}
+			enabled_rbs >>= 2;
+		}
+		WREG32(PA_SC_RASTER_CONFIG, data);
+	}
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+}
+/*
+static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+}
+*/
+
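+/*
+ * Return the bitmask of active CUs (neither fused off nor user disabled)
+ * for the currently selected SE/SH.
+ */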
+static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+	data &= INACTIVE_CUS_MASK;
+	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+	data >>= INACTIVE_CUS_SHIFT;
+
+	mask = gfx_v6_0_create_bitmask(cu_per_sh);
+
+	return ~data & mask;
+}
+
+static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
+			 u32 se_num, u32 sh_per_se,
+			 u32 cu_per_sh)
+{
+	int i, j, k;
+	u32 data, mask;
+	u32 active_cu = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+			active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
+
+			/* find the first active CU in this SH and clear its bit in SPI_STATIC_THREAD_MGMT_3 */
+			for (k = 0; k < 16; k++) {
+				mask = 1 << k;
+				if (active_cu & mask) {
+					data &= ~mask;
+					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+					break;
+				}
+			}
+		}
+	}
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
+{
+	u32 gb_addr_config = 0;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 sx_debug_1;
+	u32 hdp_host_path_cntl;
+	u32 tmp;
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		adev->gfx.config.max_shader_engines = 2;
+		adev->gfx.config.max_tile_pipes = 12;
+		adev->gfx.config.max_cu_per_sh = 8;
+		adev->gfx.config.max_sh_per_se = 2;
+		adev->gfx.config.max_backends_per_se = 4;
+		adev->gfx.config.max_texture_channel_caches = 12;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_PITCAIRN:
+		adev->gfx.config.max_shader_engines = 2;
+		adev->gfx.config.max_tile_pipes = 8;
+		adev->gfx.config.max_cu_per_sh = 5;
+		adev->gfx.config.max_sh_per_se = 2;
+		adev->gfx.config.max_backends_per_se = 4;
+		adev->gfx.config.max_texture_channel_caches = 8;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_VERDE:
+		adev->gfx.config.max_shader_engines = 1;
+		adev->gfx.config.max_tile_pipes = 4;
+		adev->gfx.config.max_cu_per_sh = 5;
+		adev->gfx.config.max_sh_per_se = 2;
+		adev->gfx.config.max_backends_per_se = 4;
+		adev->gfx.config.max_texture_channel_caches = 4;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_OLAND:
+		adev->gfx.config.max_shader_engines = 1;
+		adev->gfx.config.max_tile_pipes = 4;
+		adev->gfx.config.max_cu_per_sh = 6;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 2;
+		adev->gfx.config.max_texture_channel_caches = 4;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 16;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_HAINAN:
+		adev->gfx.config.max_shader_engines = 1;
+		adev->gfx.config.max_tile_pipes = 4;
+		adev->gfx.config.max_cu_per_sh = 5;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 1;
+		adev->gfx.config.max_texture_channel_caches = 2;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 16;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 1);
+	WREG32(SRBM_INT_ACK, 1);
+
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+	adev->gfx.config.mem_max_burst_length_bytes = 256;
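+	/* DRAM row size in KB: 4 * 2^(8 + NOOFCOLS) bytes, capped at 4KB below */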
+	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+	adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+	if (adev->gfx.config.mem_row_size_in_kb > 4)
+		adev->gfx.config.mem_row_size_in_kb = 4;
+	adev->gfx.config.shader_engine_tile_size = 32;
+	adev->gfx.config.num_gpus = 1;
+	adev->gfx.config.multi_gpu_tile_size = 64;
+
+	gb_addr_config &= ~ROW_SIZE_MASK;
+	switch (adev->gfx.config.mem_row_size_in_kb) {
+	case 1:
+	default:
+		gb_addr_config |= ROW_SIZE(0);
+		break;
+	case 2:
+		gb_addr_config |= ROW_SIZE(1);
+		break;
+	case 4:
+		gb_addr_config |= ROW_SIZE(2);
+		break;
+	}
+	adev->gfx.config.gb_addr_config = gb_addr_config;
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CALC, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+#if 0
+	if (adev->has_uvd) {
+		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+	}
+#endif
+	gfx_v6_0_tiling_mode_table_init(adev);
+
+	gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
+		    adev->gfx.config.max_sh_per_se,
+		    adev->gfx.config.max_backends_per_se);
+
+	gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
+		     adev->gfx.config.max_sh_per_se,
+		     adev->gfx.config.max_cu_per_sh);
+
+	gfx_v6_0_get_cu_info(adev);
+
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_frontend) |
+				 SC_BACKEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_backend) |
+				 SC_HIZ_TILE_FIFO_SIZE(adev->gfx.config.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(adev->gfx.config.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+	WREG32(CP_PERFMON_CNTL, 0);
+	WREG32(SQ_CONFIG, 0);
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+}
+
+static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
+{
+	int i;
+
+	adev->gfx.scratch.num_reg = 7;
+	adev->gfx.scratch.reg_base = SCRATCH_REG0;
+	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+		adev->gfx.scratch.free[i] = true;
+		adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
+	}
+}
+
+static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = amdgpu_gfx_scratch_get(adev, &scratch);
+	if (r) {
+		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+
+	r = amdgpu_ring_alloc(ring, 3);
+	if (r) {
+		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
+		amdgpu_gfx_scratch_free(adev, scratch);
+		return r;
+	}
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
+	amdgpu_ring_write(ring, 0xDEADBEEF);
+	amdgpu_ring_commit(ring);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < adev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+			  ring->idx, scratch, tmp);
+		r = -EINVAL;
+	}
+	amdgpu_gfx_scratch_free(adev, scratch);
+	return r;
+}
+
+static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+	/* flush hdp cache */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+	amdgpu_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 0x1);
+}
+
+/**
+ * gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Emits an hdp invalidate on the cp.
+ */
+static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+	amdgpu_ring_write(ring, HDP_DEBUG0);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 0x1);
+}
+
+static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+				     u64 seq, unsigned flags)
+{
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+	/* flush read cache over gart */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	amdgpu_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+			  PACKET3_TC_ACTION_ENA |
+			  PACKET3_SH_KCACHE_ACTION_ENA |
+			  PACKET3_SH_ICACHE_ACTION_ENA);
+	amdgpu_ring_write(ring, 0xFFFFFFFF);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 10); /* poll interval */
+	/* EVENT_WRITE_EOP - flush caches, send int */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+	amdgpu_ring_write(ring, lower_32_bits(seq));
+	amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_ib *ib,
+				  unsigned vm_id, bool ctx_switch)
+{
+	u32 header, control = 0;
+
+	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
+	if (ctx_switch) {
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+
+	if (ib->flags & AMDGPU_IB_FLAG_CE)
+		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+	else
+		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+	control |= ib->length_dw | (vm_id << 24);
+
+	amdgpu_ring_write(ring, header);
+	amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+	amdgpu_ring_write(ring, control);
+}
+
+/**
+ * gfx_v6_0_ring_test_ib - basic ring IB test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @timeout: how long to wait (in jiffies) before the test is treated as hung
+ *
+ * Allocate an IB and execute it on the gfx ring (SI).
+ * Provides a basic gfx ring test to verify that IBs are working.
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_ib ib;
+	struct fence *f = NULL;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	long r;
+
+	r = amdgpu_gfx_scratch_get(adev, &scratch);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	memset(&ib, 0, sizeof(ib));
+	r = amdgpu_ib_get(adev, NULL, 256, &ib);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+		goto err1;
+	}
+	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
+	ib.ptr[2] = 0xDEADBEEF;
+	ib.length_dw = 3;
+
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+	if (r)
+		goto err2;
+
+	r = fence_wait_timeout(f, false, timeout);
+	if (r == 0) {
+		DRM_ERROR("amdgpu: IB test timed out\n");
+		r = -ETIMEDOUT;
+		goto err2;
+	} else if (r < 0) {
+		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+		goto err2;
+	}
+	tmp = RREG32(scratch);
+	if (tmp == 0xDEADBEEF) {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		r = 0;
+	} else {
+		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+
+err2:
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
+err1:
+	amdgpu_gfx_scratch_free(adev, scratch);
+	return r;
+}
+
+static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+{
+	int i;
+
+	if (enable) {
+		WREG32(CP_ME_CNTL, 0);
+	} else {
+		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+		WREG32(SCRATCH_UMSK, 0);
+		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+			adev->gfx.gfx_ring[i].ready = false;
+		for (i = 0; i < adev->gfx.num_compute_rings; i++)
+			adev->gfx.compute_ring[i].ready = false;
+	}
+	udelay(50);
+}
+
+static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
+{
+	unsigned i;
+	const struct gfx_firmware_header_v1_0 *pfp_hdr;
+	const struct gfx_firmware_header_v1_0 *ce_hdr;
+	const struct gfx_firmware_header_v1_0 *me_hdr;
+	const __le32 *fw_data;
+	u32 fw_size;
+
+	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
+		return -EINVAL;
+
+	gfx_v6_0_cp_gfx_enable(adev, false);
+	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+
+	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+
+	/* PFP */
+	fw_data = (const __le32 *)
+		(adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < fw_size; i++)
+		WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	/* CE */
+	fw_data = (const __le32 *)
+		(adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+	WREG32(CP_CE_UCODE_ADDR, 0);
+	for (i = 0; i < fw_size; i++)
+		WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+	WREG32(CP_CE_UCODE_ADDR, 0);
+
+	/* ME */
+	fw_data = (const __le32 *)
+		(adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < fw_size; i++)
+		WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+	WREG32(CP_ME_RAM_WADDR, 0);
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_CE_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev)
+{
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
+	int r, i;
+
+	r = amdgpu_ring_alloc(ring, 7 + 4);
+	if (r) {
+		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	amdgpu_ring_write(ring, 0x1);
+	amdgpu_ring_write(ring, 0x0);
+	amdgpu_ring_write(ring, adev->gfx.config.max_hw_contexts - 1);
+	amdgpu_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 0);
+
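+	/* program the CE partition bases (PACKET3_SET_BASE) */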
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+	amdgpu_ring_write(ring, 0xc000);
+	amdgpu_ring_write(ring, 0xe000);
+	amdgpu_ring_commit(ring);
+
+	gfx_v6_0_cp_gfx_enable(adev, true);
+
+	r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10);
+	if (r) {
+		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT) {
+				amdgpu_ring_write(ring,
+						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+				for (i = 0; i < ext->reg_count; i++)
+					amdgpu_ring_write(ring, ext->extent[i]);
+			}
+		}
+	}
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	amdgpu_ring_write(ring, 0x00000316);
+	amdgpu_ring_write(ring, 0x0000000e);
+	amdgpu_ring_write(ring, 0x00000010);
+
+	amdgpu_ring_commit(ring);
+
+	return 0;
+}
+
+static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+	u64 rptr_addr;
+
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	WREG32(CP_DEBUG, 0);
+	WREG32(SCRATCH_ADDR, 0);
+
+	/* ring 0 - compute and gfx */
+	/* Set ring buffer size */
+	ring = &adev->gfx.gfx_ring[0];
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB0_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB0_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	WREG32(CP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+	WREG32(SCRATCH_UMSK, 0);
+
+	mdelay(1);
+	WREG32(CP_RB0_CNTL, tmp);
+
+	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+
+	/* start the rings */
+	gfx_v6_0_cp_gfx_start(adev);
+	ring->ready = true;
+	r = amdgpu_ring_test_ring(ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	return 0;
+}
+
+static u32 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	return ring->adev->wb.wb[ring->rptr_offs];
+}
+
+static u32 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring == &adev->gfx.gfx_ring[0])
+		return RREG32(CP_RB0_WPTR);
+	else if (ring == &adev->gfx.compute_ring[0])
+		return RREG32(CP_RB1_WPTR);
+	else if (ring == &adev->gfx.compute_ring[1])
+		return RREG32(CP_RB2_WPTR);
+	else
+		BUG();
+}
+
+static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	WREG32(CP_RB0_WPTR, ring->wptr);
+	(void)RREG32(CP_RB0_WPTR);
+}
+
+static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring == &adev->gfx.compute_ring[0]) {
+		WREG32(CP_RB1_WPTR, ring->wptr);
+		(void)RREG32(CP_RB1_WPTR);
+	} else if (ring == &adev->gfx.compute_ring[1]) {
+		WREG32(CP_RB2_WPTR, ring->wptr);
+		(void)RREG32(CP_RB2_WPTR);
+	} else {
+		BUG();
+	}
+}
+
+static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+	u64 rptr_addr;
+
+	/* ring1  - compute only */
+	/* Set ring buffer size */
+
+	ring = &adev->gfx.compute_ring[0];
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB1_CNTL, tmp);
+
+	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB1_WPTR, ring->wptr);
+
+	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	WREG32(CP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+	mdelay(1);
+	WREG32(CP_RB1_CNTL, tmp);
+	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+
+	ring = &adev->gfx.compute_ring[1];
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB2_CNTL, tmp);
+
+	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB2_WPTR, ring->wptr);
+	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	WREG32(CP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
+	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+	mdelay(1);
+	WREG32(CP_RB2_CNTL, tmp);
+	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+
+	adev->gfx.compute_ring[0].ready = true;
+	adev->gfx.compute_ring[1].ready = true;
+
+	r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[0]);
+	if (r) {
+		adev->gfx.compute_ring[0].ready = false;
+		return r;
+	}
+
+	r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[1]);
+	if (r) {
+		adev->gfx.compute_ring[1].ready = false;
+		return r;
+	}
+
+	return 0;
+}
+
+static void gfx_v6_0_cp_enable(struct amdgpu_device *adev, bool enable)
+{
+	gfx_v6_0_cp_gfx_enable(adev, enable);
+}
+
+static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
+{
+	return gfx_v6_0_cp_gfx_load_microcode(adev);
+}
+
+static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+					       bool enable)
+{
+	u32 tmp = RREG32(CP_INT_CNTL_RING0);
+	u32 mask;
+	int i;
+
+	if (enable)
+		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	else
+		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING0, tmp);
+
+	if (!enable) {
+		/* read a gfx register */
+		tmp = RREG32(DB_DEPTH_INFO);
+
+		mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
+				break;
+			udelay(1);
+		}
+	}
+}
+
+static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
+{
+	int r;
+
+	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+	r = gfx_v6_0_cp_load_microcode(adev);
+	if (r)
+		return r;
+
+	r = gfx_v6_0_cp_gfx_resume(adev);
+	if (r)
+		return r;
+	r = gfx_v6_0_cp_compute_resume(adev);
+	if (r)
+		return r;
+
+	gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+	return 0;
+}
+
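+/*
+ * Wait on this ring until the fence memory reaches sync_seq, i.e. until
+ * all previously scheduled work has signalled its fence.
+ */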
+static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
+				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, 0xffffffff);
+	amdgpu_ring_write(ring, 4); /* poll interval */
+
+	if (usepfp) {
+		/* sync CE with ME to prevent CE from fetching CEIB before context switch is done */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
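+/*
+ * Switch the page table base for @vm_id, kick a VM invalidate request and
+ * wait for the invalidate to complete before the ring proceeds.
+ */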
+static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+					unsigned vm_id, uint64_t pd_addr)
+{
+	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+
+	/* write new base address */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+	if (vm_id < 8)
+		amdgpu_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+	else
+		amdgpu_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, pd_addr >> 12);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 1 << vm_id);
+
+	/* wait for the invalidate to complete */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0))); /* me */
+	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 0); /* ref */
+	amdgpu_ring_write(ring, 0); /* mask */
+	amdgpu_ring_write(ring, 0x20); /* poll interval */
+
+	if (usepfp) {
+		/* sync PFP to ME, otherwise we might get invalid PFP reads */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+		amdgpu_ring_write(ring, 0x0);
+
+		/* sync CE with ME to prevent CE from fetching CEIB before context switch is done */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
+static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->gfx.rlc.save_restore_obj) {
+		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
+		amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+		amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
+		adev->gfx.rlc.save_restore_obj = NULL;
+	}
+
+	if (adev->gfx.rlc.clear_state_obj) {
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+		adev->gfx.rlc.clear_state_obj = NULL;
+	}
+
+	if (adev->gfx.rlc.cp_table_obj) {
+		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+		amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+		adev->gfx.rlc.cp_table_obj = NULL;
+	}
+}
+
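+/*
+ * Allocate, pin and fill the RLC save/restore buffer and the clear state
+ * buffer (CSB) in VRAM.
+ */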
+static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+{
+	const u32 *src_ptr;
+	volatile u32 *dst_ptr;
+	u32 dws, i;
+	u64 reg_list_mc_addr;
+	const struct cs_section_def *cs_data;
+	int r;
+
+	adev->gfx.rlc.reg_list = verde_rlc_save_restore_register_list;
+	adev->gfx.rlc.reg_list_size =
+			(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+
+	adev->gfx.rlc.cs_data = si_cs_data;
+	src_ptr = adev->gfx.rlc.reg_list;
+	dws = adev->gfx.rlc.reg_list_size;
+	cs_data = adev->gfx.rlc.cs_data;
+
+	if (src_ptr) {
+		/* save restore block */
+		if (adev->gfx.rlc.save_restore_obj == NULL) {
+			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, NULL,
+					     &adev->gfx.rlc.save_restore_obj);
+			if (r) {
+				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+				return r;
+			}
+		}
+
+		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+		if (unlikely(r != 0)) {
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+		r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
+				  &adev->gfx.rlc.save_restore_gpu_addr);
+		if (r) {
+			amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+			dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+
+		r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
+		if (r) {
+			dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+		/* write the sr buffer */
+		dst_ptr = adev->gfx.rlc.sr_ptr;
+		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+	}
+
+	if (cs_data) {
+		/* clear state block */
+		adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
+		dws = adev->gfx.rlc.clear_state_size + (256 / 4);
+
+		if (adev->gfx.rlc.clear_state_obj == NULL) {
+			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, NULL,
+					     &adev->gfx.rlc.clear_state_obj);
+			if (r) {
+				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+				gfx_v6_0_rlc_fini(adev);
+				return r;
+			}
+		}
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		if (unlikely(r != 0)) {
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
+				  &adev->gfx.rlc.clear_state_gpu_addr);
+		if (r) {
+			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+
+		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
+		if (r) {
+			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+		/* set up the cs buffer */
+		dst_ptr = adev->gfx.rlc.cs_ptr;
+		reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
+		dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+		dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+		dst_ptr[2] = cpu_to_le32(adev->gfx.rlc.clear_state_size);
+		gfx_v6_0_get_csb_buffer(adev, &dst_ptr[(256/4)]);
+		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+	}
+
+	return 0;
+}
+
+static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp;
+
+	tmp = RREG32(RLC_LB_CNTL);
+	if (enable)
+		tmp |= LOAD_BALANCE_ENABLE;
+	else
+		tmp &= ~LOAD_BALANCE_ENABLE;
+	WREG32(RLC_LB_CNTL, tmp);
+
+	if (!enable) {
+		gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		WREG32(SPI_LB_CU_MASK, 0x00ff);
+	}
+}
+
+static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
+			break;
+		udelay(1);
+	}
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
+{
+	u32 tmp;
+
+	tmp = RREG32(RLC_CNTL);
+	if (tmp != rlc)
+		WREG32(RLC_CNTL, rlc);
+}
+
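+/*
+ * Disable the RLC and wait for its serdes activity to drain; returns the
+ * previous RLC_CNTL value so the caller can restore it via
+ * gfx_v6_0_update_rlc().
+ */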
+static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_CNTL);
+
+	if (data & RLC_ENABLE) {
+		data &= ~RLC_ENABLE;
+		WREG32(RLC_CNTL, data);
+
+		gfx_v6_0_wait_for_rlc_serdes(adev);
+	}
+
+	return orig;
+}
+
+static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
+{
+	WREG32(RLC_CNTL, 0);
+
+	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+	gfx_v6_0_wait_for_rlc_serdes(adev);
+}
+
+static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
+{
+	WREG32(RLC_CNTL, RLC_ENABLE);
+
+	gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+	udelay(50);
+}
+
+static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32(GRBM_SOFT_RESET);
+
+	tmp |= SOFT_RESET_RLC;
+	WREG32(GRBM_SOFT_RESET, tmp);
+	udelay(50);
+	tmp &= ~SOFT_RESET_RLC;
+	WREG32(GRBM_SOFT_RESET, tmp);
+	udelay(50);
+}
+
+static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	/* Enable LBPW only for DDR3 */
+	tmp = RREG32(MC_SEQ_MISC0);
+	return (tmp & 0xF0000000) == 0xB0000000;
+}
+
+static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
+{
+}
+
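+/*
+ * Halt and reset the RLC, reprogram its defaults and stream the RLC
+ * microcode in through RLC_UCODE_ADDR/RLC_UCODE_DATA before restarting it.
+ */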
+static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
+{
+	u32 i;
+	const struct rlc_firmware_header_v1_0 *hdr;
+	const __le32 *fw_data;
+	u32 fw_size;
+
+	if (!adev->gfx.rlc_fw)
+		return -EINVAL;
+
+	gfx_v6_0_rlc_stop(adev);
+	gfx_v6_0_rlc_reset(adev);
+	gfx_v6_0_init_pg(adev);
+	gfx_v6_0_init_cg(adev);
+
+	WREG32(RLC_RL_BASE, 0);
+	WREG32(RLC_RL_SIZE, 0);
+	WREG32(RLC_LB_CNTL, 0);
+	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
+	WREG32(RLC_LB_CNTR_INIT, 0);
+	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
+	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+	fw_data = (const __le32 *)
+		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+	amdgpu_ucode_print_rlc_hdr(&hdr->header);
+
+	for (i = 0; i < fw_size; i++) {
+		WREG32(RLC_UCODE_ADDR, i);
+		WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+	}
+	WREG32(RLC_UCODE_ADDR, 0);
+
+	gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
+	gfx_v6_0_rlc_start(adev);
+
+	return 0;
+}
+
+static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
+{
+	u32 data, orig, tmp;
+
+	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+		gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+		WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
+
+		tmp = gfx_v6_0_halt_rlc(adev);
+
+		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
+
+		gfx_v6_0_wait_for_rlc_serdes(adev);
+		gfx_v6_0_update_rlc(adev, tmp);
+
+		WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
+
+		data |= CGCG_EN | CGLS_EN;
+	} else {
+		gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+
+		data &= ~(CGCG_EN | CGLS_EN);
+	}
+
+	if (orig != data)
+		WREG32(RLC_CGCG_CGLS_CTRL, data);
+}
+
+static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+	u32 data, orig, tmp = 0;
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+		orig = data = RREG32(CGTS_SM_CTRL_REG);
+		data = 0x96940200;
+		if (orig != data)
+			WREG32(CGTS_SM_CTRL_REG, data);
+
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+			orig = data = RREG32(CP_MEM_SLP_CNTL);
+			data |= CP_MEM_LS_EN;
+			if (orig != data)
+				WREG32(CP_MEM_SLP_CNTL, data);
+		}
+
+		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+		data &= 0xffffffc0;
+		if (orig != data)
+			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+		tmp = gfx_v6_0_halt_rlc(adev);
+
+		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
+
+		gfx_v6_0_update_rlc(adev, tmp);
+	} else {
+		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+		data |= 0x00000003;
+		if (orig != data)
+			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+		data = RREG32(CP_MEM_SLP_CNTL);
+		if (data & CP_MEM_LS_EN) {
+			data &= ~CP_MEM_LS_EN;
+			WREG32(CP_MEM_SLP_CNTL, data);
+		}
+		orig = data = RREG32(CGTS_SM_CTRL_REG);
+		data |= LS_OVERRIDE | OVERRIDE;
+		if (orig != data)
+			WREG32(CGTS_SM_CTRL_REG, data);
+
+		tmp = gfx_v6_0_halt_rlc(adev);
+
+		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
+
+		gfx_v6_0_update_rlc(adev, tmp);
+	}
+}
+
+/*
+static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
+			       bool enable)
+{
+	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+	if (enable) {
+		gfx_v6_0_enable_mgcg(adev, true);
+		gfx_v6_0_enable_cgcg(adev, true);
+	} else {
+		gfx_v6_0_enable_cgcg(adev, false);
+		gfx_v6_0_enable_mgcg(adev, false);
+	}
+	gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+}
+*/
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
+						bool enable)
+{
+}
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
+						bool enable)
+{
+}
+
+static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
+		data &= ~0x8000;
+	else
+		data |= 0x8000;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
+{
+}
+
+/*
+static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
+{
+	const __le32 *fw_data;
+	volatile u32 *dst_ptr;
+	int me, i, max_me = 4;
+	u32 bo_offset = 0;
+	u32 table_offset, table_size;
+
+	if (adev->asic_type == CHIP_KAVERI)
+		max_me = 5;
+
+	if (adev->gfx.rlc.cp_table_ptr == NULL)
+		return;
+
+	dst_ptr = adev->gfx.rlc.cp_table_ptr;
+	for (me = 0; me < max_me; me++) {
+		if (me == 0) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.ce_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 1) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.pfp_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 2) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.me_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 3) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.mec_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.mec2_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		}
+
+		for (i = 0; i < table_size; i ++) {
+			dst_ptr[bo_offset + i] =
+				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+		}
+
+		bo_offset += table_size;
+	}
+}
+*/
+
+static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
+				     bool enable)
+{
+	u32 tmp;
+
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
+		WREG32(RLC_TTOP_D, tmp);
+
+		tmp = RREG32(RLC_PG_CNTL);
+		tmp |= GFX_PG_ENABLE;
+		WREG32(RLC_PG_CNTL, tmp);
+
+		tmp = RREG32(RLC_AUTO_PG_CTRL);
+		tmp |= AUTO_PG_EN;
+		WREG32(RLC_AUTO_PG_CTRL, tmp);
+	} else {
+		tmp = RREG32(RLC_AUTO_PG_CTRL);
+		tmp &= ~AUTO_PG_EN;
+		WREG32(RLC_AUTO_PG_CTRL, tmp);
+
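+		/* dummy read of a gfx register, presumably to flush the PG disable */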
+		tmp = RREG32(DB_RENDER_CONTROL);
+	}
+}
+
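+/*
+ * Build a bitmap of usable CUs in the given SE/SH from the harvest
+ * configuration registers; a set bit means the CU is active.
+ */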
+static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
+					 u32 se, u32 sh)
+{
+	u32 mask = 0, tmp, tmp1;
+	int i;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
+	tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+	tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	tmp &= 0xffff0000;
+
+	tmp |= tmp1;
+	tmp >>= 16;
+
+	/* mask covering all possible CU slots in this SH */
+	for (i = 0; i < adev->gfx.config.max_cu_per_sh; i++) {
+		mask <<= 1;
+		mask |= 1;
+	}
+
+	return (~tmp) & mask;
+}
+
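+/*
+ * Keep at most two CUs per SH always on and program RLC_MAX_PG_CU with the
+ * number of active CUs.
+ */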
+static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
+{
+	u32 i, j, k, active_cu_number = 0;
+	u32 mask, counter, cu_bitmap, bitmap;
+	u32 tmp = 0;
+
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			mask = 1;
+			cu_bitmap = 0;
+			counter = 0;
+			bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+				if (bitmap & mask) {
+					if (counter < 2)
+						cu_bitmap |= mask;
+					counter++;
+				}
+				mask <<= 1;
+			}
+
+			active_cu_number += counter;
+			tmp |= (cu_bitmap << (i * 16 + j * 8));
+		}
+	}
+
+	WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+	tmp = RREG32(RLC_MAX_PG_CU);
+	tmp &= ~MAX_PU_CU_MASK;
+	tmp |= MAX_PU_CU(active_cu_number);
+	WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
+					    bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
+		data |= STATIC_PER_CU_PG_ENABLE;
+	else
+		data &= ~STATIC_PER_CU_PG_ENABLE;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
+					     bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
+		data |= DYN_PER_CU_PG_ENABLE;
+	else
+		data &= ~DYN_PER_CU_PG_ENABLE;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
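+/*
+ * Point the RLC at the save/restore and clear state buffers and tune the
+ * automatic power gating thresholds.
+ */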
+static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+
+	tmp = RREG32(RLC_PG_CNTL);
+	tmp |= GFX_PG_SRC;
+	WREG32(RLC_PG_CNTL, tmp);
+
+	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+
+	tmp = RREG32(RLC_AUTO_PG_CTRL);
+
+	tmp &= ~GRBM_REG_SGIT_MASK;
+	tmp |= GRBM_REG_SGIT(0x700);
+	tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
+	WREG32(RLC_AUTO_PG_CTRL, tmp);
+}
+
+static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
+{
+	gfx_v6_0_enable_gfx_cgpg(adev, enable);
+	gfx_v6_0_enable_gfx_static_mgpg(adev, enable);
+	gfx_v6_0_enable_gfx_dynamic_mgpg(adev, enable);
+}
+
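+/* Dword count of the clear state buffer written by gfx_v6_0_get_csb_buffer(). */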
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
+{
+	u32 count = 0;
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+
+	if (adev->gfx.rlc.cs_data == NULL)
+		return 0;
+
+	/* begin clear state */
+	count += 2;
+	/* context control state */
+	count += 3;
+
+	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT)
+				count += 2 + ext->reg_count;
+			else
+				return 0;
+		}
+	}
+	/* pa_sc_raster_config */
+	count += 3;
+	/* end clear state */
+	count += 2;
+	/* clear state */
+	count += 2;
+
+	return count;
+}
+
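+/*
+ * Fill @buffer with the clear state packets counted by
+ * gfx_v6_0_get_csb_size() above; the per-ASIC dword is PA_SC_RASTER_CONFIG.
+ */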
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
+				    volatile u32 *buffer)
+{
+	u32 count = 0, i;
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+
+	if (adev->gfx.rlc.cs_data == NULL)
+		return;
+	if (buffer == NULL)
+		return;
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	buffer[count++] = cpu_to_le32(0x80000000);
+	buffer[count++] = cpu_to_le32(0x80000000);
+
+	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT) {
+				buffer[count++] =
+					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
+				for (i = 0; i < ext->reg_count; i++)
+					buffer[count++] = cpu_to_le32(ext->extent[i]);
+			} else {
+				return;
+			}
+		}
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+		buffer[count++] = cpu_to_le32(0x2a00126a);
+		break;
+	case CHIP_VERDE:
+		buffer[count++] = cpu_to_le32(0x0000124a);
+		break;
+	case CHIP_OLAND:
+		buffer[count++] = cpu_to_le32(0x00000082);
+		break;
+	case CHIP_HAINAN:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	default:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+	buffer[count++] = cpu_to_le32(0);
+}
+
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
+{
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
+		gfx_v6_0_enable_sclk_slowdown_on_pu(adev, true);
+		gfx_v6_0_enable_sclk_slowdown_on_pd(adev, true);
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+			gfx_v6_0_init_gfx_cgpg(adev);
+			gfx_v6_0_enable_cp_pg(adev, true);
+			gfx_v6_0_enable_gds_pg(adev, true);
+		} else {
+			WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+			WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+		}
+		gfx_v6_0_init_ao_cu_mask(adev);
+		gfx_v6_0_update_gfx_pg(adev, true);
+	} else {
+		WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+		WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+	}
+}
+
+static void gfx_v6_0_fini_pg(struct amdgpu_device *adev)
+{
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
+		gfx_v6_0_update_gfx_pg(adev, false);
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+			gfx_v6_0_enable_cp_pg(adev, false);
+			gfx_v6_0_enable_gds_pg(adev, false);
+		}
+	}
+}
+
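+/*
+ * Latch the 64-bit GPU clock counter and read it back; the mutex keeps the
+ * capture/read sequence atomic with respect to other callers.
+ */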
+static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+	uint64_t clock;
+
+	mutex_lock(&adev->gfx.gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&adev->gfx.gpu_clock_mutex);
+	return clock;
+}
+
+static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
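+	/* set load_enable, otherwise this packet is just NOPs */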
+	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	amdgpu_ring_write(ring, 0x80000000);
+	amdgpu_ring_write(ring, 0);
+}
+
+static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		6; /* gfx_v6_0_ring_emit_ib */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+	return
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+		3; /* gfx_v6_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+	return
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v6_0_ring_emit_vm_flush */
+		14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
+	.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
+	.select_se_sh = &gfx_v6_0_select_se_sh,
+};
+
+static int gfx_v6_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
+	adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
+	adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+	gfx_v6_0_set_ring_funcs(adev);
+	gfx_v6_0_set_irq_funcs(adev);
+
+	return 0;
+}
+
+static int gfx_v6_0_sw_init(void *handle)
+{
+	struct amdgpu_ring *ring;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i, r;
+
+	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+	if (r)
+		return r;
+
+	gfx_v6_0_scratch_init(adev);
+
+	r = gfx_v6_0_init_microcode(adev);
+	if (r) {
+		DRM_ERROR("Failed to load gfx firmware!\n");
+		return r;
+	}
+
+	r = gfx_v6_0_rlc_init(adev);
+	if (r) {
+		DRM_ERROR("Failed to init rlc BOs!\n");
+		return r;
+	}
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+		ring = &adev->gfx.gfx_ring[i];
+		ring->ring_obj = NULL;
+		sprintf(ring->name, "gfx");
+		r = amdgpu_ring_init(adev, ring, 1024,
+				     0x80000000, 0xf,
+				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
+				     AMDGPU_RING_TYPE_GFX);
+		if (r)
+			return r;
+	}
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		unsigned irq_type;
+
+		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
+			DRM_ERROR("Too many (%d) compute rings!\n", i);
+			break;
+		}
+		ring = &adev->gfx.compute_ring[i];
+		ring->ring_obj = NULL;
+		ring->use_doorbell = false;
+		ring->doorbell_index = 0;
+		ring->me = 1;
+		ring->pipe = i;
+		ring->queue = i;
+		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
+		r = amdgpu_ring_init(adev, ring, 1024,
+				     0x80000000, 0xf,
+				     &adev->gfx.eop_irq, irq_type,
+				     AMDGPU_RING_TYPE_COMPUTE);
+		if (r)
+			return r;
+	}
+
+	return r;
+}
+
+static int gfx_v6_0_sw_fini(void *handle)
+{
+	int i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
+	amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
+	amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
+	for (i = 0; i < adev->gfx.num_compute_rings; i++)
+		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+	gfx_v6_0_rlc_fini(adev);
+
+	return 0;
+}
+
+static int gfx_v6_0_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	gfx_v6_0_gpu_init(adev);
+
+	r = gfx_v6_0_rlc_resume(adev);
+	if (r)
+		return r;
+
+	r = gfx_v6_0_cp_resume(adev);
+	if (r)
+		return r;
+
+	adev->gfx.ce_ram_size = 0x8000;
+
+	return r;
+}
+
+static int gfx_v6_0_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	gfx_v6_0_cp_enable(adev, false);
+	gfx_v6_0_rlc_stop(adev);
+	gfx_v6_0_fini_pg(adev);
+
+	return 0;
+}
+
+static int gfx_v6_0_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return gfx_v6_0_hw_fini(adev);
+}
+
+static int gfx_v6_0_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return gfx_v6_0_hw_init(adev);
+}
+
+static bool gfx_v6_0_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return !(RREG32(GRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK);
+}
+
+static int gfx_v6_0_wait_for_idle(void *handle)
+{
+	unsigned i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (gfx_v6_0_is_idle(handle))
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static int gfx_v6_0_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+						 enum amdgpu_interrupt_state state)
+{
+	u32 cp_int_cntl;
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	default:
+		break;
+	}
+}
+
+static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
+						     int ring,
+						     enum amdgpu_interrupt_state state)
+{
+	u32 cp_int_cntl;
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		if (ring == 0) {
+			cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
+			cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+			WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+		} else {
+			cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
+			cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+			WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+		}
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		if (ring == 0) {
+			cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
+			cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+			WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+		} else {
+			cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
+			cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+			WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+		}
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
+					     struct amdgpu_irq_src *src,
+					     unsigned type,
+					     enum amdgpu_interrupt_state state)
+{
+	u32 cp_int_cntl;
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
+					      struct amdgpu_irq_src *src,
+					      unsigned type,
+					      enum amdgpu_interrupt_state state)
+{
+	u32 cp_int_cntl;
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int gfx_v6_0_set_eop_interrupt_state(struct amdgpu_device *adev,
+					    struct amdgpu_irq_src *src,
+					    unsigned type,
+					    enum amdgpu_interrupt_state state)
+{
+	switch (type) {
+	case AMDGPU_CP_IRQ_GFX_EOP:
+		gfx_v6_0_set_gfx_eop_interrupt_state(adev, state);
+		break;
+	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
+		gfx_v6_0_set_compute_eop_interrupt_state(adev, 0, state);
+		break;
+	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
+		gfx_v6_0_set_compute_eop_interrupt_state(adev, 1, state);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
+			    struct amdgpu_irq_src *source,
+			    struct amdgpu_iv_entry *entry)
+{
+	switch (entry->ring_id) {
+	case 0:
+		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+		break;
+	case 1:
+	case 2:
+		amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id - 1]);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
+				 struct amdgpu_irq_src *source,
+				 struct amdgpu_iv_entry *entry)
+{
+	DRM_ERROR("Illegal register access in command stream\n");
+	schedule_work(&adev->reset_work);
+	return 0;
+}
+
+static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
+				  struct amdgpu_irq_src *source,
+				  struct amdgpu_iv_entry *entry)
+{
+	DRM_ERROR("Illegal instruction in command stream\n");
+	schedule_work(&adev->reset_work);
+	return 0;
+}
+
+static int gfx_v6_0_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool gate = (state == AMD_CG_STATE_GATE);
+
+	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+	if (gate) {
+		gfx_v6_0_enable_mgcg(adev, true);
+		gfx_v6_0_enable_cgcg(adev, true);
+	} else {
+		gfx_v6_0_enable_cgcg(adev, false);
+		gfx_v6_0_enable_mgcg(adev, false);
+	}
+	gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+	return 0;
+}
+
+static int gfx_v6_0_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool gate = (state == AMD_PG_STATE_GATE);
+
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
+		gfx_v6_0_update_gfx_pg(adev, gate);
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+			gfx_v6_0_enable_cp_pg(adev, gate);
+			gfx_v6_0_enable_gds_pg(adev, gate);
+		}
+	}
+
+	return 0;
+}
+
+const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+	.name = "gfx_v6_0",
+	.early_init = gfx_v6_0_early_init,
+	.late_init = NULL,
+	.sw_init = gfx_v6_0_sw_init,
+	.sw_fini = gfx_v6_0_sw_fini,
+	.hw_init = gfx_v6_0_hw_init,
+	.hw_fini = gfx_v6_0_hw_fini,
+	.suspend = gfx_v6_0_suspend,
+	.resume = gfx_v6_0_resume,
+	.is_idle = gfx_v6_0_is_idle,
+	.wait_for_idle = gfx_v6_0_wait_for_idle,
+	.soft_reset = gfx_v6_0_soft_reset,
+	.set_clockgating_state = gfx_v6_0_set_clockgating_state,
+	.set_powergating_state = gfx_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+	.get_rptr = gfx_v6_0_ring_get_rptr,
+	.get_wptr = gfx_v6_0_ring_get_wptr,
+	.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
+	.parse_cs = NULL,
+	.emit_ib = gfx_v6_0_ring_emit_ib,
+	.emit_fence = gfx_v6_0_ring_emit_fence,
+	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+	.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+	.emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+	.test_ring = gfx_v6_0_ring_test_ring,
+	.test_ib = gfx_v6_0_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
+	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+	.get_rptr = gfx_v6_0_ring_get_rptr,
+	.get_wptr = gfx_v6_0_ring_get_wptr,
+	.set_wptr = gfx_v6_0_ring_set_wptr_compute,
+	.parse_cs = NULL,
+	.emit_ib = gfx_v6_0_ring_emit_ib,
+	.emit_fence = gfx_v6_0_ring_emit_fence,
+	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+	.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+	.emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+	.test_ring = gfx_v6_0_ring_test_ring,
+	.test_ib = gfx_v6_0_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
+};
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+		adev->gfx.gfx_ring[i].funcs = &gfx_v6_0_ring_funcs_gfx;
+	for (i = 0; i < adev->gfx.num_compute_rings; i++)
+		adev->gfx.compute_ring[i].funcs = &gfx_v6_0_ring_funcs_compute;
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_eop_irq_funcs = {
+	.set = gfx_v6_0_set_eop_interrupt_state,
+	.process = gfx_v6_0_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_reg_irq_funcs = {
+	.set = gfx_v6_0_set_priv_reg_fault_state,
+	.process = gfx_v6_0_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_inst_irq_funcs = {
+	.set = gfx_v6_0_set_priv_inst_fault_state,
+	.process = gfx_v6_0_priv_inst_irq,
+};
+
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+	adev->gfx.eop_irq.funcs = &gfx_v6_0_eop_irq_funcs;
+
+	adev->gfx.priv_reg_irq.num_types = 1;
+	adev->gfx.priv_reg_irq.funcs = &gfx_v6_0_priv_reg_irq_funcs;
+
+	adev->gfx.priv_inst_irq.num_types = 1;
+	adev->gfx.priv_inst_irq.funcs = &gfx_v6_0_priv_inst_irq_funcs;
+}
+
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
+{
+	int i, j, k, counter, active_cu_number = 0;
+	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+
+	memset(cu_info, 0, sizeof(*cu_info));
+
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			mask = 1;
+			ao_bitmap = 0;
+			counter = 0;
+			bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+			cu_info->bitmap[i][j] = bitmap;
+
+			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+				if (bitmap & mask) {
+					if (counter < 2)
+						ao_bitmap |= mask;
+					counter++;
+				}
+				mask <<= 1;
+			}
+			active_cu_number += counter;
+			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+		}
+	}
+
+	cu_info->number = active_cu_number;
+	cu_info->ao_cu_mask = ao_cu_mask;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
similarity index 71%
copy from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
copy to drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
index d24b888..b9657e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -20,16 +20,10 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
 
-#include "hwmgr.h"
+#ifndef __GFX_V6_0_H__
+#define __GFX_V6_0_H__
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-				struct pp_power_state *, void *, uint32_t));
+extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
 
 #endif
-
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d869d05..90102f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2096,6 +2096,25 @@
 	amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+	uint32_t dw2 = 0;
+
+	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
+	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+		/* set load_global_config & load_global_uconfig */
+		dw2 |= 0x8001;
+		/* set load_cs_sh_regs */
+		dw2 |= 0x01000000;
+		/* set load_per_context_state & load_gfx_sh_regs */
+		dw2 |= 0x10002;
+	}
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	amdgpu_ring_write(ring, dw2);
+	amdgpu_ring_write(ring, 0);
+}
+
 /**
  * gfx_v7_0_ring_test_ib - basic ring IB test
  *
@@ -2443,7 +2462,7 @@
 	return 0;
 }
 
-static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	return ring->adev->wb.wb[ring->rptr_offs];
 }
@@ -2463,11 +2482,6 @@
 	(void)RREG32(mmCP_RB0_WPTR);
 }
 
-static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
-	return ring->adev->wb.wb[ring->rptr_offs];
-}
-
 static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 {
 	/* XXX check if swapping is necessary on BE */
@@ -4176,6 +4190,41 @@
 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
+static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+	return
+		4; /* gfx_v7_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+	return
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+		3; /* gfx_v7_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+	return
+		4; /* gfx_v7_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+	return
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v7_0_ring_emit_vm_flush */
+		7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
+
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v7_0_select_se_sh,
@@ -4465,24 +4514,21 @@
 	}
 
 	/* reserve GDS, GWS and OA resource for gfx */
-	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
-			PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_GDS, 0,
-			NULL, NULL, &adev->gds.gds_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+				    &adev->gds.gds_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
-		PAGE_SIZE, true,
-		AMDGPU_GEM_DOMAIN_GWS, 0,
-		NULL, NULL, &adev->gds.gws_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+				    &adev->gds.gws_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
-			PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_OA, 0,
-			NULL, NULL, &adev->gds.oa_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+				    &adev->gds.oa_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
@@ -4498,9 +4544,9 @@
 	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
-	amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
-	amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4931,7 +4977,7 @@
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
-	.get_rptr = gfx_v7_0_ring_get_rptr_gfx,
+	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
 	.parse_cs = NULL,
@@ -4946,10 +4992,13 @@
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
+	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
+	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
-	.get_rptr = gfx_v7_0_ring_get_rptr_compute,
+	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
 	.parse_cs = NULL,
@@ -4964,6 +5013,8 @@
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
+	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b818461..47e270a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -703,7 +703,10 @@
 						 polaris10_golden_common_all,
 						 (const u32)ARRAY_SIZE(polaris10_golden_common_all));
 		WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
-		if (adev->pdev->revision == 0xc7) {
+		if (adev->pdev->revision == 0xc7 &&
+		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
+		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
+		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
 			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
 			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
 		}
@@ -1233,10 +1236,9 @@
 	if (adev->gfx.rlc.clear_state_obj) {
 		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
 		if (unlikely(r != 0))
-			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+			dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
 		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
 		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
 		adev->gfx.rlc.clear_state_obj = NULL;
 	}
@@ -1248,7 +1250,6 @@
 			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
 		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
 		amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
 		adev->gfx.rlc.cp_table_obj = NULL;
 	}
@@ -1290,14 +1291,14 @@
 				  &adev->gfx.rlc.clear_state_gpu_addr);
 		if (r) {
 			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+			dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
 			gfx_v8_0_rlc_fini(adev);
 			return r;
 		}
 
 		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
 		if (r) {
-			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+			dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
 			gfx_v8_0_rlc_fini(adev);
 			return r;
 		}
@@ -1332,7 +1333,7 @@
 				  &adev->gfx.rlc.cp_table_gpu_addr);
 		if (r) {
 			amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-			dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+			dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
 			return r;
 		}
 		r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
@@ -1345,7 +1346,6 @@
 
 		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
 		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
 	}
 
 	return 0;
@@ -1361,7 +1361,6 @@
 			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
 		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
 		adev->gfx.mec.hpd_eop_obj = NULL;
 	}
@@ -2082,24 +2081,21 @@
 	}
 
 	/* reserve GDS, GWS and OA resource for gfx */
-	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
-			PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
-			NULL, &adev->gds.gds_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+				    &adev->gds.gds_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
-		PAGE_SIZE, true,
-		AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
-		NULL, &adev->gds.gws_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+				    &adev->gds.gws_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
-			PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_OA, 0, NULL,
-			NULL, &adev->gds.oa_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+				    &adev->gds.oa_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
@@ -2117,9 +2113,9 @@
 	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
-	amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
-	amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2127,9 +2123,7 @@
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
 	gfx_v8_0_mec_fini(adev);
-
 	gfx_v8_0_rlc_fini(adev);
-
 	gfx_v8_0_free_microcode(adev);
 
 	return 0;
@@ -3465,19 +3459,16 @@
 	else
 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
 
-	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+	if (se_num == 0xffffffff)
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
-	} else if (se_num == 0xffffffff) {
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
-	} else if (sh_num == 0xffffffff) {
+	else
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+
+	if (sh_num == 0xffffffff)
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
-	} else {
+	else
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
-	}
+
 	WREG32(mmGRBM_GFX_INDEX, data);
 }
 
@@ -3490,11 +3481,10 @@
 {
 	u32 data, mask;
 
-	data = RREG32(mmCC_RB_BACKEND_DISABLE);
-	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+	data =  RREG32(mmCC_RB_BACKEND_DISABLE) |
+		RREG32(mmGC_USER_RB_BACKEND_DISABLE);
 
-	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
 
 	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
 				       adev->gfx.config.max_sh_per_se);
@@ -3576,16 +3566,12 @@
 	u32 tmp;
 	int i;
 
-	tmp = RREG32(mmGRBM_CNTL);
-	tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
-	WREG32(mmGRBM_CNTL, tmp);
-
+	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
 	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
 
 	gfx_v8_0_tiling_mode_table_init(adev);
-
 	gfx_v8_0_setup_rb(adev);
 	gfx_v8_0_get_cu_info(adev);
 
@@ -3769,9 +3755,7 @@
 				sizeof(indirect_start_offsets)/sizeof(int));
 
 	/* save and restore list */
-	temp = RREG32(mmRLC_SRM_CNTL);
-	temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
-	WREG32(mmRLC_SRM_CNTL, temp);
+	WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
 
 	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
 	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
@@ -3808,11 +3792,7 @@
 
 static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
 {
-	uint32_t data;
-
-	data = RREG32(mmRLC_SRM_CNTL);
-	data |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
-	WREG32(mmRLC_SRM_CNTL, data);
+	WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
 }
 
 static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
@@ -3822,75 +3802,34 @@
 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
 			      AMD_PG_SUPPORT_GFX_SMG |
 			      AMD_PG_SUPPORT_GFX_DMG)) {
-		data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
-		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
-		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
-		WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
+		WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
 
-		data = 0;
-		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
-		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
-		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
-		data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
+		data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+		data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+		data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+		data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
 		WREG32(mmRLC_PG_DELAY, data);
 
-		data = RREG32(mmRLC_PG_DELAY_2);
-		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
-		data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
-		WREG32(mmRLC_PG_DELAY_2, data);
-
-		data = RREG32(mmRLC_AUTO_PG_CTRL);
-		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
-		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
-		WREG32(mmRLC_AUTO_PG_CTRL, data);
+		WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+		WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
 	}
 }
 
 static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
 						bool enable)
 {
-	u32 data, orig;
-
-	orig = data = RREG32(mmRLC_PG_CNTL);
-
-	if (enable)
-		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-
-	if (orig != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
 						  bool enable)
 {
-	u32 data, orig;
-
-	orig = data = RREG32(mmRLC_PG_CNTL);
-
-	if (enable)
-		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-
-	if (orig != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
 {
-	u32 data, orig;
-
-	orig = data = RREG32(mmRLC_PG_CNTL);
-
-	if (enable)
-		data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-	else
-		data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-
-	if (orig != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
 }
 
 static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
@@ -3927,36 +3866,26 @@
 	}
 }
 
-void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
 {
-	u32 tmp = RREG32(mmRLC_CNTL);
-
-	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
-	WREG32(mmRLC_CNTL, tmp);
+	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
 
 	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
-
 	gfx_v8_0_wait_for_rlc_serdes(adev);
 }
 
 static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
 {
-	u32 tmp = RREG32(mmGRBM_SOFT_RESET);
-
-	tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
-	WREG32(mmGRBM_SOFT_RESET, tmp);
+	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
 	udelay(50);
-	tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
-	WREG32(mmGRBM_SOFT_RESET, tmp);
+
+	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
 	udelay(50);
 }
 
 static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
 {
-	u32 tmp = RREG32(mmRLC_CNTL);
-
-	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
-	WREG32(mmRLC_CNTL, tmp);
+	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
 
 	/* carrizo do enable cp interrupt after cp inited */
 	if (!(adev->flags & AMD_IS_APU))
@@ -3998,14 +3927,13 @@
 	/* disable CG */
 	WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
 	if (adev->asic_type == CHIP_POLARIS11 ||
-		adev->asic_type == CHIP_POLARIS10)
+	    adev->asic_type == CHIP_POLARIS10)
 		WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
 
 	/* disable PG */
 	WREG32(mmRLC_PG_CNTL, 0);
 
 	gfx_v8_0_rlc_reset(adev);
-
 	gfx_v8_0_init_pg(adev);
 
 	if (!adev->pp_enabled) {
@@ -4300,12 +4228,10 @@
 	gfx_v8_0_cp_gfx_start(adev);
 	ring->ready = true;
 	r = amdgpu_ring_test_ring(ring);
-	if (r) {
+	if (r)
 		ring->ready = false;
-		return r;
-	}
 
-	return 0;
+	return r;
 }
 
 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4980,7 +4906,6 @@
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	gfx_v8_0_init_golden_registers(adev);
-
 	gfx_v8_0_gpu_init(adev);
 
 	r = gfx_v8_0_rlc_resume(adev);
@@ -4988,8 +4913,6 @@
 		return r;
 
 	r = gfx_v8_0_cp_resume(adev);
-	if (r)
-		return r;
 
 	return r;
 }
@@ -5037,25 +4960,22 @@
 static int gfx_v8_0_wait_for_idle(void *handle)
 {
 	unsigned i;
-	u32 tmp;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		/* read MC_STATUS */
-		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
-
-		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+		if (gfx_v8_0_is_idle(handle))
 			return 0;
+
 		udelay(1);
 	}
 	return -ETIMEDOUT;
 }
 
-static int gfx_v8_0_soft_reset(void *handle)
+static int gfx_v8_0_check_soft_reset(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 	u32 tmp;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* GRBM_STATUS */
 	tmp = RREG32(mmGRBM_STATUS);
@@ -5064,16 +4984,12 @@
 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
-		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
+		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
+		   GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
-	}
-
-	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
-		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
-						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
 	}
@@ -5084,73 +5000,199 @@
 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
 
+	if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
+	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
+	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
+		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+						SOFT_RESET_CPF, 1);
+		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+						SOFT_RESET_CPC, 1);
+		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+						SOFT_RESET_CPG, 1);
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
+						SOFT_RESET_GRBM, 1);
+	}
+
 	/* SRBM_STATUS */
 	tmp = RREG32(mmSRBM_STATUS);
 	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
+	if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
 
 	if (grbm_soft_reset || srbm_soft_reset) {
-		/* stop the rlc */
-		gfx_v8_0_rlc_stop(adev);
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
+		adev->gfx.grbm_soft_reset = grbm_soft_reset;
+		adev->gfx.srbm_soft_reset = srbm_soft_reset;
+	} else {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
+		adev->gfx.grbm_soft_reset = 0;
+		adev->gfx.srbm_soft_reset = 0;
+	}
 
+	return 0;
+}
+
+static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring)
+{
+	int i;
+
+	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
+		u32 tmp;
+		tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
+				    DEQUEUE_REQ, 2);
+		WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
+				break;
+			udelay(1);
+		}
+	}
+}
+
+static int gfx_v8_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+		return 0;
+
+	grbm_soft_reset = adev->gfx.grbm_soft_reset;
+	srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+	/* stop the rlc */
+	gfx_v8_0_rlc_stop(adev);
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
 		/* Disable GFX parsing/prefetching */
 		gfx_v8_0_cp_gfx_enable(adev, false);
 
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+		int i;
+
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+			gfx_v8_0_inactive_hqd(adev, ring);
+		}
 		/* Disable MEC parsing/prefetching */
 		gfx_v8_0_cp_compute_enable(adev, false);
+	}
 
-		if (grbm_soft_reset || srbm_soft_reset) {
-			tmp = RREG32(mmGMCON_DEBUG);
-			tmp = REG_SET_FIELD(tmp,
-					    GMCON_DEBUG, GFX_STALL, 1);
-			tmp = REG_SET_FIELD(tmp,
-					    GMCON_DEBUG, GFX_CLEAR, 1);
-			WREG32(mmGMCON_DEBUG, tmp);
+	return 0;
+}
 
-			udelay(50);
-		}
+static int gfx_v8_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
 
-		if (grbm_soft_reset) {
-			tmp = RREG32(mmGRBM_SOFT_RESET);
-			tmp |= grbm_soft_reset;
-			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
-			WREG32(mmGRBM_SOFT_RESET, tmp);
-			tmp = RREG32(mmGRBM_SOFT_RESET);
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+		return 0;
 
-			udelay(50);
+	grbm_soft_reset = adev->gfx.grbm_soft_reset;
+	srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
-			tmp &= ~grbm_soft_reset;
-			WREG32(mmGRBM_SOFT_RESET, tmp);
-			tmp = RREG32(mmGRBM_SOFT_RESET);
-		}
-
-		if (srbm_soft_reset) {
-			tmp = RREG32(mmSRBM_SOFT_RESET);
-			tmp |= srbm_soft_reset;
-			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
-			WREG32(mmSRBM_SOFT_RESET, tmp);
-			tmp = RREG32(mmSRBM_SOFT_RESET);
-
-			udelay(50);
-
-			tmp &= ~srbm_soft_reset;
-			WREG32(mmSRBM_SOFT_RESET, tmp);
-			tmp = RREG32(mmSRBM_SOFT_RESET);
-		}
-
-		if (grbm_soft_reset || srbm_soft_reset) {
-			tmp = RREG32(mmGMCON_DEBUG);
-			tmp = REG_SET_FIELD(tmp,
-					    GMCON_DEBUG, GFX_STALL, 0);
-			tmp = REG_SET_FIELD(tmp,
-					    GMCON_DEBUG, GFX_CLEAR, 0);
-			WREG32(mmGMCON_DEBUG, tmp);
-		}
-
-		/* Wait a little for things to settle down */
+	if (grbm_soft_reset || srbm_soft_reset) {
+		tmp = RREG32(mmGMCON_DEBUG);
+		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
+		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
+		WREG32(mmGMCON_DEBUG, tmp);
 		udelay(50);
 	}
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(mmGRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmGRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmGRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(mmGRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmGRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+	}
+
+	if (grbm_soft_reset || srbm_soft_reset) {
+		tmp = RREG32(mmGMCON_DEBUG);
+		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
+		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
+		WREG32(mmGMCON_DEBUG, tmp);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	return 0;
+}
+
+static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
+			      struct amdgpu_ring *ring)
+{
+	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
+	WREG32(mmCP_HQD_PQ_RPTR, 0);
+	WREG32(mmCP_HQD_PQ_WPTR, 0);
+	vi_srbm_select(adev, 0, 0, 0, 0);
+}
+
+static int gfx_v8_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+		return 0;
+
+	grbm_soft_reset = adev->gfx.grbm_soft_reset;
+	srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
+		gfx_v8_0_cp_gfx_resume(adev);
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+		int i;
+
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+			gfx_v8_0_init_hqd(adev, ring);
+		}
+		gfx_v8_0_cp_compute_resume(adev);
+	}
+	gfx_v8_0_rlc_start(adev);
+
 	return 0;
 }
 
@@ -5269,8 +5311,6 @@
 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
 						       bool enable)
 {
-	uint32_t data, temp;
-
 	if (adev->asic_type == CHIP_POLARIS11)
 		/* Send msg to SMU via Powerplay */
 		amdgpu_set_powergating_state(adev,
@@ -5278,83 +5318,35 @@
 					     enable ?
 					     AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
 
-	temp = data = RREG32(mmRLC_PG_CNTL);
-	/* Enable static MGPG */
-	if (enable)
-		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
-	if (temp != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
 }
 
 static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
 							bool enable)
 {
-	uint32_t data, temp;
-
-	temp = data = RREG32(mmRLC_PG_CNTL);
-	/* Enable dynamic MGPG */
-	if (enable)
-		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
-	if (temp != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
 }
 
 static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
 		bool enable)
 {
-	uint32_t data, temp;
-
-	temp = data = RREG32(mmRLC_PG_CNTL);
-	/* Enable quick PG */
-	if (enable)
-		data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
-
-	if (temp != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
 					  bool enable)
 {
-	u32 data, orig;
-
-	orig = data = RREG32(mmRLC_PG_CNTL);
-
-	if (enable)
-		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
-
-	if (orig != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
 						bool enable)
 {
-	u32 data, orig;
-
-	orig = data = RREG32(mmRLC_PG_CNTL);
-
-	if (enable)
-		data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
-
-	if (orig != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
 
 	/* Read any GFX register to wake up GFX. */
 	if (!enable)
-		data = RREG32(mmDB_RENDER_CONTROL);
+		RREG32(mmDB_RENDER_CONTROL);
 }
 
 static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
@@ -5430,15 +5422,15 @@
 
 	data = RREG32(mmRLC_SERDES_WR_CTRL);
 	if (adev->asic_type == CHIP_STONEY)
-			data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
-			RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
-			RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
-			RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
-			RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
-			RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
-			RLC_SERDES_WR_CTRL__POWER_UP_MASK |
-			RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
-			RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
+		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
+			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
+			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
+			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
+			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
+			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
+			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
+			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
+			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
 	else
 		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
 			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
@@ -5461,10 +5453,10 @@
 
 #define MSG_ENTER_RLC_SAFE_MODE     1
 #define MSG_EXIT_RLC_SAFE_MODE      0
-
-#define RLC_GPR_REG2__REQ_MASK           0x00000001
-#define RLC_GPR_REG2__MESSAGE__SHIFT     0x00000001
-#define RLC_GPR_REG2__MESSAGE_MASK       0x0000001e
+#define RLC_GPR_REG2__REQ_MASK 0x00000001
+#define RLC_GPR_REG2__REQ__SHIFT 0
+#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
+#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
 
 static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
 {
@@ -5494,7 +5486,7 @@
 		}
 
 		for (i = 0; i < adev->usec_timeout; i++) {
-			if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+			if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
 				break;
 			udelay(1);
 		}
@@ -5522,7 +5514,7 @@
 	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+		if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
 			break;
 		udelay(1);
 	}
@@ -5554,7 +5546,7 @@
 		}
 
 		for (i = 0; i < adev->usec_timeout; i++) {
-			if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+			if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
 				break;
 			udelay(1);
 		}
@@ -5581,7 +5573,7 @@
 	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
 			break;
 		udelay(1);
 	}
@@ -5622,21 +5614,12 @@
 	/* It is disabled by HW by default */
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
-			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
 				/* 1 - RLC memory Light sleep */
-				temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
-				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
-				if (temp != data)
-					WREG32(mmRLC_MEM_SLP_CNTL, data);
-			}
+				WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
 
-			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
-				/* 2 - CP memory Light sleep */
-				temp = data = RREG32(mmCP_MEM_SLP_CNTL);
-				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
-				if (temp != data)
-					WREG32(mmCP_MEM_SLP_CNTL, data);
-			}
+			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
+				WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
 		}
 
 		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
@@ -5852,27 +5835,20 @@
 	return 0;
 }
 
-static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
-	u32 rptr;
-
-	rptr = ring->adev->wb.wb[ring->rptr_offs];
-
-	return rptr;
+	return ring->adev->wb.wb[ring->rptr_offs];
 }
 
 static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 wptr;
 
 	if (ring->use_doorbell)
 		/* XXX check if swapping is necessary on BE */
-		wptr = ring->adev->wb.wb[ring->wptr_offs];
+		return ring->adev->wb.wb[ring->wptr_offs];
 	else
-		wptr = RREG32(mmCP_RB0_WPTR);
-
-	return wptr;
+		return RREG32(mmCP_RB0_WPTR);
 }
 
 static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
@@ -5939,12 +5915,6 @@
 {
 	u32 header, control = 0;
 
-	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
-	if (ctx_switch) {
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
-	}
-
 	if (ib->flags & AMDGPU_IB_FLAG_CE)
 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
 	else
@@ -5971,9 +5941,9 @@
 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 	amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
-					  (2 << 0) |
+				(2 << 0) |
 #endif
-					  (ib->gpu_addr & 0xFFFFFFFC));
+				(ib->gpu_addr & 0xFFFFFFFC));
 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
 	amdgpu_ring_write(ring, control);
 }
@@ -6014,14 +5984,6 @@
 	amdgpu_ring_write(ring, seq);
 	amdgpu_ring_write(ring, 0xffffffff);
 	amdgpu_ring_write(ring, 4); /* poll interval */
-
-	if (usepfp) {
-		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
-	}
 }
 
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
@@ -6029,6 +5991,10 @@
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
 
+	/* GFX8 emits 128 dw of NOPs so the DE cannot do vm_flush before the CE finishes the CEIB */
+	if (usepfp)
+		amdgpu_ring_insert_nop(ring, 128);
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
 				 WRITE_DATA_DST_SEL(0)) |
@@ -6068,18 +6034,11 @@
 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 		amdgpu_ring_write(ring, 0x0);
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
+		/* GFX8 emits 128 dw of NOPs so the CE cannot access the VM before vm_flush finishes */
+		amdgpu_ring_insert_nop(ring, 128);
 	}
 }
 
-static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
-	return ring->adev->wb.wb[ring->rptr_offs];
-}
-
 static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 {
 	return ring->adev->wb.wb[ring->wptr_offs];
@@ -6115,36 +6074,88 @@
 	amdgpu_ring_write(ring, upper_32_bits(seq));
 }
 
+static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+	amdgpu_ring_write(ring, 0);
+}
+
+static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+	uint32_t dw2 = 0;
+
+	dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
+	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+		/* set load_global_config & load_global_uconfig */
+		dw2 |= 0x8001;
+		/* set load_cs_sh_regs */
+		dw2 |= 0x01000000;
+		/* set load_per_context_state & load_gfx_sh_regs for GFX */
+		dw2 |= 0x10002;
+
+		/* set load_ce_ram if a preamble is presented */
+		if (flags & AMDGPU_PREAMBLE_IB_PRESENT)
+			dw2 |= 0x10000000;
+	} else {
+		/* still load_ce_ram if this is the first time the preamble
+		 * is presented, even though no context switch happens.
+		 */
+		if (flags & AMDGPU_PREAMBLE_IB_PRESENT_FIRST)
+			dw2 |= 0x10000000;
+	}
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	amdgpu_ring_write(ring, dw2);
+	amdgpu_ring_write(ring, 0);
+}
+
+static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+	return
+		4; /* gfx_v8_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+	return
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		6 + 6 + 6 + /* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+		2 + /* gfx_v8_ring_emit_sb */
+		3; /* gfx_v8_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+	return
+		4; /* gfx_v8_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+	return
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v8_0_ring_emit_vm_flush */
+		7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
+
 static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						 enum amdgpu_interrupt_state state)
 {
-	u32 cp_int_cntl;
-
-	switch (state) {
-	case AMDGPU_IRQ_STATE_DISABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-					    TIME_STAMP_INT_ENABLE, 0);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	case AMDGPU_IRQ_STATE_ENABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl =
-			REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-				      TIME_STAMP_INT_ENABLE, 1);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	default:
-		break;
-	}
+	WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
+		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 }
 
 static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
 						     int me, int pipe,
 						     enum amdgpu_interrupt_state state)
 {
-	u32 mec_int_cntl, mec_int_cntl_reg;
-
 	/*
 	 * amdgpu controls only pipe 0 of MEC1. That's why this function only
 	 * handles the setting of interrupts for this specific pipe. All other
@@ -6154,7 +6165,6 @@
 	if (me == 1) {
 		switch (pipe) {
 		case 0:
-			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
 			break;
 		default:
 			DRM_DEBUG("invalid pipe %d\n", pipe);
@@ -6165,22 +6175,8 @@
 		return;
 	}
 
-	switch (state) {
-	case AMDGPU_IRQ_STATE_DISABLE:
-		mec_int_cntl = RREG32(mec_int_cntl_reg);
-		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
-					     TIME_STAMP_INT_ENABLE, 0);
-		WREG32(mec_int_cntl_reg, mec_int_cntl);
-		break;
-	case AMDGPU_IRQ_STATE_ENABLE:
-		mec_int_cntl = RREG32(mec_int_cntl_reg);
-		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
-					     TIME_STAMP_INT_ENABLE, 1);
-		WREG32(mec_int_cntl_reg, mec_int_cntl);
-		break;
-	default:
-		break;
-	}
+	WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE,
+		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 }
 
 static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
@@ -6188,24 +6184,8 @@
 					     unsigned type,
 					     enum amdgpu_interrupt_state state)
 {
-	u32 cp_int_cntl;
-
-	switch (state) {
-	case AMDGPU_IRQ_STATE_DISABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-					    PRIV_REG_INT_ENABLE, 0);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	case AMDGPU_IRQ_STATE_ENABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-					    PRIV_REG_INT_ENABLE, 1);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	default:
-		break;
-	}
+	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
+		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
 	return 0;
 }
@@ -6215,24 +6195,8 @@
 					      unsigned type,
 					      enum amdgpu_interrupt_state state)
 {
-	u32 cp_int_cntl;
-
-	switch (state) {
-	case AMDGPU_IRQ_STATE_DISABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-					    PRIV_INSTR_INT_ENABLE, 0);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	case AMDGPU_IRQ_STATE_ENABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-					    PRIV_INSTR_INT_ENABLE, 1);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	default:
-		break;
-	}
+	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
+		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
 	return 0;
 }
@@ -6338,13 +6302,16 @@
 	.resume = gfx_v8_0_resume,
 	.is_idle = gfx_v8_0_is_idle,
 	.wait_for_idle = gfx_v8_0_wait_for_idle,
+	.check_soft_reset = gfx_v8_0_check_soft_reset,
+	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
 	.soft_reset = gfx_v8_0_soft_reset,
+	.post_soft_reset = gfx_v8_0_post_soft_reset,
 	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
 	.set_powergating_state = gfx_v8_0_set_powergating_state,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
-	.get_rptr = gfx_v8_0_ring_get_rptr_gfx,
+	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
 	.parse_cs = NULL,
@@ -6359,10 +6326,14 @@
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.emit_switch_buffer = gfx_v8_ring_emit_sb,
+	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
+	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
+	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
-	.get_rptr = gfx_v8_0_ring_get_rptr_compute,
+	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
 	.parse_cs = NULL,
@@ -6377,6 +6348,8 @@
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
+	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6479,15 +6452,12 @@
 {
 	u32 data, mask;
 
-	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-
-	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
-	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+	data =  RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
+		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
 
 	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
 
-	return (~data) & mask;
+	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
 }
 
 static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
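
The dominant pattern in the gfx_v8_0.c hunks above is collapsing open-coded
read/mask/shift/write sequences into WREG32_FIELD(). Judging from the
<REG>__<FIELD>_MASK and <REG>__<FIELD>__SHIFT constants it replaces, the
helper is presumably a read-modify-write macro along these lines; this is a
hedged sketch, not the verbatim amdgpu definition:

/* Sketch of a WREG32_FIELD()-style helper, assuming the usual
 * <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT register headers
 * and the RREG32()/WREG32() MMIO accessors visible in the diff.
 */
#define WREG32_FIELD(reg, field, val)					\
	WREG32(mm##reg, (RREG32(mm##reg) & ~reg##__##field##_MASK) |	\
			((val) << reg##__##field##__SHIFT))

Unlike the removed "if (orig != data)" variants in
cz_enable_sck_slow_down_on_power_up() and friends, a macro of this shape
writes the register unconditionally, trading an occasional redundant MMIO
write for considerably less code.
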
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index bc82c79..ebed1f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -26,6 +26,4 @@
 
 extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
 
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
 #endif
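
With gfx_v8_0.h reduced to the ip_funcs export, the interesting change is
that gfx_v8_0_ip_funcs (see the hunk above) now surrounds soft_reset with
check_soft_reset, pre_soft_reset and post_soft_reset hooks. A hedged sketch
of how a caller would sequence the four phases; the wrapper function below
is hypothetical and stands in for the real amdgpu core dispatch:

/* Hypothetical caller: sequences the four-phase GPU soft reset.
 * check_soft_reset() latches the GRBM/SRBM reset bits and the .hang
 * flag; the later phases bail out early when nothing is hung.
 */
static int example_soft_reset_gfx(void *handle,
				  const struct amd_ip_funcs *funcs)
{
	int r;

	r = funcs->check_soft_reset(handle);
	if (r)
		return r;
	r = funcs->pre_soft_reset(handle);	/* stop RLC, drain HQDs, halt CP */
	if (r)
		return r;
	r = funcs->soft_reset(handle);		/* pulse the latched reset bits */
	if (r)
		return r;
	return funcs->post_soft_reset(handle);	/* re-init HQDs, restart CP/RLC */
}
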
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
new file mode 100644
index 0000000..b13c8aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -0,0 +1,1071 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "gmc_v6_0.h"
+#include "amdgpu_ucode.h"
+#include "si/sid.h"
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v6_0_wait_for_idle(void *handle);
+
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+
+static const u32 crtc_offsets[6] =
+{
+	SI_CRTC0_REGISTER_OFFSET,
+	SI_CRTC1_REGISTER_OFFSET,
+	SI_CRTC2_REGISTER_OFFSET,
+	SI_CRTC3_REGISTER_OFFSET,
+	SI_CRTC4_REGISTER_OFFSET,
+	SI_CRTC5_REGISTER_OFFSET
+};
+
+static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
+			     struct amdgpu_mode_mc_save *save)
+{
+	u32 blackout;
+
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_stop_mc_access(adev, save);
+
+	gmc_v6_0_wait_for_idle((void *)adev);
+
+	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
+		/* Block CPU access */
+		WREG32(BIF_FB_EN, 0);
+		/* blackout the MC */
+		blackout = REG_SET_FIELD(blackout,
+					 mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+	}
+	/* wait for the MC to settle */
+	udelay(100);
+}
+
+static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
+			       struct amdgpu_mode_mc_save *save)
+{
+	u32 tmp;
+
+	/* unblackout the MC */
+	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+	/* allow CPU access */
+	tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
+	tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
+	WREG32(BIF_FB_EN, tmp);
+
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_resume_mc_access(adev, save);
+}
+
+static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+	const char *chip_name;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		chip_name = "tahiti";
+		break;
+	case CHIP_PITCAIRN:
+		chip_name = "pitcairn";
+		break;
+	case CHIP_VERDE:
+		chip_name = "verde";
+		break;
+	case CHIP_OLAND:
+		chip_name = "oland";
+		break;
+	case CHIP_HAINAN:
+		chip_name = "hainan";
+		break;
+	default:
+		BUG();
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+
+	err = amdgpu_ucode_validate(adev->mc.fw);
+
+out:
+	if (err) {
+		dev_err(adev->dev,
+		       "si_mc: Failed to load firmware \"%s\"\n",
+		       fw_name);
+		release_firmware(adev->mc.fw);
+		adev->mc.fw = NULL;
+	}
+	return err;
+}
+
+static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
+{
+	const __le32 *new_fw_data = NULL;
+	u32 running;
+	const __le32 *new_io_mc_regs = NULL;
+	int i, regs_size, ucode_size;
+	const struct mc_firmware_header_v1_0 *hdr;
+
+	if (!adev->mc.fw)
+		return -EINVAL;
+
+	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+
+	amdgpu_ucode_print_mc_hdr(&hdr->header);
+
+	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+	new_io_mc_regs = (const __le32 *)
+		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+	new_fw_data = (const __le32 *)
+		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if (running == 0) {
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
+		for (i = 0; i < regs_size; i++) {
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+			WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+		}
+		/* load the MC ucode */
+		for (i = 0; i < ucode_size; i++)
+			WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+				break;
+			udelay(1);
+		}
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+				break;
+			udelay(1);
+		}
+	}
+
+	return 0;
+}
+
+static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
+				       struct amdgpu_mc *mc)
+{
+	if (mc->mc_vram_size > 0xFFC0000000ULL) {
+		dev_warn(adev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xFFC0000000ULL;
+		mc->mc_vram_size = 0xFFC0000000ULL;
+	}
+	amdgpu_vram_location(adev, &adev->mc, 0);
+	adev->mc.gtt_base_align = 0;
+	amdgpu_gtt_location(adev, mc);
+}
+
+static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+		WREG32((0xb05 + j), 0x00000000);
+		WREG32((0xb06 + j), 0x00000000);
+		WREG32((0xb07 + j), 0x00000000);
+		WREG32((0xb08 + j), 0x00000000);
+		WREG32((0xb09 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	gmc_v6_0_mc_stop(adev, &save);
+
+	if (gmc_v6_0_wait_for_idle((void *)adev))
+		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+	       adev->mc.vram_start >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+	       adev->mc.vram_end >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+	       adev->vram_scratch.gpu_addr >> 12);
+	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	/* XXX double check these! */
+	WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	WREG32(MC_VM_AGP_BASE, 0);
+	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+
+	if (gmc_v6_0_wait_for_idle((void *)adev))
+		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+	gmc_v6_0_mc_resume(adev, &save);
+	amdgpu_display_set_vga_render_state(adev, false);
+}
+
+static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE)
+		chansize = 16;
+	else if (tmp & CHANSIZE_MASK)
+		chansize = 64;
+	else
+		chansize = 32;
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	case 4:
+		numchan = 3;
+		break;
+	case 5:
+		numchan = 6;
+		break;
+	case 6:
+		numchan = 10;
+		break;
+	case 7:
+		numchan = 12;
+		break;
+	case 8:
+		numchan = 16;
+		break;
+	}
+	adev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+	/* size in MB on si */
+	adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->mc.visible_vram_size = adev->mc.aper_size;
+
+	/* unless the user has overridden it, set the gart
+	 * size equal to 1024MB or the vram size, whichever is larger.
+	 */
+	if (amdgpu_gart_size == -1)
+		adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+	else
+		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+
+	gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+
+	return 0;
+}
+
+static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+					uint32_t vmid)
+{
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+
+	WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
+				     void *cpu_pt_addr,
+				     uint32_t gpu_page_idx,
+				     uint64_t addr,
+				     uint32_t flags)
+{
+	void __iomem *ptr = (void *)cpu_pt_addr;
+	uint64_t value;
+
+	value = addr & 0xFFFFFFFFFFFFF000ULL;
+	value |= flags;
+	writeq(value, ptr + (gpu_page_idx * 8));
+
+	return 0;
+}
+
+static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
+					      bool value)
+{
+	u32 tmp;
+
+	tmp = RREG32(VM_CONTEXT1_CNTL);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	WREG32(VM_CONTEXT1_CNTL, tmp);
+}
+
+static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+{
+	int r, i;
+
+	if (adev->gart.robj == NULL) {
+		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = amdgpu_gart_table_vram_pin(adev);
+	if (r)
+		return r;
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	       (0xA << 7) |
+	       ENABLE_L1_TLB |
+	       ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       ENABLE_ADVANCED_DRIVER_MODEL |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+	       ENABLE_L2_FRAGMENT_PROCESSING |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       BANK_SELECT(4) |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+	/* setup context0 */
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(adev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT0_CNTL2, 0);
+	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
+	WREG32(0x575, 0);
+	WREG32(0x576, 0);
+	WREG32(0x577, 0);
+
+	/* empty context1-15 */
+	/* set vm size, must be a multiple of 4 */
+	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and setup and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
+	for (i = 1; i < 16; i++) {
+		if (i < 8)
+			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+			       adev->gart.table_addr >> 12);
+		else
+			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+			       adev->gart.table_addr >> 12);
+	}
+
+	/* enable context1-15 */
+	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(adev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL2, 4);
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+				PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(adev->mc.gtt_size >> 20),
+		 (unsigned long long)adev->gart.table_addr);
+	adev->gart.ready = true;
+	return 0;
+}
+
+static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->gart.robj) {
+		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
+		return 0;
+	}
+	r = amdgpu_gart_init(adev);
+	if (r)
+		return r;
+	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
+	return amdgpu_gart_table_vram_alloc(adev);
+}
+
+static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
+{
+	/*unsigned i;
+
+	for (i = 1; i < 16; ++i) {
+		uint32_t reg;
+		if (i < 8)
+			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
+		else
+			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
+		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
+	}*/
+
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+	amdgpu_gart_table_vram_unpin(adev);
+}
+
+static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
+{
+	amdgpu_gart_table_vram_free(adev);
+	amdgpu_gart_fini(adev);
+}
+
+static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
+{
+	/*
+	 * number of VMs
+	 * VMID 0 is reserved for System
+	 * amdgpu graphics/compute will use VMIDs 1-7
+	 * amdkfd will use VMIDs 8-15
+	 */
+	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	amdgpu_vm_manager_init(adev);
+
+	/* base offset of vram pages */
+	if (adev->flags & AMD_IS_APU) {
+		u64 tmp = RREG32(MC_VM_FB_OFFSET);
+		tmp <<= 22;
+		adev->vm_manager.vram_base_offset = tmp;
+	} else {
+		adev->vm_manager.vram_base_offset = 0;
+	}
+
+	return 0;
+}
+
+static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
+{
+}
+
+static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
+				     u32 status, u32 addr, u32 mc_client)
+{
+	u32 mc_id;
+	u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
+	u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+					xxPROTECTIONS);
+	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+
+	mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+			      xxMEMORY_CLIENT_ID);
+
+	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+	       protections, vmid, addr,
+	       REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+			     xxMEMORY_CLIENT_RW) ?
+	       "write" : "read", block, mc_client, mc_id);
+}
+
+/*
+static const u32 mc_cg_registers[] = {
+	MC_HUB_MISC_HUB_CG,
+	MC_HUB_MISC_SIP_CG,
+	MC_HUB_MISC_VM_CG,
+	MC_XPB_CLK_GAT,
+	ATC_MISC_CG,
+	MC_CITF_MISC_WR_CG,
+	MC_CITF_MISC_RD_CG,
+	MC_CITF_MISC_VM_CG,
+	VM_L2_CG,
+};
+
+static const u32 mc_cg_ls_en[] = {
+	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
+	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
+	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
+	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
+	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
+	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
+	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+	VM_L2_CG__MEM_LS_ENABLE_MASK,
+};
+
+static const u32 mc_cg_en[] = {
+	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
+	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
+	MC_HUB_MISC_VM_CG__ENABLE_MASK,
+	MC_XPB_CLK_GAT__ENABLE_MASK,
+	ATC_MISC_CG__ENABLE_MASK,
+	MC_CITF_MISC_WR_CG__ENABLE_MASK,
+	MC_CITF_MISC_RD_CG__ENABLE_MASK,
+	MC_CITF_MISC_VM_CG__ENABLE_MASK,
+	VM_L2_CG__ENABLE_MASK,
+};
+
+static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
+				  bool enable)
+{
+	int i;
+	u32 orig, data;
+
+	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+		orig = data = RREG32(mc_cg_registers[i]);
+		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+			data |= mc_cg_ls_en[i];
+		else
+			data &= ~mc_cg_ls_en[i];
+		if (data != orig)
+			WREG32(mc_cg_registers[i], data);
+	}
+}
+
+static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
+				    bool enable)
+{
+	int i;
+	u32 orig, data;
+
+	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+		orig = data = RREG32(mc_cg_registers[i]);
+		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+			data |= mc_cg_en[i];
+		else
+			data &= ~mc_cg_en[i];
+		if (data != orig)
+			WREG32(mc_cg_registers[i], data);
+	}
+}
+
+static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
+				     bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+
+	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
+	} else {
+		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
+	}
+
+	if (orig != data)
+		WREG32_PCIE(ixPCIE_CNTL2, data);
+}
+
+static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
+				     bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
+	else
+		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
+
+	if (orig != data)
+		WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
+				   bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32(HDP_MEM_POWER_LS);
+
+	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
+	else
+		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
+
+	if (orig != data)
+		WREG32(HDP_MEM_POWER_LS, data);
+}
+*/
+
+static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
+{
+	switch (mc_seq_vram_type) {
+	case MC_SEQ_MISC0__MT__GDDR1:
+		return AMDGPU_VRAM_TYPE_GDDR1;
+	case MC_SEQ_MISC0__MT__DDR2:
+		return AMDGPU_VRAM_TYPE_DDR2;
+	case MC_SEQ_MISC0__MT__GDDR3:
+		return AMDGPU_VRAM_TYPE_GDDR3;
+	case MC_SEQ_MISC0__MT__GDDR4:
+		return AMDGPU_VRAM_TYPE_GDDR4;
+	case MC_SEQ_MISC0__MT__GDDR5:
+		return AMDGPU_VRAM_TYPE_GDDR5;
+	case MC_SEQ_MISC0__MT__DDR3:
+		return AMDGPU_VRAM_TYPE_DDR3;
+	default:
+		return AMDGPU_VRAM_TYPE_UNKNOWN;
+	}
+}
+
+static int gmc_v6_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	gmc_v6_0_set_gart_funcs(adev);
+	gmc_v6_0_set_irq_funcs(adev);
+
+	if (adev->flags & AMD_IS_APU) {
+		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+	} else {
+		u32 tmp = RREG32(MC_SEQ_MISC0);
+		tmp &= MC_SEQ_MISC0__MT__MASK;
+		adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+	}
+
+	return 0;
+}
+
+static int gmc_v6_0_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
+static int gmc_v6_0_sw_init(void *handle)
+{
+	int r;
+	int dma_bits;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+	if (r)
+		return r;
+
+	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+
+	adev->mc.mc_mask = 0xffffffffffULL;
+
+	adev->need_dma32 = false;
+	dma_bits = adev->need_dma32 ? 32 : 40;
+	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+	if (r) {
+		adev->need_dma32 = true;
+		dma_bits = 32;
+		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+	}
+	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+	if (r) {
+		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
+		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
+	}
+
+	r = gmc_v6_0_init_microcode(adev);
+	if (r) {
+		dev_err(adev->dev, "Failed to load mc firmware!\n");
+		return r;
+	}
+
+	r = amdgpu_ttm_global_init(adev);
+	if (r)
+		return r;
+
+	r = gmc_v6_0_mc_init(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_bo_init(adev);
+	if (r)
+		return r;
+
+	r = gmc_v6_0_gart_init(adev);
+	if (r)
+		return r;
+
+	if (!adev->vm_manager.enabled) {
+		r = gmc_v6_0_vm_init(adev);
+		if (r) {
+			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+			return r;
+		}
+		adev->vm_manager.enabled = true;
+	}
+
+	return r;
+}
+
+static int gmc_v6_0_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->vm_manager.enabled) {
+		gmc_v6_0_vm_fini(adev);
+		adev->vm_manager.enabled = false;
+	}
+	gmc_v6_0_gart_fini(adev);
+	amdgpu_gem_force_release(adev);
+	amdgpu_bo_fini(adev);
+
+	return 0;
+}
+
+static int gmc_v6_0_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	gmc_v6_0_mc_program(adev);
+
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = gmc_v6_0_mc_load_microcode(adev);
+		if (r) {
+			dev_err(adev->dev, "Failed to load MC firmware!\n");
+			return r;
+		}
+	}
+
+	r = gmc_v6_0_gart_enable(adev);
+	if (r)
+		return r;
+
+	return r;
+}
+
+static int gmc_v6_0_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+	gmc_v6_0_gart_disable(adev);
+
+	return 0;
+}
+
+static int gmc_v6_0_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->vm_manager.enabled) {
+		gmc_v6_0_vm_fini(adev);
+		adev->vm_manager.enabled = false;
+	}
+	gmc_v6_0_hw_fini(adev);
+
+	return 0;
+}
+
+static int gmc_v6_0_resume(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = gmc_v6_0_hw_init(adev);
+	if (r)
+		return r;
+
+	if (!adev->vm_manager.enabled) {
+		r = gmc_v6_0_vm_init(adev);
+		if (r) {
+			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+			return r;
+		}
+		adev->vm_manager.enabled = true;
+	}
+
+	return r;
+}
+
+static bool gmc_v6_0_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
+		return false;
+
+	return true;
+}
+
+static int gmc_v6_0_wait_for_idle(void *handle)
+{
+	unsigned i;
+	u32 tmp;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+					       SRBM_STATUS__MCC_BUSY_MASK |
+					       SRBM_STATUS__MCD_BUSY_MASK |
+					       SRBM_STATUS__VMC_BUSY_MASK);
+		if (!tmp)
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static int gmc_v6_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_mode_mc_save save;
+	u32 srbm_soft_reset = 0;
+	u32 tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+						mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
+
+	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
+		if (!(adev->flags & AMD_IS_APU))
+			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+							mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
+	}
+
+	if (srbm_soft_reset) {
+		gmc_v6_0_mc_stop(adev, &save);
+		if (gmc_v6_0_wait_for_idle(adev))
+			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
+
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		gmc_v6_0_mc_resume(adev, &save);
+		udelay(50);
+	}
+
+	return 0;
+}
+
+static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
+					     struct amdgpu_irq_src *src,
+					     unsigned type,
+					     enum amdgpu_interrupt_state state)
+{
+	u32 tmp;
+	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		tmp = RREG32(VM_CONTEXT0_CNTL);
+		tmp &= ~bits;
+		WREG32(VM_CONTEXT0_CNTL, tmp);
+		tmp = RREG32(VM_CONTEXT1_CNTL);
+		tmp &= ~bits;
+		WREG32(VM_CONTEXT1_CNTL, tmp);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		tmp = RREG32(VM_CONTEXT0_CNTL);
+		tmp |= bits;
+		WREG32(VM_CONTEXT0_CNTL, tmp);
+		tmp = RREG32(VM_CONTEXT1_CNTL);
+		tmp |= bits;
+		WREG32(VM_CONTEXT1_CNTL, tmp);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	u32 addr, status;
+
+	addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+	status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+	WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+
+	if (!addr && !status)
+		return 0;
+
+	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
+		gmc_v6_0_set_fault_enable_default(adev, false);
+
+	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+		entry->src_id, entry->src_data);
+	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		addr);
+	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		status);
+	gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+
+	return 0;
+}
+
+static int gmc_v6_0_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int gmc_v6_0_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+	.name = "gmc_v6_0",
+	.early_init = gmc_v6_0_early_init,
+	.late_init = gmc_v6_0_late_init,
+	.sw_init = gmc_v6_0_sw_init,
+	.sw_fini = gmc_v6_0_sw_fini,
+	.hw_init = gmc_v6_0_hw_init,
+	.hw_fini = gmc_v6_0_hw_fini,
+	.suspend = gmc_v6_0_suspend,
+	.resume = gmc_v6_0_resume,
+	.is_idle = gmc_v6_0_is_idle,
+	.wait_for_idle = gmc_v6_0_wait_for_idle,
+	.soft_reset = gmc_v6_0_soft_reset,
+	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
+	.set_powergating_state = gmc_v6_0_set_powergating_state,
+};
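
This table is the block's entire contract with the device core: amdgpu walks adev->ip_blocks and calls the same amd_ip_funcs hook on every block at each lifecycle stage, so gmc_v6_0 slots in without any SI-specific wiring. A rough sketch of that driving loop, assuming the usual adev->ip_blocks layout (the function name here is illustrative):

/* Illustrative sketch: how the core might run one lifecycle hook
 * across all registered IP blocks. */
static int ip_blocks_hw_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
		if (r)
			return r;
	}
	return 0;
}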
+
+static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
+	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
+	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+};
+
+static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
+	.set = gmc_v6_0_vm_fault_interrupt_state,
+	.process = gmc_v6_0_process_interrupt,
+};
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+{
+	if (adev->gart.gart_funcs == NULL)
+		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+}
+
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->mc.vm_fault.num_types = 1;
+	adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
similarity index 71%
copy from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
copy to drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
index d24b888..42c4fc6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -20,16 +20,10 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
 
-#include "hwmgr.h"
+#ifndef __GMC_V6_0_H__
+#define __GMC_V6_0_H__
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-				struct pp_power_state *, void *, uint32_t));
+extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
 
 #endif
-
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0b0f086..aa0c4b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -183,7 +183,7 @@
 	const struct mc_firmware_header_v1_0 *hdr;
 	const __le32 *fw_data = NULL;
 	const __le32 *io_mc_regs = NULL;
-	u32 running, blackout = 0;
+	u32 running;
 	int i, ucode_size, regs_size;
 
 	if (!adev->mc.fw)
@@ -203,11 +203,6 @@
 	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 
 	if (running == 0) {
-		if (running) {
-			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
-			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
-		}
-
 		/* reset the engine and set to writable */
 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -239,9 +234,6 @@
 				break;
 			udelay(1);
 		}
-
-		if (running)
-			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
 	}
 
 	return 0;
@@ -393,7 +385,7 @@
 	 * size equal to the 1024 or vram, whichever is larger.
 	 */
 	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+		adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
 	else
 		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
 
@@ -953,6 +945,11 @@
 		return r;
 	}
 
+	r = amdgpu_ttm_global_init(adev);
+	if (r) {
+		return r;
+	}
+
 	r = gmc_v7_0_mc_init(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 2aee2c6..84c10d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -261,7 +261,7 @@
 	const struct mc_firmware_header_v1_0 *hdr;
 	const __le32 *fw_data = NULL;
 	const __le32 *io_mc_regs = NULL;
-	u32 running, blackout = 0;
+	u32 running;
 	int i, ucode_size, regs_size;
 
 	if (!adev->mc.fw)
@@ -287,11 +287,6 @@
 	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 
 	if (running == 0) {
-		if (running) {
-			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
-			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
-		}
-
 		/* reset the engine and set to writable */
 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -323,9 +318,6 @@
 				break;
 			udelay(1);
 		}
-
-		if (running)
-			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
 	}
 
 	return 0;
@@ -477,7 +469,7 @@
 	 * size equal to the 1024 or vram, whichever is larger.
 	 */
 	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+		adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
 	else
 		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
 
@@ -957,6 +949,11 @@
 		return r;
 	}
 
+	r = amdgpu_ttm_global_init(adev);
+	if (r) {
+		return r;
+	}
+
 	r = gmc_v8_0_mc_init(adev);
 	if (r)
 		return r;
@@ -1100,9 +1097,8 @@
 
 }
 
-static int gmc_v8_0_soft_reset(void *handle)
+static int gmc_v8_0_check_soft_reset(void *handle)
 {
-	struct amdgpu_mode_mc_save save;
 	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1117,13 +1113,42 @@
 			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
+	if (srbm_soft_reset) {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
+		adev->mc.srbm_soft_reset = srbm_soft_reset;
+	} else {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
+		adev->mc.srbm_soft_reset = 0;
+	}
+	return 0;
+}
+
+static int gmc_v8_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+		return 0;
+
+	gmc_v8_0_mc_stop(adev, &adev->mc.save);
+	if (gmc_v8_0_wait_for_idle(adev)) {
+		dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
+	}
+
+	return 0;
+}
+
+static int gmc_v8_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+		return 0;
+	srbm_soft_reset = adev->mc.srbm_soft_reset;
 
 	if (srbm_soft_reset) {
-		gmc_v8_0_mc_stop(adev, &save);
-		if (gmc_v8_0_wait_for_idle((void *)adev)) {
-			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
-		}
-
+		u32 tmp;
 
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
@@ -1139,14 +1164,22 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-
-		gmc_v8_0_mc_resume(adev, &save);
-		udelay(50);
 	}
 
 	return 0;
 }
 
+static int gmc_v8_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+		return 0;
+
+	gmc_v8_0_mc_resume(adev, &adev->mc.save);
+	return 0;
+}
+
 static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 					     struct amdgpu_irq_src *src,
 					     unsigned type,
@@ -1414,7 +1447,10 @@
 	.resume = gmc_v8_0_resume,
 	.is_idle = gmc_v8_0_is_idle,
 	.wait_for_idle = gmc_v8_0_wait_for_idle,
+	.check_soft_reset = gmc_v8_0_check_soft_reset,
+	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
 	.soft_reset = gmc_v8_0_soft_reset,
+	.post_soft_reset = gmc_v8_0_post_soft_reset,
 	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
 	.set_powergating_state = gmc_v8_0_set_powergating_state,
 };
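
This splits the old monolithic soft_reset into four phases the core can sequence across every IP block at once: check_soft_reset samples the status registers and caches both the hang verdict and the SRBM bits, pre_soft_reset quiesces the block (here: stop the MC and wait for idle), soft_reset pulses SRBM_SOFT_RESET, and post_soft_reset restores state (here: resume the MC). A hedged sketch of the intended ordering, assuming the core visits every block per phase and skips missing hooks (loop and name illustrative):

/* Illustrative only: expected phase ordering during a GPU reset. */
static int amdgpu_soft_reset_sketch(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].funcs->check_soft_reset)
			adev->ip_blocks[i].funcs->check_soft_reset((void *)adev);

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].funcs->pre_soft_reset)
			adev->ip_blocks[i].funcs->pre_soft_reset((void *)adev);

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].funcs->soft_reset)
			adev->ip_blocks[i].funcs->soft_reset((void *)adev);

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].funcs->post_soft_reset)
			adev->ip_blocks[i].funcs->post_soft_reset((void *)adev);

	return 0;
}

Each hook bails out early when the hang flag was never set, so blocks that were idle at check time pass through the reset untouched.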
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 2118399..ef7c27d 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -121,7 +121,7 @@
 	return result;
 }
 
-void iceland_start_smc(struct amdgpu_device *adev)
+static void iceland_start_smc(struct amdgpu_device *adev)
 {
 	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
 
@@ -129,7 +129,7 @@
 	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
 }
 
-void iceland_reset_smc(struct amdgpu_device *adev)
+static void iceland_reset_smc(struct amdgpu_device *adev)
 {
 	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
 
@@ -145,7 +145,7 @@
 	return 0;
 }
 
-void iceland_stop_smc_clock(struct amdgpu_device *adev)
+static void iceland_stop_smc_clock(struct amdgpu_device *adev)
 {
 	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
 
@@ -153,7 +153,7 @@
 	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
 }
 
-void iceland_start_smc_clock(struct amdgpu_device *adev)
+static void iceland_start_smc_clock(struct amdgpu_device *adev)
 {
 	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a845e88..f8618a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2845,7 +2845,11 @@
 		pi->caps_tcp_ramping = true;
 	}
 
-	pi->caps_sclk_ds = true;
+	if (amdgpu_sclk_deep_sleep_en)
+		pi->caps_sclk_ds = true;
+	else
+		pi->caps_sclk_ds = false;
+
 	pi->enable_auto_thermal_throttling = true;
 	pi->disable_nb_ps3_in_battery = false;
 	if (amdgpu_bapm == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
new file mode 100644
index 0000000..055321f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __R600_DPM_H__
+#define __R600_DPM_H__
+
+#define R600_ASI_DFLT                                10000
+#define R600_BSP_DFLT                                0x41EB
+#define R600_BSU_DFLT                                0x2
+#define R600_AH_DFLT                                 5
+#define R600_RLP_DFLT                                25
+#define R600_RMP_DFLT                                65
+#define R600_LHP_DFLT                                40
+#define R600_LMP_DFLT                                15
+#define R600_TD_DFLT                                 0
+#define R600_UTC_DFLT_00                             0x24
+#define R600_UTC_DFLT_01                             0x22
+#define R600_UTC_DFLT_02                             0x22
+#define R600_UTC_DFLT_03                             0x22
+#define R600_UTC_DFLT_04                             0x22
+#define R600_UTC_DFLT_05                             0x22
+#define R600_UTC_DFLT_06                             0x22
+#define R600_UTC_DFLT_07                             0x22
+#define R600_UTC_DFLT_08                             0x22
+#define R600_UTC_DFLT_09                             0x22
+#define R600_UTC_DFLT_10                             0x22
+#define R600_UTC_DFLT_11                             0x22
+#define R600_UTC_DFLT_12                             0x22
+#define R600_UTC_DFLT_13                             0x22
+#define R600_UTC_DFLT_14                             0x22
+#define R600_DTC_DFLT_00                             0x24
+#define R600_DTC_DFLT_01                             0x22
+#define R600_DTC_DFLT_02                             0x22
+#define R600_DTC_DFLT_03                             0x22
+#define R600_DTC_DFLT_04                             0x22
+#define R600_DTC_DFLT_05                             0x22
+#define R600_DTC_DFLT_06                             0x22
+#define R600_DTC_DFLT_07                             0x22
+#define R600_DTC_DFLT_08                             0x22
+#define R600_DTC_DFLT_09                             0x22
+#define R600_DTC_DFLT_10                             0x22
+#define R600_DTC_DFLT_11                             0x22
+#define R600_DTC_DFLT_12                             0x22
+#define R600_DTC_DFLT_13                             0x22
+#define R600_DTC_DFLT_14                             0x22
+#define R600_VRC_DFLT                                0x0000C003
+#define R600_VOLTAGERESPONSETIME_DFLT                1000
+#define R600_BACKBIASRESPONSETIME_DFLT               1000
+#define R600_VRU_DFLT                                0x3
+#define R600_SPLLSTEPTIME_DFLT                       0x1000
+#define R600_SPLLSTEPUNIT_DFLT                       0x3
+#define R600_TPU_DFLT                                0
+#define R600_TPC_DFLT                                0x200
+#define R600_SSTU_DFLT                               0
+#define R600_SST_DFLT                                0x00C8
+#define R600_GICST_DFLT                              0x200
+#define R600_FCT_DFLT                                0x0400
+#define R600_FCTU_DFLT                               0
+#define R600_CTXCGTT3DRPHC_DFLT                      0x20
+#define R600_CTXCGTT3DRSDC_DFLT                      0x40
+#define R600_VDDC3DOORPHC_DFLT                       0x100
+#define R600_VDDC3DOORSDC_DFLT                       0x7
+#define R600_VDDC3DOORSU_DFLT                        0
+#define R600_MPLLLOCKTIME_DFLT                       100
+#define R600_MPLLRESETTIME_DFLT                      150
+#define R600_VCOSTEPPCT_DFLT                          20
+#define R600_ENDINGVCOSTEPPCT_DFLT                    5
+#define R600_REFERENCEDIVIDER_DFLT                    4
+
+#define R600_PM_NUMBER_OF_TC 15
+#define R600_PM_NUMBER_OF_SCLKS 20
+#define R600_PM_NUMBER_OF_MCLKS 4
+#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define R600_TEMP_RANGE_MIN (90 * 1000)
+#define R600_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum r600_power_level {
+	R600_POWER_LEVEL_LOW = 0,
+	R600_POWER_LEVEL_MEDIUM = 1,
+	R600_POWER_LEVEL_HIGH = 2,
+	R600_POWER_LEVEL_CTXSW = 3,
+};
+
+enum r600_td {
+	R600_TD_AUTO,
+	R600_TD_UP,
+	R600_TD_DOWN,
+};
+
+enum r600_display_watermark {
+	R600_DISPLAY_WATERMARK_LOW = 0,
+	R600_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum r600_display_gap
+{
+    R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+    R600_PM_DISPLAY_GAP_VBLANK       = 1,
+    R600_PM_DISPLAY_GAP_WATERMARK    = 2,
+    R600_PM_DISPLAY_GAP_IGNORE       = 3,
+};
+#endif
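
One unit note on the header above: the temperature range defines are in millidegrees Celsius (90 * 1000 = 90 C), the same scale the hwmon sysfs interface uses, which is why the raw numbers look large. A trivial illustration (helper hypothetical):

/* Hypothetical helper: clamp a thermal trip point (millidegrees C)
 * into the range this header considers sane. */
static int r600_clamp_trip_temp(int temp)
{
	if (temp < R600_TEMP_RANGE_MIN)
		return R600_TEMP_RANGE_MIN;
	if (temp > R600_TEMP_RANGE_MAX)
		return R600_TEMP_RANGE_MAX;
	return temp;
}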
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 1351c7e..9ae3075 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -190,12 +190,8 @@
  */
 static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 {
-	u32 rptr;
-
 	/* XXX check if swapping is necessary on BE */
-	rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
-	return rptr;
+	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
 }
 
 /**
@@ -749,24 +745,16 @@
 				  uint64_t pe, uint64_t src,
 				  unsigned count)
 {
-	while (count) {
-		unsigned bytes = count * 8;
-		if (bytes > 0x1FFFF8)
-			bytes = 0x1FFFF8;
+	unsigned bytes = count * 8;
 
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-		ib->ptr[ib->length_dw++] = bytes;
-		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-		ib->ptr[ib->length_dw++] = lower_32_bits(src);
-		ib->ptr[ib->length_dw++] = upper_32_bits(src);
-		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-		pe += bytes;
-		src += bytes;
-		count -= bytes / 8;
-	}
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = bytes;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 }
 
 /**
@@ -774,39 +762,27 @@
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
-				   const dma_addr_t *pages_addr, uint64_t pe,
-				   uint64_t addr, unsigned count,
-				   uint32_t incr, uint32_t flags)
+static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+				   uint64_t value, unsigned count,
+				   uint32_t incr)
 {
-	uint64_t value;
-	unsigned ndw;
+	unsigned ndw = count * 2;
 
-	while (count) {
-		ndw = count * 2;
-		if (ndw > 0xFFFFE)
-			ndw = 0xFFFFE;
-
-		/* for non-physically contiguous pages (system) */
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-		ib->ptr[ib->length_dw++] = pe;
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = ndw;
-		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-			value = amdgpu_vm_map_gart(pages_addr, addr);
-			addr += incr;
-			value |= flags;
-			ib->ptr[ib->length_dw++] = value;
-			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		}
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = pe;
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = ndw;
+	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+		ib->ptr[ib->length_dw++] = lower_32_bits(value);
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		value += incr;
 	}
 }
 
@@ -822,40 +798,21 @@
  *
  * Update the page tables using sDMA (CIK).
  */
-static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
-				     uint64_t pe,
+static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 				     uint64_t addr, unsigned count,
 				     uint32_t incr, uint32_t flags)
 {
-	uint64_t value;
-	unsigned ndw;
-
-	while (count) {
-		ndw = count;
-		if (ndw > 0x7FFFF)
-			ndw = 0x7FFFF;
-
-		if (flags & AMDGPU_PTE_VALID)
-			value = addr;
-		else
-			value = 0;
-
-		/* for physically contiguous pages (vram) */
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
-		ib->ptr[ib->length_dw++] = pe; /* dst addr */
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = flags; /* mask */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = value; /* value */
-		ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		ib->ptr[ib->length_dw++] = incr; /* increment size */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
-		pe += ndw * 8;
-		addr += ndw * incr;
-		count -= ndw;
-	}
+	/* for physically contiguous pages (vram) */
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = flags; /* mask */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = incr; /* increment size */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = count; /* number of entries */
 }
 
 /**
@@ -945,6 +902,22 @@
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
+static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 6; /* sdma_v2_4_ring_emit_ib */
+}
+
+static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		6 + /* sdma_v2_4_ring_emit_hdp_flush */
+		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+		12 + /* sdma_v2_4_ring_emit_vm_flush */
+		10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+}
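
These two hooks let common code budget worst-case ring space per submission instead of hard-coding per-engine dword counts: one fixed frame overhead plus a per-IB cost. A sketch of how a submit path might use them, assuming the usual amdgpu_ring_alloc() reservation call (the wrapper name is hypothetical):

/* Hypothetical wrapper: reserve worst-case ring space for one job. */
static int reserve_ring_space(struct amdgpu_ring *ring, unsigned num_ibs)
{
	unsigned ndw = ring->funcs->get_dma_frame_size(ring) +
		       num_ibs * ring->funcs->get_emit_ib_size(ring);

	return amdgpu_ring_alloc(ring, ndw);
}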
+
 static int sdma_v2_4_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1263,6 +1236,8 @@
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.insert_nop = sdma_v2_4_ring_insert_nop,
 	.pad_ib = sdma_v2_4_ring_pad_ib,
+	.get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
+	.get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 653ce5e..f325fd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -335,12 +335,8 @@
  */
 static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
-	u32 rptr;
-
 	/* XXX check if swapping is necessary on BE */
-	rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
-	return rptr;
+	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
 }
 
 /**
@@ -499,31 +495,6 @@
 	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 }
 
-unsigned init_cond_exec(struct amdgpu_ring *ring)
-{
-	unsigned ret;
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
-	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
-	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
-	amdgpu_ring_write(ring, 1);
-	ret = ring->wptr;/* this is the offset we need patch later */
-	amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
-	return ret;
-}
-
-void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
-{
-	unsigned cur;
-	BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
-	cur = ring->wptr - 1;
-	if (likely(cur > offset))
-		ring->ring[offset] = cur - offset;
-	else
-		ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
-}
-
-
 /**
  * sdma_v3_0_gfx_stop - stop the gfx async dma engines
  *
@@ -976,24 +947,16 @@
 				  uint64_t pe, uint64_t src,
 				  unsigned count)
 {
-	while (count) {
-		unsigned bytes = count * 8;
-		if (bytes > 0x1FFFF8)
-			bytes = 0x1FFFF8;
+	unsigned bytes = count * 8;
 
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-		ib->ptr[ib->length_dw++] = bytes;
-		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-		ib->ptr[ib->length_dw++] = lower_32_bits(src);
-		ib->ptr[ib->length_dw++] = upper_32_bits(src);
-		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-		pe += bytes;
-		src += bytes;
-		count -= bytes / 8;
-	}
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = bytes;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 }
 
 /**
@@ -1001,39 +964,27 @@
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
-				   const dma_addr_t *pages_addr, uint64_t pe,
-				   uint64_t addr, unsigned count,
-				   uint32_t incr, uint32_t flags)
+static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+				   uint64_t value, unsigned count,
+				   uint32_t incr)
 {
-	uint64_t value;
-	unsigned ndw;
+	unsigned ndw = count * 2;
 
-	while (count) {
-		ndw = count * 2;
-		if (ndw > 0xFFFFE)
-			ndw = 0xFFFFE;
-
-		/* for non-physically contiguous pages (system) */
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-		ib->ptr[ib->length_dw++] = pe;
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = ndw;
-		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-			value = amdgpu_vm_map_gart(pages_addr, addr);
-			addr += incr;
-			value |= flags;
-			ib->ptr[ib->length_dw++] = value;
-			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		}
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = ndw;
+	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+		ib->ptr[ib->length_dw++] = lower_32_bits(value);
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		value += incr;
 	}
 }
 
@@ -1049,40 +1000,21 @@
  *
  * Update the page tables using sDMA (CIK).
  */
-static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
-				     uint64_t pe,
+static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 				     uint64_t addr, unsigned count,
 				     uint32_t incr, uint32_t flags)
 {
-	uint64_t value;
-	unsigned ndw;
-
-	while (count) {
-		ndw = count;
-		if (ndw > 0x7FFFF)
-			ndw = 0x7FFFF;
-
-		if (flags & AMDGPU_PTE_VALID)
-			value = addr;
-		else
-			value = 0;
-
-		/* for physically contiguous pages (vram) */
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
-		ib->ptr[ib->length_dw++] = pe; /* dst addr */
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = flags; /* mask */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = value; /* value */
-		ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		ib->ptr[ib->length_dw++] = incr; /* increment size */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
-		pe += ndw * 8;
-		addr += ndw * incr;
-		count -= ndw;
-	}
+	/* for physically contiguous pages (vram) */
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = flags; /* mask */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = incr; /* increment size */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = count; /* number of entries */
 }
 
 /**
@@ -1172,6 +1104,22 @@
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
+static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 6; /* sdma_v3_0_ring_emit_ib */
+}
+
+static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		6 + /* sdma_v3_0_ring_emit_hdp_flush */
+		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+		12 + /* sdma_v3_0_ring_emit_vm_flush */
+		10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
 static int sdma_v3_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1320,28 +1268,79 @@
 	return -ETIMEDOUT;
 }
 
-static int sdma_v3_0_soft_reset(void *handle)
+static int sdma_v3_0_check_soft_reset(void *handle)
 {
-	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
 	u32 tmp = RREG32(mmSRBM_STATUS2);
 
-	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
-		/* sdma0 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
-		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
+	    (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-	}
-	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
-		/* sdma1 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
-		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
 	}
 
 	if (srbm_soft_reset) {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
+		adev->sdma.srbm_soft_reset = srbm_soft_reset;
+	} else {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
+		adev->sdma.srbm_soft_reset = 0;
+	}
+
+	return 0;
+}
+
+static int sdma_v3_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+		return 0;
+
+	srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+		sdma_v3_0_ctx_switch_enable(adev, false);
+		sdma_v3_0_enable(adev, false);
+	}
+
+	return 0;
+}
+
+static int sdma_v3_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+		return 0;
+
+	srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+		sdma_v3_0_gfx_resume(adev);
+		sdma_v3_0_rlc_resume(adev);
+	}
+
+	return 0;
+}
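
Both helpers above key off REG_GET_FIELD to see whether the cached mask touches either SDMA instance before halting or resuming the engines. For reference, REG_GET_FIELD/REG_SET_FIELD are plain mask-and-shift wrappers over the generated *__MASK/__SHIFT definitions; a small self-check of that reading (illustrative only, SOFT_RESET_SDMA being a single-bit field):

/* Illustrative only: the field macros are mask-and-shift helpers. */
static void field_macro_demo(void)
{
	u32 v = 0;

	v = REG_SET_FIELD(v, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);
	WARN_ON(v != SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK);
	WARN_ON(REG_GET_FIELD(v, SRBM_SOFT_RESET, SOFT_RESET_SDMA) != 1);
}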
+
+static int sdma_v3_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+		return 0;
+
+	srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+	if (srbm_soft_reset) {
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1559,6 +1558,9 @@
 	.resume = sdma_v3_0_resume,
 	.is_idle = sdma_v3_0_is_idle,
 	.wait_for_idle = sdma_v3_0_wait_for_idle,
+	.check_soft_reset = sdma_v3_0_check_soft_reset,
+	.pre_soft_reset = sdma_v3_0_pre_soft_reset,
+	.post_soft_reset = sdma_v3_0_post_soft_reset,
 	.soft_reset = sdma_v3_0_soft_reset,
 	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
 	.set_powergating_state = sdma_v3_0_set_powergating_state,
@@ -1579,6 +1581,8 @@
 	.test_ib = sdma_v3_0_ring_test_ib,
 	.insert_nop = sdma_v3_0_ring_insert_nop,
 	.pad_ib = sdma_v3_0_ring_pad_ib,
+	.get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
new file mode 100644
index 0000000..fee76b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -0,0 +1,1965 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+#include "atom.h"
+#include "amdgpu_powerplay.h"
+#include "si/sid.h"
+#include "si_ih.h"
+#include "gfx_v6_0.h"
+#include "gmc_v6_0.h"
+#include "si_dma.h"
+#include "dce_v6_0.h"
+#include "si.h"
+
+static const u32 tahiti_golden_registers[] =
+{
+	0x2684, 0x00010000, 0x00018208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x031e, 0x00000080, 0x00000000,
+	0x340c, 0x000300c0, 0x00800040,
+	0x360c, 0x000300c0, 0x00800040,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0x00200000, 0x50100000,
+	0x1c0c, 0x31000311, 0x00000011,
+	0x09df, 0x00000003, 0x000007ff,
+	0x0903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000001f, 0x00000007,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0x22c4, 0x0000ff0f, 0x00000000,
+	0xa293, 0x07ffffff, 0x4e000000,
+	0xa0d4, 0x3f3f3fff, 0x2a00126a,
+	0x000c, 0x000000ff, 0x0040,
+	0x000d, 0x00000040, 0x00004040,
+	0x2440, 0x07ffffff, 0x03000000,
+	0x23a2, 0x01ff1f3f, 0x00000000,
+	0x23a1, 0x01ff1f3f, 0x00000000,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b05, 0x00000200, 0x000002fb,
+	0x2b04, 0xffffffff, 0x0000543b,
+	0x2b03, 0xffffffff, 0xa9210876,
+	0x2234, 0xffffffff, 0x000fff40,
+	0x2235, 0x0000001f, 0x00000010,
+	0x0504, 0x20000000, 0x20fffed8,
+	0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+	0x0319, 0x00000001, 0x00000001
+};
+
+static const u32 tahiti_golden_rlc_registers[] =
+{
+	0x3109, 0xffffffff, 0x00601005,
+	0x311f, 0xffffffff, 0x10104040,
+	0x3122, 0xffffffff, 0x0100000a,
+	0x30c5, 0xffffffff, 0x00000800,
+	0x30c3, 0xffffffff, 0x800000f4,
+	0x3d2a, 0xffffffff, 0x00000000
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+	0x2684, 0x00010000, 0x00018208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x031e, 0x00000080, 0x00000000,
+	0x340c, 0x000300c0, 0x00800040,
+	0x360c, 0x000300c0, 0x00800040,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0x00200000, 0x50100000,
+	0x1c0c, 0x31000311, 0x00000011,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000001f, 0x00000007,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0x22c4, 0x0000ff0f, 0x00000000,
+	0xa293, 0x07ffffff, 0x4e000000,
+	0xa0d4, 0x3f3f3fff, 0x2a00126a,
+	0x000c, 0x000000ff, 0x0040,
+	0x000d, 0x00000040, 0x00004040,
+	0x2440, 0x07ffffff, 0x03000000,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b05, 0x000003ff, 0x000000f7,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b03, 0xffffffff, 0x32761054,
+	0x2235, 0x0000001f, 0x00000010,
+	0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+	0x3109, 0xffffffff, 0x00601004,
+	0x311f, 0xffffffff, 0x10102020,
+	0x3122, 0xffffffff, 0x01000020,
+	0x30c5, 0xffffffff, 0x00000800,
+	0x30c3, 0xffffffff, 0x800000a4
+};
+
+static const u32 verde_pg_init[] =
+{
+	0xd4f, 0xffffffff, 0x40000,
+	0xd4e, 0xffffffff, 0x200010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x7007,
+	0xd4e, 0xffffffff, 0x300010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x400000,
+	0xd4e, 0xffffffff, 0x100010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x120200,
+	0xd4e, 0xffffffff, 0x500010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x1e1e16,
+	0xd4e, 0xffffffff, 0x600010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x171f1e,
+	0xd4e, 0xffffffff, 0x700010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4e, 0xffffffff, 0x9ff,
+	0xd40, 0xffffffff, 0x0,
+	0xd41, 0xffffffff, 0x10000800,
+	0xd41, 0xffffffff, 0xf,
+	0xd41, 0xffffffff, 0xf,
+	0xd40, 0xffffffff, 0x4,
+	0xd41, 0xffffffff, 0x1000051e,
+	0xd41, 0xffffffff, 0xffff,
+	0xd41, 0xffffffff, 0xffff,
+	0xd40, 0xffffffff, 0x8,
+	0xd41, 0xffffffff, 0x80500,
+	0xd40, 0xffffffff, 0x12,
+	0xd41, 0xffffffff, 0x9050c,
+	0xd40, 0xffffffff, 0x1d,
+	0xd41, 0xffffffff, 0xb052c,
+	0xd40, 0xffffffff, 0x2a,
+	0xd41, 0xffffffff, 0x1053e,
+	0xd40, 0xffffffff, 0x2d,
+	0xd41, 0xffffffff, 0x10546,
+	0xd40, 0xffffffff, 0x30,
+	0xd41, 0xffffffff, 0xa054e,
+	0xd40, 0xffffffff, 0x3c,
+	0xd41, 0xffffffff, 0x1055f,
+	0xd40, 0xffffffff, 0x3f,
+	0xd41, 0xffffffff, 0x10567,
+	0xd40, 0xffffffff, 0x42,
+	0xd41, 0xffffffff, 0x1056f,
+	0xd40, 0xffffffff, 0x45,
+	0xd41, 0xffffffff, 0x10572,
+	0xd40, 0xffffffff, 0x48,
+	0xd41, 0xffffffff, 0x20575,
+	0xd40, 0xffffffff, 0x4c,
+	0xd41, 0xffffffff, 0x190801,
+	0xd40, 0xffffffff, 0x67,
+	0xd41, 0xffffffff, 0x1082a,
+	0xd40, 0xffffffff, 0x6a,
+	0xd41, 0xffffffff, 0x1b082d,
+	0xd40, 0xffffffff, 0x87,
+	0xd41, 0xffffffff, 0x310851,
+	0xd40, 0xffffffff, 0xba,
+	0xd41, 0xffffffff, 0x891,
+	0xd40, 0xffffffff, 0xbc,
+	0xd41, 0xffffffff, 0x893,
+	0xd40, 0xffffffff, 0xbe,
+	0xd41, 0xffffffff, 0x20895,
+	0xd40, 0xffffffff, 0xc2,
+	0xd41, 0xffffffff, 0x20899,
+	0xd40, 0xffffffff, 0xc6,
+	0xd41, 0xffffffff, 0x2089d,
+	0xd40, 0xffffffff, 0xca,
+	0xd41, 0xffffffff, 0x8a1,
+	0xd40, 0xffffffff, 0xcc,
+	0xd41, 0xffffffff, 0x8a3,
+	0xd40, 0xffffffff, 0xce,
+	0xd41, 0xffffffff, 0x308a5,
+	0xd40, 0xffffffff, 0xd3,
+	0xd41, 0xffffffff, 0x6d08cd,
+	0xd40, 0xffffffff, 0x142,
+	0xd41, 0xffffffff, 0x2000095a,
+	0xd41, 0xffffffff, 0x1,
+	0xd40, 0xffffffff, 0x144,
+	0xd41, 0xffffffff, 0x301f095b,
+	0xd40, 0xffffffff, 0x165,
+	0xd41, 0xffffffff, 0xc094d,
+	0xd40, 0xffffffff, 0x173,
+	0xd41, 0xffffffff, 0xf096d,
+	0xd40, 0xffffffff, 0x184,
+	0xd41, 0xffffffff, 0x15097f,
+	0xd40, 0xffffffff, 0x19b,
+	0xd41, 0xffffffff, 0xc0998,
+	0xd40, 0xffffffff, 0x1a9,
+	0xd41, 0xffffffff, 0x409a7,
+	0xd40, 0xffffffff, 0x1af,
+	0xd41, 0xffffffff, 0xcdc,
+	0xd40, 0xffffffff, 0x1b1,
+	0xd41, 0xffffffff, 0x800,
+	0xd42, 0xffffffff, 0x6c9b2000,
+	0xd44, 0xfc00, 0x2000,
+	0xd51, 0xffffffff, 0xfc0,
+	0xa35, 0x00000100, 0x100
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+	0x3109, 0xffffffff, 0x033f1005,
+	0x311f, 0xffffffff, 0x10808020,
+	0x3122, 0xffffffff, 0x00800008,
+	0x30c5, 0xffffffff, 0x00001000,
+	0x30c3, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+	0x2684, 0x00010000, 0x00018208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x031e, 0x00000080, 0x00000000,
+	0x340c, 0x000300c0, 0x00800040,
+	0x340c, 0x000300c0, 0x00800040,
+	0x360c, 0x000300c0, 0x00800040,
+	0x360c, 0x000300c0, 0x00800040,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0x00200000, 0x50100000,
+
+	0x1c0c, 0x31000311, 0x00000011,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0903, 0x000007ff, 0x00000000,
+	0x0903, 0x000007ff, 0x00000000,
+	0x0903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000001f, 0x00000007,
+	0x2285, 0xf000001f, 0x00000007,
+	0x2285, 0xf000001f, 0x00000007,
+	0x2285, 0xffffffff, 0x00ffffff,
+	0x22c4, 0x0000ff0f, 0x00000000,
+
+	0xa293, 0x07ffffff, 0x4e000000,
+	0xa0d4, 0x3f3f3fff, 0x0000124a,
+	0xa0d4, 0x3f3f3fff, 0x0000124a,
+	0xa0d4, 0x3f3f3fff, 0x0000124a,
+	0x000c, 0x000000ff, 0x0040,
+	0x000d, 0x00000040, 0x00004040,
+	0x2440, 0x07ffffff, 0x03000000,
+	0x2440, 0x07ffffff, 0x03000000,
+	0x23a2, 0x01ff1f3f, 0x00000000,
+	0x23a3, 0x01ff1f3f, 0x00000000,
+	0x23a2, 0x01ff1f3f, 0x00000000,
+	0x23a1, 0x01ff1f3f, 0x00000000,
+	0x23a1, 0x01ff1f3f, 0x00000000,
+
+	0x23a1, 0x01ff1f3f, 0x00000000,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b01, 0x000003ff, 0x00000003,
+	0x2b05, 0x000003ff, 0x00000003,
+	0x2b05, 0x000003ff, 0x00000003,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b03, 0xffffffff, 0x00001032,
+	0x2b03, 0xffffffff, 0x00001032,
+	0x2b03, 0xffffffff, 0x00001032,
+	0x2235, 0x0000001f, 0x00000010,
+	0x2235, 0x0000001f, 0x00000010,
+	0x2235, 0x0000001f, 0x00000010,
+	0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_registers[] =
+{
+	0x2684, 0x00010000, 0x00018208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x031e, 0x00000080, 0x00000000,
+	0x340c, 0x000300c0, 0x00800040,
+	0x360c, 0x000300c0, 0x00800040,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f9, 0x00200000, 0x50100000,
+	0x1c0c, 0x31000311, 0x00000011,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000001f, 0x00000007,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0x22c4, 0x0000ff0f, 0x00000000,
+	0xa293, 0x07ffffff, 0x4e000000,
+	0xa0d4, 0x3f3f3fff, 0x00000082,
+	0x000c, 0x000000ff, 0x0040,
+	0x000d, 0x00000040, 0x00004040,
+	0x2440, 0x07ffffff, 0x03000000,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b05, 0x000003ff, 0x000000f3,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b03, 0xffffffff, 0x00003210,
+	0x2235, 0x0000001f, 0x00000010,
+	0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+	0x3109, 0xffffffff, 0x00601005,
+	0x311f, 0xffffffff, 0x10104040,
+	0x3122, 0xffffffff, 0x0100000a,
+	0x30c5, 0xffffffff, 0x00000800,
+	0x30c3, 0xffffffff, 0x800000f4
+};
+
+static const u32 hainan_golden_registers[] =
+{
+	0x2684, 0x00010000, 0x00018208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x4595, 0xff000fff, 0x00000100,
+	0x340c, 0x000300c0, 0x00800040,
+	0x3630, 0xff000fff, 0x00000100,
+	0x360c, 0x000300c0, 0x00800040,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000001f, 0x00000007,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0x22c4, 0x0000ff0f, 0x00000000,
+	0xa393, 0x07ffffff, 0x4e000000,
+	0xa0d4, 0x3f3f3fff, 0x00000000,
+	0x000c, 0x000000ff, 0x0040,
+	0x000d, 0x00000040, 0x00004040,
+	0x2440, 0x03e00000, 0x03600000,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b05, 0x000003ff, 0x000000f1,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b03, 0xffffffff, 0x00003210,
+	0x2235, 0x0000001f, 0x00000010,
+	0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+	0x263e, 0xffffffff, 0x02010001
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+	0x3100, 0xffffffff, 0xfffffffc,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2698, 0xffffffff, 0x00000100,
+	0x24a9, 0xffffffff, 0x00000100,
+	0x3059, 0xffffffff, 0x00000100,
+	0x25dd, 0xffffffff, 0x00000100,
+	0x2261, 0xffffffff, 0x06000100,
+	0x2286, 0xffffffff, 0x00000100,
+	0x24a8, 0xffffffff, 0x00000100,
+	0x30e0, 0xffffffff, 0x00000100,
+	0x22ca, 0xffffffff, 0x00000100,
+	0x2451, 0xffffffff, 0x00000100,
+	0x2362, 0xffffffff, 0x00000100,
+	0x2363, 0xffffffff, 0x00000100,
+	0x240c, 0xffffffff, 0x00000100,
+	0x240d, 0xffffffff, 0x00000100,
+	0x240e, 0xffffffff, 0x00000100,
+	0x240f, 0xffffffff, 0x00000100,
+	0x2b60, 0xffffffff, 0x00000100,
+	0x2b15, 0xffffffff, 0x00000100,
+	0x225f, 0xffffffff, 0x06000100,
+	0x261a, 0xffffffff, 0x00000100,
+	0x2544, 0xffffffff, 0x00000100,
+	0x2bc1, 0xffffffff, 0x00000100,
+	0x2b81, 0xffffffff, 0x00000100,
+	0x2527, 0xffffffff, 0x00000100,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2458, 0xffffffff, 0x00010000,
+	0x2459, 0xffffffff, 0x00030002,
+	0x245a, 0xffffffff, 0x00040007,
+	0x245b, 0xffffffff, 0x00060005,
+	0x245c, 0xffffffff, 0x00090008,
+	0x245d, 0xffffffff, 0x00020001,
+	0x245e, 0xffffffff, 0x00040003,
+	0x245f, 0xffffffff, 0x00000007,
+	0x2460, 0xffffffff, 0x00060005,
+	0x2461, 0xffffffff, 0x00090008,
+	0x2462, 0xffffffff, 0x00030002,
+	0x2463, 0xffffffff, 0x00050004,
+	0x2464, 0xffffffff, 0x00000008,
+	0x2465, 0xffffffff, 0x00070006,
+	0x2466, 0xffffffff, 0x000a0009,
+	0x2467, 0xffffffff, 0x00040003,
+	0x2468, 0xffffffff, 0x00060005,
+	0x2469, 0xffffffff, 0x00000009,
+	0x246a, 0xffffffff, 0x00080007,
+	0x246b, 0xffffffff, 0x000b000a,
+	0x246c, 0xffffffff, 0x00050004,
+	0x246d, 0xffffffff, 0x00070006,
+	0x246e, 0xffffffff, 0x0008000b,
+	0x246f, 0xffffffff, 0x000a0009,
+	0x2470, 0xffffffff, 0x000d000c,
+	0x2471, 0xffffffff, 0x00060005,
+	0x2472, 0xffffffff, 0x00080007,
+	0x2473, 0xffffffff, 0x0000000b,
+	0x2474, 0xffffffff, 0x000a0009,
+	0x2475, 0xffffffff, 0x000d000c,
+	0x2476, 0xffffffff, 0x00070006,
+	0x2477, 0xffffffff, 0x00090008,
+	0x2478, 0xffffffff, 0x0000000c,
+	0x2479, 0xffffffff, 0x000b000a,
+	0x247a, 0xffffffff, 0x000e000d,
+	0x247b, 0xffffffff, 0x00080007,
+	0x247c, 0xffffffff, 0x000a0009,
+	0x247d, 0xffffffff, 0x0000000d,
+	0x247e, 0xffffffff, 0x000c000b,
+	0x247f, 0xffffffff, 0x000f000e,
+	0x2480, 0xffffffff, 0x00090008,
+	0x2481, 0xffffffff, 0x000b000a,
+	0x2482, 0xffffffff, 0x000c000f,
+	0x2483, 0xffffffff, 0x000e000d,
+	0x2484, 0xffffffff, 0x00110010,
+	0x2485, 0xffffffff, 0x000a0009,
+	0x2486, 0xffffffff, 0x000c000b,
+	0x2487, 0xffffffff, 0x0000000f,
+	0x2488, 0xffffffff, 0x000e000d,
+	0x2489, 0xffffffff, 0x00110010,
+	0x248a, 0xffffffff, 0x000b000a,
+	0x248b, 0xffffffff, 0x000d000c,
+	0x248c, 0xffffffff, 0x00000010,
+	0x248d, 0xffffffff, 0x000f000e,
+	0x248e, 0xffffffff, 0x00120011,
+	0x248f, 0xffffffff, 0x000c000b,
+	0x2490, 0xffffffff, 0x000e000d,
+	0x2491, 0xffffffff, 0x00000011,
+	0x2492, 0xffffffff, 0x0010000f,
+	0x2493, 0xffffffff, 0x00130012,
+	0x2494, 0xffffffff, 0x000d000c,
+	0x2495, 0xffffffff, 0x000f000e,
+	0x2496, 0xffffffff, 0x00100013,
+	0x2497, 0xffffffff, 0x00120011,
+	0x2498, 0xffffffff, 0x00150014,
+	0x2499, 0xffffffff, 0x000e000d,
+	0x249a, 0xffffffff, 0x0010000f,
+	0x249b, 0xffffffff, 0x00000013,
+	0x249c, 0xffffffff, 0x00120011,
+	0x249d, 0xffffffff, 0x00150014,
+	0x249e, 0xffffffff, 0x000f000e,
+	0x249f, 0xffffffff, 0x00110010,
+	0x24a0, 0xffffffff, 0x00000014,
+	0x24a1, 0xffffffff, 0x00130012,
+	0x24a2, 0xffffffff, 0x00160015,
+	0x24a3, 0xffffffff, 0x0010000f,
+	0x24a4, 0xffffffff, 0x00120011,
+	0x24a5, 0xffffffff, 0x00000015,
+	0x24a6, 0xffffffff, 0x00140013,
+	0x24a7, 0xffffffff, 0x00170016,
+	0x2454, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x311e, 0xffffffff, 0x00000080,
+	0x3101, 0xffffffff, 0x0020003f,
+	0xc, 0xffffffff, 0x0000001c,
+	0xd, 0x000f0000, 0x000f0000,
+	0x583, 0xffffffff, 0x00000100,
+	0x409, 0xffffffff, 0x00000100,
+	0x40b, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x993, 0x000c0000, 0x000c0000,
+	0x992, 0x000c0000, 0x000c0000,
+	0x1579, 0xff000fff, 0x00000100,
+	0x157a, 0x00000001, 0x00000001,
+	0xbd4, 0x00000001, 0x00000001,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3430, 0xfffffff0, 0x00000100,
+	0x3630, 0xfffffff0, 0x00000100
+};
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+	0x3100, 0xffffffff, 0xfffffffc,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2698, 0xffffffff, 0x00000100,
+	0x24a9, 0xffffffff, 0x00000100,
+	0x3059, 0xffffffff, 0x00000100,
+	0x25dd, 0xffffffff, 0x00000100,
+	0x2261, 0xffffffff, 0x06000100,
+	0x2286, 0xffffffff, 0x00000100,
+	0x24a8, 0xffffffff, 0x00000100,
+	0x30e0, 0xffffffff, 0x00000100,
+	0x22ca, 0xffffffff, 0x00000100,
+	0x2451, 0xffffffff, 0x00000100,
+	0x2362, 0xffffffff, 0x00000100,
+	0x2363, 0xffffffff, 0x00000100,
+	0x240c, 0xffffffff, 0x00000100,
+	0x240d, 0xffffffff, 0x00000100,
+	0x240e, 0xffffffff, 0x00000100,
+	0x240f, 0xffffffff, 0x00000100,
+	0x2b60, 0xffffffff, 0x00000100,
+	0x2b15, 0xffffffff, 0x00000100,
+	0x225f, 0xffffffff, 0x06000100,
+	0x261a, 0xffffffff, 0x00000100,
+	0x2544, 0xffffffff, 0x00000100,
+	0x2bc1, 0xffffffff, 0x00000100,
+	0x2b81, 0xffffffff, 0x00000100,
+	0x2527, 0xffffffff, 0x00000100,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2458, 0xffffffff, 0x00010000,
+	0x2459, 0xffffffff, 0x00030002,
+	0x245a, 0xffffffff, 0x00040007,
+	0x245b, 0xffffffff, 0x00060005,
+	0x245c, 0xffffffff, 0x00090008,
+	0x245d, 0xffffffff, 0x00020001,
+	0x245e, 0xffffffff, 0x00040003,
+	0x245f, 0xffffffff, 0x00000007,
+	0x2460, 0xffffffff, 0x00060005,
+	0x2461, 0xffffffff, 0x00090008,
+	0x2462, 0xffffffff, 0x00030002,
+	0x2463, 0xffffffff, 0x00050004,
+	0x2464, 0xffffffff, 0x00000008,
+	0x2465, 0xffffffff, 0x00070006,
+	0x2466, 0xffffffff, 0x000a0009,
+	0x2467, 0xffffffff, 0x00040003,
+	0x2468, 0xffffffff, 0x00060005,
+	0x2469, 0xffffffff, 0x00000009,
+	0x246a, 0xffffffff, 0x00080007,
+	0x246b, 0xffffffff, 0x000b000a,
+	0x246c, 0xffffffff, 0x00050004,
+	0x246d, 0xffffffff, 0x00070006,
+	0x246e, 0xffffffff, 0x0008000b,
+	0x246f, 0xffffffff, 0x000a0009,
+	0x2470, 0xffffffff, 0x000d000c,
+	0x2480, 0xffffffff, 0x00090008,
+	0x2481, 0xffffffff, 0x000b000a,
+	0x2482, 0xffffffff, 0x000c000f,
+	0x2483, 0xffffffff, 0x000e000d,
+	0x2484, 0xffffffff, 0x00110010,
+	0x2485, 0xffffffff, 0x000a0009,
+	0x2486, 0xffffffff, 0x000c000b,
+	0x2487, 0xffffffff, 0x0000000f,
+	0x2488, 0xffffffff, 0x000e000d,
+	0x2489, 0xffffffff, 0x00110010,
+	0x248a, 0xffffffff, 0x000b000a,
+	0x248b, 0xffffffff, 0x000d000c,
+	0x248c, 0xffffffff, 0x00000010,
+	0x248d, 0xffffffff, 0x000f000e,
+	0x248e, 0xffffffff, 0x00120011,
+	0x248f, 0xffffffff, 0x000c000b,
+	0x2490, 0xffffffff, 0x000e000d,
+	0x2491, 0xffffffff, 0x00000011,
+	0x2492, 0xffffffff, 0x0010000f,
+	0x2493, 0xffffffff, 0x00130012,
+	0x2494, 0xffffffff, 0x000d000c,
+	0x2495, 0xffffffff, 0x000f000e,
+	0x2496, 0xffffffff, 0x00100013,
+	0x2497, 0xffffffff, 0x00120011,
+	0x2498, 0xffffffff, 0x00150014,
+	0x2454, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x311e, 0xffffffff, 0x00000080,
+	0x3101, 0xffffffff, 0x0020003f,
+	0xc, 0xffffffff, 0x0000001c,
+	0xd, 0x000f0000, 0x000f0000,
+	0x583, 0xffffffff, 0x00000100,
+	0x409, 0xffffffff, 0x00000100,
+	0x40b, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x1579, 0xff000fff, 0x00000100,
+	0x157a, 0x00000001, 0x00000001,
+	0xbd4, 0x00000001, 0x00000001,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3430, 0xfffffff0, 0x00000100,
+	0x3630, 0xfffffff0, 0x00000100
+};
+static const u32 verde_mgcg_cgcg_init[] =
+{
+	0x3100, 0xffffffff, 0xfffffffc,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2698, 0xffffffff, 0x00000100,
+	0x24a9, 0xffffffff, 0x00000100,
+	0x3059, 0xffffffff, 0x00000100,
+	0x25dd, 0xffffffff, 0x00000100,
+	0x2261, 0xffffffff, 0x06000100,
+	0x2286, 0xffffffff, 0x00000100,
+	0x24a8, 0xffffffff, 0x00000100,
+	0x30e0, 0xffffffff, 0x00000100,
+	0x22ca, 0xffffffff, 0x00000100,
+	0x2451, 0xffffffff, 0x00000100,
+	0x2362, 0xffffffff, 0x00000100,
+	0x2363, 0xffffffff, 0x00000100,
+	0x240c, 0xffffffff, 0x00000100,
+	0x240d, 0xffffffff, 0x00000100,
+	0x240e, 0xffffffff, 0x00000100,
+	0x240f, 0xffffffff, 0x00000100,
+	0x2b60, 0xffffffff, 0x00000100,
+	0x2b15, 0xffffffff, 0x00000100,
+	0x225f, 0xffffffff, 0x06000100,
+	0x261a, 0xffffffff, 0x00000100,
+	0x2544, 0xffffffff, 0x00000100,
+	0x2bc1, 0xffffffff, 0x00000100,
+	0x2b81, 0xffffffff, 0x00000100,
+	0x2527, 0xffffffff, 0x00000100,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2458, 0xffffffff, 0x00010000,
+	0x2459, 0xffffffff, 0x00030002,
+	0x245a, 0xffffffff, 0x00040007,
+	0x245b, 0xffffffff, 0x00060005,
+	0x245c, 0xffffffff, 0x00090008,
+	0x245d, 0xffffffff, 0x00020001,
+	0x245e, 0xffffffff, 0x00040003,
+	0x245f, 0xffffffff, 0x00000007,
+	0x2460, 0xffffffff, 0x00060005,
+	0x2461, 0xffffffff, 0x00090008,
+	0x2462, 0xffffffff, 0x00030002,
+	0x2463, 0xffffffff, 0x00050004,
+	0x2464, 0xffffffff, 0x00000008,
+	0x2465, 0xffffffff, 0x00070006,
+	0x2466, 0xffffffff, 0x000a0009,
+	0x2467, 0xffffffff, 0x00040003,
+	0x2468, 0xffffffff, 0x00060005,
+	0x2469, 0xffffffff, 0x00000009,
+	0x246a, 0xffffffff, 0x00080007,
+	0x246b, 0xffffffff, 0x000b000a,
+	0x246c, 0xffffffff, 0x00050004,
+	0x246d, 0xffffffff, 0x00070006,
+	0x246e, 0xffffffff, 0x0008000b,
+	0x246f, 0xffffffff, 0x000a0009,
+	0x2470, 0xffffffff, 0x000d000c,
+	0x2480, 0xffffffff, 0x00090008,
+	0x2481, 0xffffffff, 0x000b000a,
+	0x2482, 0xffffffff, 0x000c000f,
+	0x2483, 0xffffffff, 0x000e000d,
+	0x2484, 0xffffffff, 0x00110010,
+	0x2485, 0xffffffff, 0x000a0009,
+	0x2486, 0xffffffff, 0x000c000b,
+	0x2487, 0xffffffff, 0x0000000f,
+	0x2488, 0xffffffff, 0x000e000d,
+	0x2489, 0xffffffff, 0x00110010,
+	0x248a, 0xffffffff, 0x000b000a,
+	0x248b, 0xffffffff, 0x000d000c,
+	0x248c, 0xffffffff, 0x00000010,
+	0x248d, 0xffffffff, 0x000f000e,
+	0x248e, 0xffffffff, 0x00120011,
+	0x248f, 0xffffffff, 0x000c000b,
+	0x2490, 0xffffffff, 0x000e000d,
+	0x2491, 0xffffffff, 0x00000011,
+	0x2492, 0xffffffff, 0x0010000f,
+	0x2493, 0xffffffff, 0x00130012,
+	0x2494, 0xffffffff, 0x000d000c,
+	0x2495, 0xffffffff, 0x000f000e,
+	0x2496, 0xffffffff, 0x00100013,
+	0x2497, 0xffffffff, 0x00120011,
+	0x2498, 0xffffffff, 0x00150014,
+	0x2454, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x311e, 0xffffffff, 0x00000080,
+	0x3101, 0xffffffff, 0x0020003f,
+	0xc, 0xffffffff, 0x0000001c,
+	0xd, 0x000f0000, 0x000f0000,
+	0x583, 0xffffffff, 0x00000100,
+	0x409, 0xffffffff, 0x00000100,
+	0x40b, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x993, 0x000c0000, 0x000c0000,
+	0x992, 0x000c0000, 0x000c0000,
+	0x1579, 0xff000fff, 0x00000100,
+	0x157a, 0x00000001, 0x00000001,
+	0xbd4, 0x00000001, 0x00000001,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3430, 0xfffffff0, 0x00000100,
+	0x3630, 0xfffffff0, 0x00000100
+};
+static const u32 oland_mgcg_cgcg_init[] =
+{
+	0x3100, 0xffffffff, 0xfffffffc,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2698, 0xffffffff, 0x00000100,
+	0x24a9, 0xffffffff, 0x00000100,
+	0x3059, 0xffffffff, 0x00000100,
+	0x25dd, 0xffffffff, 0x00000100,
+	0x2261, 0xffffffff, 0x06000100,
+	0x2286, 0xffffffff, 0x00000100,
+	0x24a8, 0xffffffff, 0x00000100,
+	0x30e0, 0xffffffff, 0x00000100,
+	0x22ca, 0xffffffff, 0x00000100,
+	0x2451, 0xffffffff, 0x00000100,
+	0x2362, 0xffffffff, 0x00000100,
+	0x2363, 0xffffffff, 0x00000100,
+	0x240c, 0xffffffff, 0x00000100,
+	0x240d, 0xffffffff, 0x00000100,
+	0x240e, 0xffffffff, 0x00000100,
+	0x240f, 0xffffffff, 0x00000100,
+	0x2b60, 0xffffffff, 0x00000100,
+	0x2b15, 0xffffffff, 0x00000100,
+	0x225f, 0xffffffff, 0x06000100,
+	0x261a, 0xffffffff, 0x00000100,
+	0x2544, 0xffffffff, 0x00000100,
+	0x2bc1, 0xffffffff, 0x00000100,
+	0x2b81, 0xffffffff, 0x00000100,
+	0x2527, 0xffffffff, 0x00000100,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2458, 0xffffffff, 0x00010000,
+	0x2459, 0xffffffff, 0x00030002,
+	0x245a, 0xffffffff, 0x00040007,
+	0x245b, 0xffffffff, 0x00060005,
+	0x245c, 0xffffffff, 0x00090008,
+	0x245d, 0xffffffff, 0x00020001,
+	0x245e, 0xffffffff, 0x00040003,
+	0x245f, 0xffffffff, 0x00000007,
+	0x2460, 0xffffffff, 0x00060005,
+	0x2461, 0xffffffff, 0x00090008,
+	0x2462, 0xffffffff, 0x00030002,
+	0x2463, 0xffffffff, 0x00050004,
+	0x2464, 0xffffffff, 0x00000008,
+	0x2465, 0xffffffff, 0x00070006,
+	0x2466, 0xffffffff, 0x000a0009,
+	0x2467, 0xffffffff, 0x00040003,
+	0x2468, 0xffffffff, 0x00060005,
+	0x2469, 0xffffffff, 0x00000009,
+	0x246a, 0xffffffff, 0x00080007,
+	0x246b, 0xffffffff, 0x000b000a,
+	0x246c, 0xffffffff, 0x00050004,
+	0x246d, 0xffffffff, 0x00070006,
+	0x246e, 0xffffffff, 0x0008000b,
+	0x246f, 0xffffffff, 0x000a0009,
+	0x2470, 0xffffffff, 0x000d000c,
+	0x2471, 0xffffffff, 0x00060005,
+	0x2472, 0xffffffff, 0x00080007,
+	0x2473, 0xffffffff, 0x0000000b,
+	0x2474, 0xffffffff, 0x000a0009,
+	0x2475, 0xffffffff, 0x000d000c,
+	0x2454, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x311e, 0xffffffff, 0x00000080,
+	0x3101, 0xffffffff, 0x0020003f,
+	0xc, 0xffffffff, 0x0000001c,
+	0xd, 0x000f0000, 0x000f0000,
+	0x583, 0xffffffff, 0x00000100,
+	0x409, 0xffffffff, 0x00000100,
+	0x40b, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x993, 0x000c0000, 0x000c0000,
+	0x992, 0x000c0000, 0x000c0000,
+	0x1579, 0xff000fff, 0x00000100,
+	0x157a, 0x00000001, 0x00000001,
+	0xbd4, 0x00000001, 0x00000001,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3430, 0xfffffff0, 0x00000100,
+	0x3630, 0xfffffff0, 0x00000100
+};
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+	0x3100, 0xffffffff, 0xfffffffc,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2698, 0xffffffff, 0x00000100,
+	0x24a9, 0xffffffff, 0x00000100,
+	0x3059, 0xffffffff, 0x00000100,
+	0x25dd, 0xffffffff, 0x00000100,
+	0x2261, 0xffffffff, 0x06000100,
+	0x2286, 0xffffffff, 0x00000100,
+	0x24a8, 0xffffffff, 0x00000100,
+	0x30e0, 0xffffffff, 0x00000100,
+	0x22ca, 0xffffffff, 0x00000100,
+	0x2451, 0xffffffff, 0x00000100,
+	0x2362, 0xffffffff, 0x00000100,
+	0x2363, 0xffffffff, 0x00000100,
+	0x240c, 0xffffffff, 0x00000100,
+	0x240d, 0xffffffff, 0x00000100,
+	0x240e, 0xffffffff, 0x00000100,
+	0x240f, 0xffffffff, 0x00000100,
+	0x2b60, 0xffffffff, 0x00000100,
+	0x2b15, 0xffffffff, 0x00000100,
+	0x225f, 0xffffffff, 0x06000100,
+	0x261a, 0xffffffff, 0x00000100,
+	0x2544, 0xffffffff, 0x00000100,
+	0x2bc1, 0xffffffff, 0x00000100,
+	0x2b81, 0xffffffff, 0x00000100,
+	0x2527, 0xffffffff, 0x00000100,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2458, 0xffffffff, 0x00010000,
+	0x2459, 0xffffffff, 0x00030002,
+	0x245a, 0xffffffff, 0x00040007,
+	0x245b, 0xffffffff, 0x00060005,
+	0x245c, 0xffffffff, 0x00090008,
+	0x245d, 0xffffffff, 0x00020001,
+	0x245e, 0xffffffff, 0x00040003,
+	0x245f, 0xffffffff, 0x00000007,
+	0x2460, 0xffffffff, 0x00060005,
+	0x2461, 0xffffffff, 0x00090008,
+	0x2462, 0xffffffff, 0x00030002,
+	0x2463, 0xffffffff, 0x00050004,
+	0x2464, 0xffffffff, 0x00000008,
+	0x2465, 0xffffffff, 0x00070006,
+	0x2466, 0xffffffff, 0x000a0009,
+	0x2467, 0xffffffff, 0x00040003,
+	0x2468, 0xffffffff, 0x00060005,
+	0x2469, 0xffffffff, 0x00000009,
+	0x246a, 0xffffffff, 0x00080007,
+	0x246b, 0xffffffff, 0x000b000a,
+	0x246c, 0xffffffff, 0x00050004,
+	0x246d, 0xffffffff, 0x00070006,
+	0x246e, 0xffffffff, 0x0008000b,
+	0x246f, 0xffffffff, 0x000a0009,
+	0x2470, 0xffffffff, 0x000d000c,
+	0x2471, 0xffffffff, 0x00060005,
+	0x2472, 0xffffffff, 0x00080007,
+	0x2473, 0xffffffff, 0x0000000b,
+	0x2474, 0xffffffff, 0x000a0009,
+	0x2475, 0xffffffff, 0x000d000c,
+	0x2454, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x311e, 0xffffffff, 0x00000080,
+	0x3101, 0xffffffff, 0x0020003f,
+	0xc, 0xffffffff, 0x0000001c,
+	0xd, 0x000f0000, 0x000f0000,
+	0x583, 0xffffffff, 0x00000100,
+	0x409, 0xffffffff, 0x00000100,
+	0x82a, 0xffffffff, 0x00000104,
+	0x993, 0x000c0000, 0x000c0000,
+	0x992, 0x000c0000, 0x000c0000,
+	0xbd4, 0x00000001, 0x00000001,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3430, 0xfffffff0, 0x00000100,
+	0x3630, 0xfffffff0, 0x00000100
+};
+
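+/*
+ * Indirect register accessors.  SI exposes the PCIE, PCIE port and SMC
+ * register spaces through INDEX/DATA register pairs; each accessor holds
+ * the corresponding spinlock so the index cannot change underneath a
+ * concurrent access, and the PCIE accessors read the index back to make
+ * sure the write has posted before touching the data register.
+ */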
+static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(AMDGPU_PCIE_INDEX, reg);
+	(void)RREG32(AMDGPU_PCIE_INDEX);
+	r = RREG32(AMDGPU_PCIE_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	return r;
+}
+
+static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(AMDGPU_PCIE_INDEX, reg);
+	(void)RREG32(AMDGPU_PCIE_INDEX);
+	WREG32(AMDGPU_PCIE_DATA, v);
+	(void)RREG32(AMDGPU_PCIE_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	r = RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	return r;
+}
+
+void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	WREG32(PCIE_PORT_DATA, (v));
+	(void)RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(SMC_IND_INDEX_0, (reg));
+	r = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return r;
+}
+
+static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(SMC_IND_INDEX_0, (reg));
+	WREG32(SMC_IND_DATA_0, (v));
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
+static u32 si_get_virtual_caps(struct amdgpu_device *adev)
+{
+	/* SI does not support SR-IOV */
+	return 0;
+}
+
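+/*
+ * Registers userspace may read through the read_register callback.  The
+ * second field ("untouched") leaves the reported value at zero instead
+ * of touching the hardware, and the third marks registers that must be
+ * read through the GRBM SE/SH index.
+ */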
+static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
+	{GRBM_STATUS, false},
+	{GB_ADDR_CONFIG, false},
+	{MC_ARB_RAMCFG, false},
+	{GB_TILE_MODE0, false},
+	{GB_TILE_MODE1, false},
+	{GB_TILE_MODE2, false},
+	{GB_TILE_MODE3, false},
+	{GB_TILE_MODE4, false},
+	{GB_TILE_MODE5, false},
+	{GB_TILE_MODE6, false},
+	{GB_TILE_MODE7, false},
+	{GB_TILE_MODE8, false},
+	{GB_TILE_MODE9, false},
+	{GB_TILE_MODE10, false},
+	{GB_TILE_MODE11, false},
+	{GB_TILE_MODE12, false},
+	{GB_TILE_MODE13, false},
+	{GB_TILE_MODE14, false},
+	{GB_TILE_MODE15, false},
+	{GB_TILE_MODE16, false},
+	{GB_TILE_MODE17, false},
+	{GB_TILE_MODE18, false},
+	{GB_TILE_MODE19, false},
+	{GB_TILE_MODE20, false},
+	{GB_TILE_MODE21, false},
+	{GB_TILE_MODE22, false},
+	{GB_TILE_MODE23, false},
+	{GB_TILE_MODE24, false},
+	{GB_TILE_MODE25, false},
+	{GB_TILE_MODE26, false},
+	{GB_TILE_MODE27, false},
+	{GB_TILE_MODE28, false},
+	{GB_TILE_MODE29, false},
+	{GB_TILE_MODE30, false},
+	{GB_TILE_MODE31, false},
+	{CC_RB_BACKEND_DISABLE, false, true},
+	{GC_USER_RB_BACKEND_DISABLE, false, true},
+	{PA_SC_RASTER_CONFIG, false, true},
+};
+
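+/* read a GRBM-indexed register for a given SE/SH (0xffffffff = broadcast) */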
+static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
+					  u32 se_num, u32 sh_num,
+					  u32 reg_offset)
+{
+	uint32_t val;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (se_num != 0xffffffff || sh_num != 0xffffffff)
+		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+
+	val = RREG32(reg_offset);
+
+	if (se_num != 0xffffffff || sh_num != 0xffffffff)
+		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+	return val;
+}
+
+static int si_read_register(struct amdgpu_device *adev, u32 se_num,
+			     u32 sh_num, u32 reg_offset, u32 *value)
+{
+	uint32_t i;
+
+	*value = 0;
+	for (i = 0; i < ARRAY_SIZE(si_allowed_read_registers); i++) {
+		if (reg_offset != si_allowed_read_registers[i].reg_offset)
+			continue;
+
+		if (!si_allowed_read_registers[i].untouched)
+			*value = si_allowed_read_registers[i].grbm_indexed ?
+				 si_read_indexed_register(adev, se_num,
+							   sh_num, reg_offset) :
+				 RREG32(reg_offset);
+		return 0;
+	}
+	return -EINVAL;
+}
+
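+/*
+ * Fetch the VBIOS image while the ROM is normally inaccessible:
+ * temporarily enable the ROM and disable VGA fetches, read the image,
+ * then restore the original register state.
+ */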
+static bool si_read_disabled_bios(struct amdgpu_device *adev)
+{
+	u32 bus_cntl;
+	u32 d1vga_control = 0;
+	u32 d2vga_control = 0;
+	u32 vga_render_control = 0;
+	u32 rom_cntl;
+	bool r;
+
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	if (adev->mode_info.num_crtc) {
+		d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+		d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+		vga_render_control = RREG32(VGA_RENDER_CONTROL);
+	}
+	rom_cntl = RREG32(R600_ROM_CNTL);
+
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	if (adev->mode_info.num_crtc) {
+		/* Disable VGA mode */
+		WREG32(AVIVO_D1VGA_CONTROL,
+		       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+		WREG32(AVIVO_D2VGA_CONTROL,
+		       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+		WREG32(VGA_RENDER_CONTROL,
+		       (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+	}
+	WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+	r = amdgpu_read_bios(adev);
+
+	/* restore regs */
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	if (adev->mode_info.num_crtc) {
+		WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+		WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+		WREG32(VGA_RENDER_CONTROL, vga_render_control);
+	}
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	return r;
+}
+
+/* XXX: not implemented */
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
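+/* enable or disable VGA access decoding via CONFIG_CNTL bits 0/1 */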
+static void si_vga_set_state(struct amdgpu_device *adev, bool state)
+{
+	uint32_t temp;
+
+	temp = RREG32(CONFIG_CNTL);
+	if (!state) {
+		temp &= ~(1<<0);
+		temp |= (1<<1);
+	} else {
+		temp &= ~(1<<1);
+	}
+	WREG32(CONFIG_CNTL, temp);
+}
+
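+/* the crystal reference clock may be divided by 4 or replaced by TCLK */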
+static u32 si_get_xclk(struct amdgpu_device *adev)
+{
+	u32 reference_clock = adev->clock.spll.reference_freq;
+	u32 tmp;
+
+	tmp = RREG32(CG_CLKPIN_CNTL_2);
+	if (tmp & MUX_TCLK_TO_XCLK)
+		return TCLK;
+
+	tmp = RREG32(CG_CLKPIN_CNTL);
+	if (tmp & XTALIN_DIVIDE)
+		return reference_clock / 4;
+
+	return reference_clock;
+}
+
+/* XXX: not implemented */
+static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+	return 0;
+}
+
+static const struct amdgpu_asic_funcs si_asic_funcs =
+{
+	.read_disabled_bios = &si_read_disabled_bios,
+	.read_register = &si_read_register,
+	.reset = &si_asic_reset,
+	.set_vga_state = &si_vga_set_state,
+	.get_xclk = &si_get_xclk,
+	.set_uvd_clocks = &si_set_uvd_clocks,
+	.set_vce_clocks = NULL,
+	.get_virtual_caps = &si_get_virtual_caps,
+};
+
+static uint32_t si_get_rev_id(struct amdgpu_device *adev)
+{
+	return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+		>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
+}
+
+static int si_common_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->smc_rreg = &si_smc_rreg;
+	adev->smc_wreg = &si_smc_wreg;
+	adev->pcie_rreg = &si_pcie_rreg;
+	adev->pcie_wreg = &si_pcie_wreg;
+	adev->pciep_rreg = &si_pciep_rreg;
+	adev->pciep_wreg = &si_pciep_wreg;
+	adev->uvd_ctx_rreg = NULL;
+	adev->uvd_ctx_wreg = NULL;
+	adev->didt_rreg = NULL;
+	adev->didt_wreg = NULL;
+
+	adev->asic_funcs = &si_asic_funcs;
+
+	adev->rev_id = si_get_rev_id(adev);
+	adev->external_rev_id = 0xFF;
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		adev->cg_flags =
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		break;
+	case CHIP_PITCAIRN:
+		adev->cg_flags =
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		break;
+
+	case CHIP_VERDE:
+		adev->cg_flags =
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		/* ??? */
+		adev->external_rev_id = adev->rev_id + 0x14;
+		break;
+	case CHIP_OLAND:
+		adev->cg_flags =
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		break;
+	case CHIP_HAINAN:
+		adev->cg_flags =
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int si_common_sw_init(void *handle)
+{
+	return 0;
+}
+
+static int si_common_sw_fini(void *handle)
+{
+	return 0;
+}
+
+static void si_init_golden_registers(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		amdgpu_program_register_sequence(adev,
+						 tahiti_golden_registers,
+						 (const u32)ARRAY_SIZE(tahiti_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 tahiti_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+		amdgpu_program_register_sequence(adev,
+						 tahiti_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 tahiti_golden_registers2,
+						 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+		break;
+	case CHIP_PITCAIRN:
+		amdgpu_program_register_sequence(adev,
+						 pitcairn_golden_registers,
+						 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 pitcairn_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+		amdgpu_program_register_sequence(adev,
+						 pitcairn_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+		break;
+	case CHIP_VERDE:
+		amdgpu_program_register_sequence(adev,
+						 verde_golden_registers,
+						 (const u32)ARRAY_SIZE(verde_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 verde_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+		amdgpu_program_register_sequence(adev,
+						 verde_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 verde_pg_init,
+						 (const u32)ARRAY_SIZE(verde_pg_init));
+		break;
+	case CHIP_OLAND:
+		amdgpu_program_register_sequence(adev,
+						 oland_golden_registers,
+						 (const u32)ARRAY_SIZE(oland_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 oland_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+		amdgpu_program_register_sequence(adev,
+						 oland_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+		break;
+	case CHIP_HAINAN:
+		amdgpu_program_register_sequence(adev,
+						 hainan_golden_registers,
+						 (const u32)ARRAY_SIZE(hainan_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 hainan_golden_registers2,
+						 (const u32)ARRAY_SIZE(hainan_golden_registers2));
+		amdgpu_program_register_sequence(adev,
+						 hainan_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+		break;
+	default:
+		BUG();
+	}
+}
+
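+/*
+ * Bump the PCIe link to gen2/gen3 speeds when both the root port and
+ * the GPU advertise support.  Skipped for APUs, devices on the root
+ * bus, and when the amdgpu.pcie_gen2 module parameter is 0.
+ */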
+static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+{
+	struct pci_dev *root = adev->pdev->bus->self;
+	int bridge_pos, gpu_pos;
+	u32 speed_cntl, mask, current_data_rate;
+	int ret, i;
+	u16 tmp16;
+
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
+	if (amdgpu_pcie_gen2 == 0)
+		return;
+
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+	if (ret != 0)
+		return;
+
+	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+		return;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+		LC_CURRENT_DATA_RATE_SHIFT;
+	if (mask & DRM_PCIE_SPEED_80) {
+		if (current_data_rate == 2) {
+			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+			return;
+		}
+		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+	} else if (mask & DRM_PCIE_SPEED_50) {
+		if (current_data_rate == 1) {
+			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+			return;
+		}
+		DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
+	}
+
+	bridge_pos = pci_pcie_cap(root);
+	if (!bridge_pos)
+		return;
+
+	gpu_pos = pci_pcie_cap(adev->pdev);
+	if (!gpu_pos)
+		return;
+
+	if (mask & DRM_PCIE_SPEED_80) {
+		if (current_data_rate != 2) {
+			u16 bridge_cfg, gpu_cfg;
+			u16 bridge_cfg2, gpu_cfg2;
+			u32 max_lw, current_lw, tmp;
+
+			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+			pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+			pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+			if (current_lw < max_lw) {
+				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+				if (tmp & LC_RENEGOTIATION_SUPPORT) {
+					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+				}
+			}
+
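+			/*
+			 * Equalization dance for gen3: force quiescence and
+			 * redo equalization, then restore the saved link
+			 * control state on both ends of the link, retrying
+			 * up to 10 times.
+			 */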
+			for (i = 0; i < 10; i++) {
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+					break;
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp |= LC_SET_QUIESCE;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp |= LC_REDO_EQ;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+				mdelay(100);
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp &= ~LC_SET_QUIESCE;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+			}
+		}
+	}
+
+	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+	tmp16 &= ~0xf;
+	if (mask & DRM_PCIE_SPEED_80)
+		tmp16 |= 3;
+	else if (mask & DRM_PCIE_SPEED_50)
+		tmp16 |= 2;
+	else
+		tmp16 |= 1;
+	pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static inline u32 si_pif_phy0_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
+	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	return r;
+}
+
+static inline void si_pif_phy0_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
+	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static inline u32 si_pif_phy1_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
+	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	return r;
+}
+
+static inline void si_pif_phy1_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
+	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
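+
+/*
+ * Program ASPM: enable the L0s/L1 inactivity timers, allow PLL
+ * power-down in L1 and, when the upstream bridge supports CLKPM,
+ * clock request based switching.  Skipped for APUs and when
+ * amdgpu.aspm=0.
+ */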
+static void si_program_aspm(struct amdgpu_device *adev)
+{
+	u32 data, orig;
+	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+	bool disable_clkreq = false;
+
+	if (amdgpu_aspm == 0)
+		return;
+
+	if (adev->flags & AMD_IS_APU)
+		return;
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+	data &= ~LC_XMIT_N_FTS_MASK;
+	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+	data |= LC_GO_TO_RECOVERY;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+	orig = data = RREG32_PCIE(PCIE_P_CNTL);
+	data |= P_IGNORE_EDB_ERR;
+	if (orig != data)
+		WREG32_PCIE(PCIE_P_CNTL, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+	data |= LC_PMI_TO_L1_DIS;
+	if (!disable_l0s)
+		data |= LC_L0S_INACTIVITY(7);
+
+	if (!disable_l1) {
+		data |= LC_L1_INACTIVITY(7);
+		data &= ~LC_PMI_TO_L1_DIS;
+		if (orig != data)
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+		if (!disable_plloff_in_l1) {
+			bool clk_req_support;
+
+			orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (orig != data)
+				si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
+
+			orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (orig != data)
+				si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
+
+			orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (orig != data)
+				si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
+
+			orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (orig != data)
+				si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
+
+			if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
+				orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				if (orig != data)
+					si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
+
+				orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				if (orig != data)
+					si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
+
+				orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_2);
+				data &= ~PLL_RAMP_UP_TIME_2_MASK;
+				if (orig != data)
+					si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_2, data);
+
+				orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_3);
+				data &= ~PLL_RAMP_UP_TIME_3_MASK;
+				if (orig != data)
+					si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_3, data);
+
+				orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				if (orig != data)
+					si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
+
+				orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				if (orig != data)
+					si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
+
+				orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_2);
+				data &= ~PLL_RAMP_UP_TIME_2_MASK;
+				if (orig != data)
+					si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_2, data);
+
+				orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_3);
+				data &= ~PLL_RAMP_UP_TIME_3_MASK;
+				if (orig != data)
+					si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_3, data);
+			}
+			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+			data |= LC_DYN_LANES_PWR_STATE(3);
+			if (orig != data)
+				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+			orig = data = si_pif_phy0_rreg(adev, PB0_PIF_CNTL);
+			data &= ~LS2_EXIT_TIME_MASK;
+			if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+				data |= LS2_EXIT_TIME(5);
+			if (orig != data)
+				si_pif_phy0_wreg(adev, PB0_PIF_CNTL, data);
+
+			orig = data = si_pif_phy1_rreg(adev, PB1_PIF_CNTL);
+			data &= ~LS2_EXIT_TIME_MASK;
+			if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+				data |= LS2_EXIT_TIME(5);
+			if (orig != data)
+				si_pif_phy1_wreg(adev, PB1_PIF_CNTL, data);
+
+			if (!disable_clkreq &&
+			    !pci_is_root_bus(adev->pdev->bus)) {
+				struct pci_dev *root = adev->pdev->bus->self;
+				u32 lnkcap;
+
+				clk_req_support = false;
+				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+					clk_req_support = true;
+			} else {
+				clk_req_support = false;
+			}
+
+			if (clk_req_support) {
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+				if (orig != data)
+					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+				orig = data = RREG32(THM_CLK_CNTL);
+				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+				if (orig != data)
+					WREG32(THM_CLK_CNTL, data);
+
+				orig = data = RREG32(MISC_CLK_CNTL);
+				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+				if (orig != data)
+					WREG32(MISC_CLK_CNTL, data);
+
+				orig = data = RREG32(CG_CLKPIN_CNTL);
+				data &= ~BCLK_AS_XCLK;
+				if (orig != data)
+					WREG32(CG_CLKPIN_CNTL, data);
+
+				orig = data = RREG32(CG_CLKPIN_CNTL_2);
+				data &= ~FORCE_BIF_REFCLK_EN;
+				if (orig != data)
+					WREG32(CG_CLKPIN_CNTL_2, data);
+
+				orig = data = RREG32(MPLL_BYPASSCLK_SEL);
+				data &= ~MPLL_CLKOUT_SEL_MASK;
+				data |= MPLL_CLKOUT_SEL(4);
+				if (orig != data)
+					WREG32(MPLL_BYPASSCLK_SEL, data);
+
+				orig = data = RREG32(SPLL_CNTL_MODE);
+				data &= ~SPLL_REFCLK_SEL_MASK;
+				if (orig != data)
+					WREG32(SPLL_CNTL_MODE, data);
+			}
+		}
+	} else {
+		if (orig != data)
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+	}
+
+	orig = data = RREG32_PCIE(PCIE_CNTL2);
+	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+	if (orig != data)
+		WREG32_PCIE(PCIE_CNTL2, data);
+
+	if (!disable_l0s) {
+		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+			data = RREG32_PCIE(PCIE_LC_STATUS1);
+			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+				data &= ~LC_L0S_INACTIVITY_MASK;
+				if (orig != data)
+					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+			}
+		}
+	}
+}
+
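+/*
+ * Clamp the PCIe max read request size to a safe 512 bytes when it is
+ * configured to the 128 byte minimum or to an out-of-range encoding.
+ */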
+static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev)
+{
+	int readrq;
+	u16 v;
+
+	readrq = pcie_get_readrq(adev->pdev);
+	v = ffs(readrq) - 8;
+	if ((v == 0) || (v == 6) || (v == 7))
+		pcie_set_readrq(adev->pdev, 512);
+}
+
+static int si_common_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	si_fix_pci_max_read_req_size(adev);
+	si_init_golden_registers(adev);
+	si_pcie_gen3_enable(adev);
+	si_program_aspm(adev);
+
+	return 0;
+}
+
+static int si_common_hw_fini(void *handle)
+{
+	return 0;
+}
+
+static int si_common_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_common_hw_fini(adev);
+}
+
+static int si_common_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_common_hw_init(adev);
+}
+
+static bool si_common_is_idle(void *handle)
+{
+	return true;
+}
+
+static int si_common_wait_for_idle(void *handle)
+{
+	return 0;
+}
+
+static int si_common_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static int si_common_set_clockgating_state(void *handle,
+					    enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int si_common_set_powergating_state(void *handle,
+					    enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs si_common_ip_funcs = {
+	.name = "si_common",
+	.early_init = si_common_early_init,
+	.late_init = NULL,
+	.sw_init = si_common_sw_init,
+	.sw_fini = si_common_sw_fini,
+	.hw_init = si_common_hw_init,
+	.hw_fini = si_common_hw_fini,
+	.suspend = si_common_suspend,
+	.resume = si_common_resume,
+	.is_idle = si_common_is_idle,
+	.wait_for_idle = si_common_wait_for_idle,
+	.soft_reset = si_common_soft_reset,
+	.set_clockgating_state = si_common_set_clockgating_state,
+	.set_powergating_state = si_common_set_powergating_state,
+};
+
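+/*
+ * IP block tables.  Tahiti, Pitcairn and Oland reuse the Verde list;
+ * Hainan has no display controller, so it gets its own list without a
+ * DCE block.
+ */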
+static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+{
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &dce_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_dma_ip_funcs,
+	},
+/*	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 3,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &si_null_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_null_ip_funcs,
+	},
+	*/
+};
+
+static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
+{
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_dma_ip_funcs,
+	},
+};
+
+int si_set_ip_blocks(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_VERDE:
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_OLAND:
+		adev->ip_blocks = verde_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+		break;
+	case CHIP_HAINAN:
+		adev->ip_blocks = hainan_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+		break;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/amdgpu/si.h
similarity index 71%
copy from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
copy to drivers/gpu/drm/amd/amdgpu/si.h
index d24b888..959d7b6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -20,16 +20,14 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
 
-#include "hwmgr.h"
+#ifndef __SI_H__
+#define __SI_H__
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-				struct pp_power_state *, void *, uint32_t));
+extern const struct amd_ip_funcs si_common_ip_funcs;
+
+void si_srbm_select(struct amdgpu_device *adev,
+		     u32 me, u32 pipe, u32 queue, u32 vmid);
+int si_set_ip_blocks(struct amdgpu_device *adev);
 
 #endif
-
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
new file mode 100644
index 0000000..de35819
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -0,0 +1,915 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+#include "si/sid.h"
+
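+/* MMIO offsets of the two DMA engine register banks */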
+static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+{
+	DMA0_REGISTER_OFFSET,
+	DMA1_REGISTER_OFFSET
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+
+static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	return ring->adev->wb.wb[ring->rptr_offs>>2];
+}
+
+static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+}
+
+static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+	WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+}
+
+static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+				struct amdgpu_ib *ib,
+				unsigned vm_id, bool ctx_switch)
+{
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
+	amdgpu_ring_write(ring, 1);
+}
+
+static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
+	amdgpu_ring_write(ring, 1);
+}
+
+/**
+ * si_dma_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: GPU address where the fence value is written
+ * @seq: sequence number to write
+ * @flags: AMDGPU_FENCE_FLAG_* fence flags
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and a DMA trap packet to generate
+ * an interrupt if needed (SI).
+ */
+static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				      unsigned flags)
+{
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+
+	/* write the fence */
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	amdgpu_ring_write(ring, seq);
+	/* optionally write high bits as well */
+	if (write64bit) {
+		addr += 4;
+		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+		amdgpu_ring_write(ring, addr & 0xfffffffc);
+		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+		amdgpu_ring_write(ring, upper_32_bits(seq));
+	}
+	/* generate an interrupt */
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
+}
+
+static void si_dma_stop(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	u32 rb_cntl;
+	unsigned i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		/* disable the ring */
+		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
+		rb_cntl &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+		if (adev->mman.buffer_funcs_ring == ring)
+			amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+		ring->ready = false;
+	}
+}
+
+static int si_dma_start(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
+	int i, r;
+	uint64_t rptr_addr;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+
+		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+
+		/* Set ring buffer size in dwords */
+		rb_bufsz = order_base_2(ring->ring_size / 4);
+		rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+		/* Initialize the ring buffer's read and write pointers */
+		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
+		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+
+		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+
+		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+
+		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+
+		/* enable DMA IBs */
+		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+		ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
+		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+
+		ring->wptr = 0;
+		WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+
+		ring->ready = true;
+
+		r = amdgpu_ring_test_ring(ring);
+		if (r) {
+			ring->ready = false;
+			return r;
+		}
+
+		if (adev->mman.buffer_funcs_ring == ring)
+			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+	}
+
+	return 0;
+}
+
+/**
+ * si_dma_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value to
+ * memory (SI).
+ * Returns 0 for success, error for failure.
+ */
+static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned i;
+	unsigned index;
+	int r;
+	u32 tmp;
+	u64 gpu_addr;
+
+	r = amdgpu_wb_get(adev, &index);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+		return r;
+	}
+
+	gpu_addr = adev->wb.gpu_addr + (index * 4);
+	tmp = 0xCAFEDEAD;
+	adev->wb.wb[index] = cpu_to_le32(tmp);
+
+	r = amdgpu_ring_alloc(ring, 4);
+	if (r) {
+		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
+		amdgpu_wb_free(adev, index);
+		return r;
+	}
+
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
+	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
+	amdgpu_ring_write(ring, 0xDEADBEEF);
+	amdgpu_ring_commit(ring);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = le32_to_cpu(adev->wb.wb[index]);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < adev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	amdgpu_wb_free(adev, index);
+
+	return r;
+}
+
+/**
+ * si_dma_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout for the IB test fence wait, in jiffies
+ *
+ * Test a simple IB in the DMA ring (SI).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_ib ib;
+	struct fence *f = NULL;
+	unsigned index;
+	u32 tmp = 0;
+	u64 gpu_addr;
+	long r;
+
+	r = amdgpu_wb_get(adev, &index);
+	if (r) {
+		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+		return r;
+	}
+
+	gpu_addr = adev->wb.gpu_addr + (index * 4);
+	tmp = 0xCAFEDEAD;
+	adev->wb.wb[index] = cpu_to_le32(tmp);
+	memset(&ib, 0, sizeof(ib));
+	r = amdgpu_ib_get(adev, NULL, 256, &ib);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+		goto err0;
+	}
+
+	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
+	ib.ptr[1] = lower_32_bits(gpu_addr);
+	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
+	ib.ptr[3] = 0xDEADBEEF;
+	ib.length_dw = 4;
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+	if (r)
+		goto err1;
+
+	r = fence_wait_timeout(f, false, timeout);
+	if (r == 0) {
+		DRM_ERROR("amdgpu: IB test timed out\n");
+		r = -ETIMEDOUT;
+		goto err1;
+	} else if (r < 0) {
+		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+		goto err1;
+	}
+	tmp = le32_to_cpu(adev->wb.wb[index]);
+	if (tmp == 0xDEADBEEF) {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		r = 0;
+	} else {
+		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+		r = -EINVAL;
+	}
+
+err1:
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
+err0:
+	amdgpu_wb_free(adev, index);
+	return r;
+}
+
+/**
+ * si_dma_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using DMA (SI).
+ */
+static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
+			       uint64_t pe, uint64_t src,
+			       unsigned count)
+{
+	unsigned bytes = count * 8;
+
+	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+					      1, 0, 0, bytes);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = lower_32_bits(src);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+}
+
+/**
+ * si_dma_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using DMA (SI).
+ */
+static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+				uint64_t value, unsigned count,
+				uint32_t incr)
+{
+	unsigned ndw = count * 2;
+
+	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+	for (; ndw > 0; ndw -= 2) {
+		ib->ptr[ib->length_dw++] = lower_32_bits(value);
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		value += incr;
+	}
+}
+
+/**
+ * si_dma_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (SI).
+ */
+static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
+				     uint64_t pe,
+				     uint64_t addr, unsigned count,
+				     uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
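+	/* each PTE/PDE packet can cover at most 0xFFFFE dwords */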
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		if (flags & AMDGPU_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+		ib->ptr[ib->length_dw++] = pe; /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = value; /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+		pe += ndw * 4;
+		addr += (ndw / 2) * incr;
+		count -= ndw / 2;
+	}
+}
+
+/**
+ * si_dma_ring_pad_ib - pad the IB to the required number of dw
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to fill with padding
+ *
+ * Pad the IB with NOPs to a multiple of 8 dw.
+ */
+static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+	while (ib->length_dw & 0x7)
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (SI).
+ */
+static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	/* wait for idle */
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
+			  (1 << 27)); /* Poll memory */
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
+	amdgpu_ring_write(ring, 0xffffffff); /* mask */
+	amdgpu_ring_write(ring, seq); /* value */
+	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
+}
+
+/**
+ * si_dma_ring_emit_vm_flush - si vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm_id: VM context to flush
+ * @pd_addr: page directory base address
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (SI).
+ */
+static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+				      unsigned vm_id, uint64_t pd_addr)
+{
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	if (vm_id < 8)
+		amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+	else
+		amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+	amdgpu_ring_write(ring, pd_addr >> 12);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
+	amdgpu_ring_write(ring, 1 << vm_id);
+
+	/* wait for invalidate to complete */
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+	amdgpu_ring_write(ring, 0xff << 16); /* retry */
+	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+	amdgpu_ring_write(ring, 0); /* value */
+	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
+}
+
+static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 3; /* si_dma_ring_emit_ib */
+}
+
+static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		3 + /* si_dma_ring_emit_hdp_flush */
+		3 + /* si_dma_ring_emit_hdp_invalidate */
+		6 + /* si_dma_ring_emit_pipeline_sync */
+		12 + /* si_dma_ring_emit_vm_flush */
+		9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static int si_dma_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->sdma.num_instances = 2;
+
+	si_dma_set_ring_funcs(adev);
+	si_dma_set_buffer_funcs(adev);
+	si_dma_set_vm_pte_funcs(adev);
+	si_dma_set_irq_funcs(adev);
+
+	return 0;
+}
+
+static int si_dma_sw_init(void *handle)
+{
+	struct amdgpu_ring *ring;
+	int r, i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* DMA0 trap event */
+	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+	if (r)
+		return r;
+
+	/* DMA1 trap event */
+	r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1);
+	if (r)
+		return r;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		ring->ring_obj = NULL;
+		ring->use_doorbell = false;
+		sprintf(ring->name, "sdma%d", i);
+		r = amdgpu_ring_init(adev, ring, 1024,
+				     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
+				     &adev->sdma.trap_irq,
+				     (i == 0) ?
+				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+				     AMDGPU_RING_TYPE_SDMA);
+		if (r)
+			return r;
+	}
+
+	return r;
+}
+
+static int si_dma_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+
+	return 0;
+}
+
+static int si_dma_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_dma_start(adev);
+}
+
+static int si_dma_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	si_dma_stop(adev);
+
+	return 0;
+}
+
+static int si_dma_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_dma_hw_fini(adev);
+}
+
+static int si_dma_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_dma_hw_init(adev);
+}
+
+static bool si_dma_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(SRBM_STATUS2);
+
+	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+		return false;
+
+	return true;
+}
+
+static int si_dma_wait_for_idle(void *handle)
+{
+	unsigned i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (si_dma_is_idle(handle))
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static int si_dma_soft_reset(void *handle)
+{
+	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
+	return 0;
+}
+
+static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *src,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	u32 sdma_cntl;
+
+	switch (type) {
+	case AMDGPU_SDMA_IRQ_TRAP0:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+			sdma_cntl &= ~TRAP_ENABLE;
+			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+			sdma_cntl |= TRAP_ENABLE;
+			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+			break;
+		default:
+			break;
+		}
+		break;
+	case AMDGPU_SDMA_IRQ_TRAP1:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+			sdma_cntl &= ~TRAP_ENABLE;
+			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+			sdma_cntl |= TRAP_ENABLE;
+			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int si_dma_process_trap_irq(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	amdgpu_fence_process(&adev->sdma.instance[0].ring);
+
+	return 0;
+}
+
+static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	amdgpu_fence_process(&adev->sdma.instance[1].ring);
+
+	return 0;
+}
+
+static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
+					      struct amdgpu_irq_src *source,
+					      struct amdgpu_iv_entry *entry)
+{
+	DRM_ERROR("Illegal instruction in SDMA command stream\n");
+	schedule_work(&adev->reset_work);
+	return 0;
+}
+
+static int si_dma_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	u32 orig, data, offset;
+	int i;
+	bool enable;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	enable = (state == AMD_CG_STATE_GATE);
+
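+	/*
+	 * For each DMA engine: clear the memory power override and enable
+	 * clock gating when gating is requested and supported, otherwise
+	 * force the override and disable gating.
+	 */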
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			if (i == 0)
+				offset = DMA0_REGISTER_OFFSET;
+			else
+				offset = DMA1_REGISTER_OFFSET;
+			orig = data = RREG32(DMA_POWER_CNTL + offset);
+			data &= ~MEM_POWER_OVERRIDE;
+			if (data != orig)
+				WREG32(DMA_POWER_CNTL + offset, data);
+			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+		}
+	} else {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			if (i == 0)
+				offset = DMA0_REGISTER_OFFSET;
+			else
+				offset = DMA1_REGISTER_OFFSET;
+			orig = data = RREG32(DMA_POWER_CNTL + offset);
+			data |= MEM_POWER_OVERRIDE;
+			if (data != orig)
+				WREG32(DMA_POWER_CNTL + offset, data);
+
+			orig = data = RREG32(DMA_CLK_CTRL + offset);
+			data = 0xff000000;
+			if (data != orig)
+				WREG32(DMA_CLK_CTRL + offset, data);
+		}
+	}
+
+	return 0;
+}
+
+static int si_dma_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	u32 tmp;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	WREG32(DMA_PGFSM_WRITE,  0x00002000);
+	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+
+	for (tmp = 0; tmp < 5; tmp++)
+		WREG32(DMA_PGFSM_WRITE, 0);
+
+	return 0;
+}
+
+const struct amd_ip_funcs si_dma_ip_funcs = {
+	.name = "si_dma",
+	.early_init = si_dma_early_init,
+	.late_init = NULL,
+	.sw_init = si_dma_sw_init,
+	.sw_fini = si_dma_sw_fini,
+	.hw_init = si_dma_hw_init,
+	.hw_fini = si_dma_hw_fini,
+	.suspend = si_dma_suspend,
+	.resume = si_dma_resume,
+	.is_idle = si_dma_is_idle,
+	.wait_for_idle = si_dma_wait_for_idle,
+	.soft_reset = si_dma_soft_reset,
+	.set_clockgating_state = si_dma_set_clockgating_state,
+	.set_powergating_state = si_dma_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+	.get_rptr = si_dma_ring_get_rptr,
+	.get_wptr = si_dma_ring_get_wptr,
+	.set_wptr = si_dma_ring_set_wptr,
+	.parse_cs = NULL,
+	.emit_ib = si_dma_ring_emit_ib,
+	.emit_fence = si_dma_ring_emit_fence,
+	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
+	.emit_vm_flush = si_dma_ring_emit_vm_flush,
+	.emit_hdp_flush = si_dma_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
+	.test_ring = si_dma_ring_test_ring,
+	.test_ib = si_dma_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = si_dma_ring_pad_ib,
+	.get_emit_ib_size = si_dma_ring_get_emit_ib_size,
+	.get_dma_frame_size = si_dma_ring_get_dma_frame_size,
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
+	.set = si_dma_set_trap_irq_state,
+	.process = si_dma_process_trap_irq,
+};
+
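+/* the second SDMA instance reuses the same .set handler; only .process differs */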
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
+	.set = si_dma_set_trap_irq_state,
+	.process = si_dma_process_trap_irq_1,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
+	.process = si_dma_process_illegal_inst_irq,
+};
+
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
+	adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
+	adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
+}
+
+/**
+ * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to fill with commands
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Copy GPU buffers using the DMA engine (SI).
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
+				       uint64_t src_offset,
+				       uint64_t dst_offset,
+				       uint32_t byte_count)
+{
+	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+					      1, 0, 0, byte_count);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
+}
+
+/**
+ * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to fill
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to fill
+ *
+ * Fill GPU buffers using the DMA engine (SI).
+ */
+static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
+				       uint32_t src_data,
+				       uint64_t dst_offset,
+				       uint32_t byte_count)
+{
+	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
+					      0, 0, 0, byte_count / 4);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
+}
+
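+/*
+ * Packet limits for the generic copy/fill callbacks.  copy_num_dw and
+ * fill_num_dw match the dword counts emitted by the helpers above (5 and
+ * 4); 0xffff8 is presumably the largest 8-byte-aligned byte count that
+ * still fits in a single packet.
+ */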
+static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
+	.copy_max_bytes = 0xffff8,
+	.copy_num_dw = 5,
+	.emit_copy_buffer = si_dma_emit_copy_buffer,
+
+	.fill_max_bytes = 0xffff8,
+	.fill_num_dw = 4,
+	.emit_fill_buffer = si_dma_emit_fill_buffer,
+};
+
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
+{
+	if (adev->mman.buffer_funcs == NULL) {
+		adev->mman.buffer_funcs = &si_dma_buffer_funcs;
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+	}
+}
+
+static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+	.copy_pte = si_dma_vm_copy_pte,
+	.write_pte = si_dma_vm_write_pte,
+	.set_pte_pde = si_dma_vm_set_pte_pde,
+};
+
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	if (adev->vm_manager.vm_pte_funcs == NULL) {
+		adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			adev->vm_manager.vm_pte_rings[i] =
+				&adev->sdma.instance[i].ring;
+
+		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+	}
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
similarity index 71%
copy from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
copy to drivers/gpu/drm/amd/amdgpu/si_dma.h
index d24b888..3a3e0c7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -20,16 +20,10 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
 
-#include "hwmgr.h"
+#ifndef __SI_DMA_H__
+#define __SI_DMA_H__
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-				struct pp_power_state *, void *, uint32_t));
+extern const struct amd_ip_funcs si_dma_ip_funcs;
 
 #endif
-
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
new file mode 100644
index 0000000..e2db4a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -0,0 +1,7993 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_atombios.h"
+#include "si/sid.h"
+#include "r600_dpm.h"
+#include "si_dpm.h"
+#include "atom.h"
+#include "../include/pptable.h"
+#include <linux/math64.h>
+#include <linux/seq_file.h>
+#include <linux/firmware.h>
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define SMC_RAM_END                 0x20000
+
+#define SCLK_MIN_DEEPSLEEP_FREQ     1350
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+
+#define BIOS_SCRATCH_4                                    0x5cd
+
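+/* SMC firmware images; the "_k" variants appear to target specific silicon revisions, chosen at load time */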
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+
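+/* overlays of the ATOM powerplay table revisions parsed out of the VBIOS */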
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+union fan_info {
+	struct _ATOM_PPLIB_FANTABLE fan;
+	struct _ATOM_PPLIB_FANTABLE2 fan2;
+	struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+};
+
+static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
+{
+	R600_UTC_DFLT_00,
+	R600_UTC_DFLT_01,
+	R600_UTC_DFLT_02,
+	R600_UTC_DFLT_03,
+	R600_UTC_DFLT_04,
+	R600_UTC_DFLT_05,
+	R600_UTC_DFLT_06,
+	R600_UTC_DFLT_07,
+	R600_UTC_DFLT_08,
+	R600_UTC_DFLT_09,
+	R600_UTC_DFLT_10,
+	R600_UTC_DFLT_11,
+	R600_UTC_DFLT_12,
+	R600_UTC_DFLT_13,
+	R600_UTC_DFLT_14,
+};
+
+static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
+{
+	R600_DTC_DFLT_00,
+	R600_DTC_DFLT_01,
+	R600_DTC_DFLT_02,
+	R600_DTC_DFLT_03,
+	R600_DTC_DFLT_04,
+	R600_DTC_DFLT_05,
+	R600_DTC_DFLT_06,
+	R600_DTC_DFLT_07,
+	R600_DTC_DFLT_08,
+	R600_DTC_DFLT_09,
+	R600_DTC_DFLT_10,
+	R600_DTC_DFLT_11,
+	R600_DTC_DFLT_12,
+	R600_DTC_DFLT_13,
+	R600_DTC_DFLT_14,
+};
+
+static const struct si_cac_config_reg cac_weights_tahiti[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
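+	/* offset 0xFFFFFFFF terminates the table */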
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_tahiti[] =
+{
+	{ 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_tahiti[] =
+{
+	{ 0xFFFFFFFF }
+};
+
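+/*
+ * Positional initializers; the fields follow the layout of
+ * struct si_powertune_data (see si_dpm.h).  Values are per-ASIC
+ * characterization data.
+ */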
+static const struct si_powertune_data powertune_data_tahiti =
+{
+	((1 << 16) | 27027),
+	6,
+	0,
+	4,
+	95,
+	{
+		0UL,
+		0UL,
+		4521550UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		40
+	},
+	595000000UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
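+/*
+ * Per-ASIC DTE (thermal) characterization tables.  A zeroed table whose
+ * final "enabled" flag is false (as for Pitcairn below) leaves DTE off.
+ */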
+static const struct si_dte_data dte_data_tahiti =
+{
+	{ 1159409, 0, 0, 0, 0 },
+	{ 777, 0, 0, 0, 0 },
+	2,
+	54000,
+	127000,
+	25,
+	2,
+	10,
+	13,
+	{ 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
+	{ 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
+	{ 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
+	85,
+	false
+};
+
+#if 0
+static const struct si_dte_data dte_data_tahiti_le =
+{
+	{ 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
+	{ 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
+	0x5,
+	0xAFC8,
+	0x64,
+	0x32,
+	1,
+	0,
+	0x10,
+	{ 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
+	{ 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
+	{ 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
+	85,
+	true
+};
+#endif
+
+static const struct si_dte_data dte_data_tahiti_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_new_zealand =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
+	{ 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
+	0x5,
+	0xAFC8,
+	0x69,
+	0x32,
+	1,
+	0,
+	0x10,
+	{ 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
+	85,
+	true
+};
+
+static const struct si_dte_data dte_data_aruba_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_malta =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_pitcairn[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_pitcairn[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_pitcairn[] =
+{
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_pitcairn =
+{
+	((1 << 16) | 27027),
+	5,
+	0,
+	6,
+	100,
+	{
+		51600000UL,
+		1800000UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_dte_data dte_data_pitcairn =
+{
+	{ 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0 },
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	0,
+	false
+};
+
+static const struct si_dte_data dte_data_curacao_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_curacao_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_neptune_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_heathrow[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_cape_verde[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_cape_verde[] =
+{
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_cape_verde =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	7,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_dte_data dte_data_cape_verde =
+{
+	{ 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0 },
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	0,
+	false
+};
+
+static const struct si_dte_data dte_data_venus_xtx =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
+	5,
+	55000,
+	0x69,
+	0xA,
+	1,
+	0,
+	0x3,
+	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_venus_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
+	5,
+	55000,
+	0x69,
+	0xA,
+	1,
+	0,
+	0x3,
+	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_venus_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
+	5,
+	55000,
+	0x69,
+	0xA,
+	1,
+	0,
+	0x3,
+	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_oland[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_xt[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_xt[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_oland[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_mars_pro[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_oland[] =
+{
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_oland =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	7,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_powertune_data powertune_data_mars_pro =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	7,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_dte_data dte_data_oland =
+{
+	{ 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0 },
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	0,
+	false
+};
+
+static const struct si_dte_data dte_data_mars_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	55000,
+	105,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_sun_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	55000,
+	105,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_hainan[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_hainan =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	9,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
+static struct si_ps *si_get_ps(struct amdgpu_ps *rps);
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+				     const struct atom_voltage_table *table,
+				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+				    u16 *std_voltage);
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+				      u16 reg_offset, u32 value);
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+					 struct rv7xx_pl *pl,
+					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+				    u32 engine_clock,
+				    SISLANDS_SMC_SCLK_VALUE *sclk);
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev);
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
+
+static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
+{
+	struct si_power_info *pi = adev->pm.dpm.priv;
+
+	return pi;
+}
+
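+/*
+ * A gloss of the fixed-point (drm 32.32) leakage model evaluated
+ * below, with the inputs apparently pre-scaled (ileakage in 1/100 A,
+ * v in mV, t in millidegrees, coefficients in 1e-8 units):
+ *
+ *   tmp     = t_slope * vddc + t_intercept
+ *   kt      = exp(tmp * t) / exp(tmp * t_ref)
+ *   kv      = av * exp(bv * vddc)
+ *   leakage = ileakage * kt * kv * vddc, returned in mW
+ */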
+static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
+						     u16 v, s32 t, u32 ileakage, u32 *leakage)
+{
+	s64 kt, kv, leakage_w, i_leakage, vddc;
+	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+	s64 tmp;
+
+	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+	vddc = div64_s64(drm_int2fixp(v), 1000);
+	temperature = div64_s64(drm_int2fixp(t), 1000);
+
+	t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
+	t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
+	av = div64_s64(drm_int2fixp(coeff->av), 100000000);
+	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
+	t_ref = drm_int2fixp(coeff->t_ref);
+
+	tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+	kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+	kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
+	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
+
+	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+	*leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
+					     const struct ni_leakage_coeffients *coeff,
+					     u16 v,
+					     s32 t,
+					     u32 i_leakage,
+					     u32 *leakage)
+{
+	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
+}
+
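+/* Same model as above, but with kt supplied as a fixed constant. */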
+static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
+					       const u32 fixed_kt, u16 v,
+					       u32 ileakage, u32 *leakage)
+{
+	s64 kt, kv, leakage_w, i_leakage, vddc;
+
+	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+	vddc = div64_s64(drm_int2fixp(v), 1000);
+
+	kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
+	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
+			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
+
+	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+	*leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
+				       const struct ni_leakage_coeffients *coeff,
+				       const u32 fixed_kt,
+				       u16 v,
+				       u32 i_leakage,
+				       u32 *leakage)
+{
+	si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
+}
+
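+/*
+ * Rescale the DTE filter coefficients from the PL2 (near-TDP) power
+ * limit; roughly r[i] = t_split[i] * (max_t - t0/1000) * 2^14 /
+ * (100 * p_limit2), i.e. the thermal headroom above t0 divided by
+ * the allowed power.
+ */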
+static void si_update_dte_from_pl2(struct amdgpu_device *adev,
+				   struct si_dte_data *dte_data)
+{
+	u32 p_limit1 = adev->pm.dpm.tdp_limit;
+	u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
+	u32 k = dte_data->k;
+	u32 t_max = dte_data->max_t;
+	u32 t_split[5] = { 10, 15, 20, 25, 30 };
+	u32 t_0 = dte_data->t0;
+	u32 i;
+
+	if (p_limit2 != 0 && p_limit2 <= p_limit1) {
+		dte_data->tdep_count = 3;
+
+		for (i = 0; i < k; i++) {
+			dte_data->r[i] =
+				(t_split[i] * (t_max - t_0 / (u32)1000) * (1 << 14)) /
+				(p_limit2 * (u32)100);
+		}
+
+		dte_data->tdep_r[1] = dte_data->r[4] * 2;
+
+		for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
+			dte_data->tdep_r[i] = dte_data->r[4];
+		}
+	} else {
+		DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
+	}
+}
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = adev->pm.dpm.priv;
+
+	return pi;
+}
+
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
+{
+	struct ni_power_info *pi = adev->pm.dpm.priv;
+
+	return pi;
+}
+
+static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
+{
+	struct si_ps *ps = aps->ps_priv;
+
+	return ps;
+}
+
+static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	bool update_dte_from_pl2 = false;
+
+	if (adev->asic_type == CHIP_TAHITI) {
+		si_pi->cac_weights = cac_weights_tahiti;
+		si_pi->lcac_config = lcac_tahiti;
+		si_pi->cac_override = cac_override_tahiti;
+		si_pi->powertune_data = &powertune_data_tahiti;
+		si_pi->dte_data = dte_data_tahiti;
+
+		switch (adev->pdev->device) {
+		case 0x6798:
+			si_pi->dte_data.enable_dte_by_default = true;
+			break;
+		case 0x6799:
+			si_pi->dte_data = dte_data_new_zealand;
+			break;
+		case 0x6790:
+		case 0x6791:
+		case 0x6792:
+		case 0x679E:
+			si_pi->dte_data = dte_data_aruba_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x679B:
+			si_pi->dte_data = dte_data_malta;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x679A:
+			si_pi->dte_data = dte_data_tahiti_pro;
+			update_dte_from_pl2 = true;
+			break;
+		default:
+			if (si_pi->dte_data.enable_dte_by_default)
+				DRM_ERROR("DTE is not enabled!\n");
+			break;
+		}
+	} else if (adev->asic_type == CHIP_PITCAIRN) {
+		si_pi->cac_weights = cac_weights_pitcairn;
+		si_pi->lcac_config = lcac_pitcairn;
+		si_pi->cac_override = cac_override_pitcairn;
+		si_pi->powertune_data = &powertune_data_pitcairn;
+
+		switch (adev->pdev->device) {
+		case 0x6810:
+		case 0x6818:
+			si_pi->dte_data = dte_data_curacao_xt;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6819:
+		case 0x6811:
+			si_pi->dte_data = dte_data_curacao_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6800:
+		case 0x6806:
+			si_pi->dte_data = dte_data_neptune_xt;
+			update_dte_from_pl2 = true;
+			break;
+		default:
+			si_pi->dte_data = dte_data_pitcairn;
+			break;
+		}
+	} else if (adev->asic_type == CHIP_VERDE) {
+		si_pi->lcac_config = lcac_cape_verde;
+		si_pi->cac_override = cac_override_cape_verde;
+		si_pi->powertune_data = &powertune_data_cape_verde;
+
+		switch (adev->pdev->device) {
+		case 0x683B:
+		case 0x683F:
+		case 0x6829:
+		case 0x6835:
+			si_pi->cac_weights = cac_weights_cape_verde_pro;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x682C:
+			si_pi->cac_weights = cac_weights_cape_verde_pro;
+			si_pi->dte_data = dte_data_sun_xt;
+			break;
+		case 0x6825:
+		case 0x6827:
+			si_pi->cac_weights = cac_weights_heathrow;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x6824:
+		case 0x682D:
+			si_pi->cac_weights = cac_weights_chelsea_xt;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x682F:
+			si_pi->cac_weights = cac_weights_chelsea_pro;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x6820:
+			si_pi->cac_weights = cac_weights_heathrow;
+			si_pi->dte_data = dte_data_venus_xtx;
+			break;
+		case 0x6821:
+			si_pi->cac_weights = cac_weights_heathrow;
+			si_pi->dte_data = dte_data_venus_xt;
+			break;
+		case 0x6823:
+		case 0x682B:
+		case 0x6822:
+		case 0x682A:
+			si_pi->cac_weights = cac_weights_chelsea_pro;
+			si_pi->dte_data = dte_data_venus_pro;
+			break;
+		default:
+			si_pi->cac_weights = cac_weights_cape_verde;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		}
+	} else if (adev->asic_type == CHIP_OLAND) {
+		si_pi->lcac_config = lcac_mars_pro;
+		si_pi->cac_override = cac_override_oland;
+		si_pi->powertune_data = &powertune_data_mars_pro;
+		si_pi->dte_data = dte_data_mars_pro;
+
+		switch (adev->pdev->device) {
+		case 0x6601:
+		case 0x6621:
+		case 0x6603:
+		case 0x6605:
+			si_pi->cac_weights = cac_weights_mars_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6600:
+		case 0x6606:
+		case 0x6620:
+		case 0x6604:
+			si_pi->cac_weights = cac_weights_mars_xt;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6611:
+		case 0x6613:
+		case 0x6608:
+			si_pi->cac_weights = cac_weights_oland_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6610:
+			si_pi->cac_weights = cac_weights_oland_xt;
+			update_dte_from_pl2 = true;
+			break;
+		default:
+			si_pi->cac_weights = cac_weights_oland;
+			si_pi->lcac_config = lcac_oland;
+			si_pi->cac_override = cac_override_oland;
+			si_pi->powertune_data = &powertune_data_oland;
+			si_pi->dte_data = dte_data_oland;
+			break;
+		}
+	} else if (adev->asic_type == CHIP_HAINAN) {
+		si_pi->cac_weights = cac_weights_hainan;
+		si_pi->lcac_config = lcac_oland;
+		si_pi->cac_override = cac_override_oland;
+		si_pi->powertune_data = &powertune_data_hainan;
+		si_pi->dte_data = dte_data_sun_xt;
+		update_dte_from_pl2 = true;
+	} else {
+		DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
+		return;
+	}
+
+	ni_pi->enable_power_containment = false;
+	ni_pi->enable_cac = false;
+	ni_pi->enable_sq_ramping = false;
+	si_pi->enable_dte = false;
+
+	if (si_pi->powertune_data->enable_powertune_by_default) {
+		ni_pi->enable_power_containment = true;
+		ni_pi->enable_cac = true;
+		if (si_pi->dte_data.enable_dte_by_default) {
+			si_pi->enable_dte = true;
+			if (update_dte_from_pl2)
+				si_update_dte_from_pl2(adev, &si_pi->dte_data);
+		}
+		ni_pi->enable_sq_ramping = true;
+	}
+
+	ni_pi->driver_calculate_cac_leakage = true;
+	ni_pi->cac_configuration_required = true;
+
+	if (ni_pi->cac_configuration_required) {
+		ni_pi->support_cac_long_term_average = true;
+		si_pi->dyn_powertune_data.l2_lta_window_size =
+			si_pi->powertune_data->l2_lta_window_size_default;
+		si_pi->dyn_powertune_data.lts_truncate =
+			si_pi->powertune_data->lts_truncate_default;
+	} else {
+		ni_pi->support_cac_long_term_average = false;
+		si_pi->dyn_powertune_data.l2_lta_window_size = 0;
+		si_pi->dyn_powertune_data.lts_truncate = 0;
+	}
+
+	si_pi->dyn_powertune_data.disable_uvd_powertune = false;
+}
+
+static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
+{
+	return 1;
+}
+
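+/*
+ * The CAC sampling window is the product of the two CAC_WINDOW
+ * half-fields, in xclk cycles; scaling by 100/xclk converts it to
+ * the time units the SMC appears to expect.
+ */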
+static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
+{
+	u32 xclk;
+	u32 wintime;
+	u32 cac_window;
+	u32 cac_window_size;
+
+	xclk = amdgpu_asic_get_xclk(adev);
+
+	if (xclk == 0)
+		return 0;
+
+	cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+	cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
+
+	wintime = (cac_window_size * 100) / xclk;
+
+	return wintime;
+}
+
+static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
+{
+	return power_in_watts;
+}
+
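+/*
+ * Apply the overdrive percentage: scale tdp_limit up or down by
+ * tdp_adjustment percent (sanity-capped at twice the stock limit)
+ * and move near_tdp_limit by the same absolute delta, flooring at
+ * zero.
+ */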
+static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
+					    bool adjust_polarity,
+					    u32 tdp_adjustment,
+					    u32 *tdp_limit,
+					    u32 *near_tdp_limit)
+{
+	u32 adjustment_delta, max_tdp_limit;
+
+	if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
+		return -EINVAL;
+
+	max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
+
+	if (adjust_polarity) {
+		*tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+		*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
+	} else {
+		*tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+		adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit;
+		if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
+			*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
+		else
+			*near_tdp_limit = 0;
+	}
+
+	if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
+		return -EINVAL;
+	if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
+		return -EINVAL;
+
+	return 0;
+}
+
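+/*
+ * Write the adjusted TDP, near-TDP and safe power limits (converted
+ * to mW, big-endian) into the SMC state table, and program the PAPM
+ * parameters when PPM is enabled.
+ */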
+static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
+				      struct amdgpu_ps *amdgpu_state)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (ni_pi->enable_power_containment) {
+		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+		PP_SIslands_PAPMParameters *papm_parm;
+		struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
+		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+		u32 tdp_limit;
+		u32 near_tdp_limit;
+		int ret;
+
+		if (scaling_factor == 0)
+			return -EINVAL;
+
+		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+		ret = si_calculate_adjusted_tdp_limits(adev,
+						       false, /* ??? */
+						       adev->pm.dpm.tdp_adjustment,
+						       &tdp_limit,
+						       &near_tdp_limit);
+		if (ret)
+			return ret;
+
+		smc_table->dpm2Params.TDPLimit =
+			cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
+		smc_table->dpm2Params.NearTDPLimit =
+			cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
+		smc_table->dpm2Params.SafePowerLimit =
+			cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+		ret = amdgpu_si_copy_bytes_to_smc(adev,
+						  (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+						   offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
+						  (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
+						  sizeof(u32) * 3,
+						  si_pi->sram_end);
+		if (ret)
+			return ret;
+
+		if (si_pi->enable_ppm) {
+			papm_parm = &si_pi->papm_parm;
+			memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
+			papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
+			papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
+			papm_parm->dGPU_T_Warning = cpu_to_be32(95);
+			papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
+			papm_parm->PlatformPowerLimit = 0xffffffff;
+			papm_parm->NearTDPLimitPAPM = 0xffffffff;
+
+			ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
+							  (u8 *)papm_parm,
+							  sizeof(PP_SIslands_PAPMParameters),
+							  si_pi->sram_end);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
+					struct amdgpu_ps *amdgpu_state)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (ni_pi->enable_power_containment) {
+		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+		int ret;
+
+		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+		smc_table->dpm2Params.NearTDPLimit =
+			cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
+		smc_table->dpm2Params.SafePowerLimit =
+			cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+		ret = amdgpu_si_copy_bytes_to_smc(adev,
+						  (si_pi->state_table_start +
+						   offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+						   offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
+						  (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
+						  sizeof(u32) * 2,
+						  si_pi->sram_end);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
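+/*
+ * Dynamic power scales with V^2, so the ratio between two levels is
+ * approximately (curr_vddc / prev_vddc)^2 plus a fixed margin,
+ * returned in units of 1/1024 (0 on overflow or a missing voltage).
+ */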
+static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
+					       const u16 prev_std_vddc,
+					       const u16 curr_std_vddc)
+{
+	u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
+	u64 prev_vddc = (u64)prev_std_vddc;
+	u64 curr_vddc = (u64)curr_std_vddc;
+	u64 pwr_efficiency_ratio, n, d;
+
+	if ((prev_vddc == 0) || (curr_vddc == 0))
+		return 0;
+
+	n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
+	d = prev_vddc * prev_vddc;
+	pwr_efficiency_ratio = div64_u64(n, d);
+
+	if (pwr_efficiency_ratio > (u64)0xFFFF)
+		return 0;
+
+	return (u16)pwr_efficiency_ratio;
+}
+
+static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
+					    struct amdgpu_ps *amdgpu_state)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
+	    amdgpu_state->vclk && amdgpu_state->dclk)
+		return true;
+
+	return false;
+}
+
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
+{
+	struct evergreen_power_info *pi = adev->pm.dpm.priv;
+
+	return pi;
+}
+
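+/*
+ * Fill the per-level DPM2 power-containment parameters: level 0 is
+ * left unconstrained, higher levels get a pulse-skip budget (MaxPS)
+ * from the sclk span and an efficiency ratio from the standardized
+ * vddc step between levels.
+ */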
+static int si_populate_power_containment_values(struct amdgpu_device *adev,
+						struct amdgpu_ps *amdgpu_state,
+						SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_ps *state = si_get_ps(amdgpu_state);
+	SISLANDS_SMC_VOLTAGE_VALUE vddc;
+	u32 prev_sclk;
+	u32 max_sclk;
+	u32 min_sclk;
+	u16 prev_std_vddc;
+	u16 curr_std_vddc;
+	int i;
+	u16 pwr_efficiency_ratio;
+	u8 max_ps_percent;
+	bool disable_uvd_power_tune;
+	int ret;
+
+	if (!ni_pi->enable_power_containment)
+		return 0;
+
+	if (state->performance_level_count == 0)
+		return -EINVAL;
+
+	if (smc_state->levelCount != state->performance_level_count)
+		return -EINVAL;
+
+	disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
+
+	smc_state->levels[0].dpm2.MaxPS = 0;
+	smc_state->levels[0].dpm2.NearTDPDec = 0;
+	smc_state->levels[0].dpm2.AboveSafeInc = 0;
+	smc_state->levels[0].dpm2.BelowSafeInc = 0;
+	smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+	for (i = 1; i < state->performance_level_count; i++) {
+		prev_sclk = state->performance_levels[i-1].sclk;
+		max_sclk  = state->performance_levels[i].sclk;
+		if (i == 1)
+			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
+		else
+			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
+
+		if (prev_sclk > max_sclk)
+			return -EINVAL;
+
+		if ((max_ps_percent == 0) ||
+		    (prev_sclk == max_sclk) ||
+		    disable_uvd_power_tune)
+			min_sclk = max_sclk;
+		else if (i == 1)
+			min_sclk = prev_sclk;
+		else
+			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
+
+		if (min_sclk < state->performance_levels[0].sclk)
+			min_sclk = state->performance_levels[0].sclk;
+
+		if (min_sclk == 0)
+			return -EINVAL;
+
+		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+						state->performance_levels[i-1].vddc, &vddc);
+		if (ret)
+			return ret;
+
+		ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
+		if (ret)
+			return ret;
+
+		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+						state->performance_levels[i].vddc, &vddc);
+		if (ret)
+			return ret;
+
+		ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
+		if (ret)
+			return ret;
+
+		pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
+									   prev_std_vddc, curr_std_vddc);
+
+		smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
+		smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
+		smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
+		smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
+		smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
+	}
+
+	return 0;
+}
+
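+/*
+ * Program per-level SQ power ramping: levels running at or above the
+ * ramping threshold get the SISLANDS_DPM2_SQ_RAMP_* limits, all
+ * other levels have the throttle fields saturated.
+ */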
+static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
+					 struct amdgpu_ps *amdgpu_state,
+					 SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_ps *state = si_get_ps(amdgpu_state);
+	u32 sq_power_throttle, sq_power_throttle2;
+	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
+	int i;
+
+	if (state->performance_level_count == 0)
+		return -EINVAL;
+
+	if (smc_state->levelCount != state->performance_level_count)
+		return -EINVAL;
+
+	if (adev->pm.dpm.sq_ramping_threshold == 0)
+		return -EINVAL;
+
+	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+		enable_sq_ramping = false;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		sq_power_throttle = 0;
+		sq_power_throttle2 = 0;
+
+		if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
+		    enable_sq_ramping) {
+			sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
+			sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
+			sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
+			sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
+			sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+		} else {
+			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
+			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+		}
+
+		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
+		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
+	}
+
+	return 0;
+}
+
+static int si_enable_power_containment(struct amdgpu_device *adev,
+				       struct amdgpu_ps *amdgpu_new_state,
+				       bool enable)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (ni_pi->enable_power_containment) {
+		if (enable) {
+			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
+				if (smc_result != PPSMC_Result_OK) {
+					ret = -EINVAL;
+					ni_pi->pc_enabled = false;
+				} else {
+					ni_pi->pc_enabled = true;
+				}
+			}
+		} else {
+			smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
+			if (smc_result != PPSMC_Result_OK)
+				ret = -EINVAL;
+			ni_pi->pc_enabled = false;
+		}
+	}
+
+	return ret;
+}
+
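+/*
+ * Marshal si_dte_data into the big-endian
+ * Smc_SIslands_DTE_Configuration and upload it to SMC RAM; the last
+ * tau/R filter stage is skipped when temperature-dependent entries
+ * are present.
+ */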
+static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int ret = 0;
+	struct si_dte_data *dte_data = &si_pi->dte_data;
+	Smc_SIslands_DTE_Configuration *dte_tables = NULL;
+	u32 table_size;
+	u8 tdep_count;
+	u32 i;
+
+	if (dte_data == NULL)
+		si_pi->enable_dte = false;
+
+	if (!si_pi->enable_dte)
+		return 0;
+
+	if (dte_data->k <= 0)
+		return -EINVAL;
+
+	dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
+	if (dte_tables == NULL) {
+		si_pi->enable_dte = false;
+		return -ENOMEM;
+	}
+
+	table_size = dte_data->k;
+
+	if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
+		table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
+
+	tdep_count = dte_data->tdep_count;
+	if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
+		tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
+
+	dte_tables->K = cpu_to_be32(table_size);
+	dte_tables->T0 = cpu_to_be32(dte_data->t0);
+	dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
+	dte_tables->WindowSize = dte_data->window_size;
+	dte_tables->temp_select = dte_data->temp_select;
+	dte_tables->DTE_mode = dte_data->dte_mode;
+	dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
+
+	if (tdep_count > 0)
+		table_size--;
+
+	for (i = 0; i < table_size; i++) {
+		dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
+		dte_tables->R[i]   = cpu_to_be32(dte_data->r[i]);
+	}
+
+	dte_tables->Tdep_count = tdep_count;
+
+	for (i = 0; i < (u32)tdep_count; i++) {
+		dte_tables->T_limits[i] = dte_data->t_limits[i];
+		dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
+		dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
+	}
+
+	ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
+					  (u8 *)dte_tables,
+					  sizeof(Smc_SIslands_DTE_Configuration),
+					  si_pi->sram_end);
+	kfree(dte_tables);
+
+	return ret;
+}
+
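+/*
+ * Scan the CAC leakage table for the vddc range, then pull the lower
+ * bound down by lkge_lut_v0_percent to model the load line.
+ */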
+static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
+					  u16 *max, u16 *min)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct amdgpu_cac_leakage_table *table =
+		&adev->pm.dpm.dyn_state.cac_leakage_table;
+	u32 i;
+	u32 v0_loadline;
+
+	if (table == NULL)
+		return -EINVAL;
+
+	*max = 0;
+	*min = 0xFFFF;
+
+	for (i = 0; i < table->count; i++) {
+		if (table->entries[i].vddc > *max)
+			*max = table->entries[i].vddc;
+		if (table->entries[i].vddc < *min)
+			*min = table->entries[i].vddc;
+	}
+
+	if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
+		return -EINVAL;
+
+	v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
+
+	if (v0_loadline > 0xFFFFUL)
+		return -EINVAL;
+
+	*min = (u16)v0_loadline;
+
+	if ((*min > *max) || (*max == 0) || (*min == 0))
+		return -EINVAL;
+
+	return 0;
+}
+
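+/* Ceiling division: spread the [min, max] vddc span over the LUT entries. */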
+static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
+{
+	return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
+		SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
+}
+
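+/*
+ * Fill the SMC leakage LUT: rows step through temperature from t0 in
+ * t_step increments, columns through voltage down from vddc_max
+ * (stored mirrored, so the table ends up in ascending-vddc order);
+ * entries are scaled and clamped to u16.
+ */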
+static int si_init_dte_leakage_table(struct amdgpu_device *adev,
+				     PP_SIslands_CacConfig *cac_tables,
+				     u16 vddc_max, u16 vddc_min, u16 vddc_step,
+				     u16 t0, u16 t_step)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 leakage;
+	unsigned int i, j;
+	s32 t;
+	u32 smc_leakage;
+	u32 scaling_factor;
+	u16 voltage;
+
+	scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+	for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
+		t = (1000 * (i * t_step + t0));
+
+		for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+			voltage = vddc_max - (vddc_step * j);
+
+			si_calculate_leakage_for_v_and_t(adev,
+							 &si_pi->powertune_data->leakage_coefficients,
+							 voltage,
+							 t,
+							 si_pi->dyn_powertune_data.cac_leakage,
+							 &leakage);
+
+			smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+			if (smc_leakage > 0xFFFF)
+				smc_leakage = 0xFFFF;
+
+			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+				cpu_to_be16((u16)smc_leakage);
+		}
+	}
+	return 0;
+}
+
+static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
+					    PP_SIslands_CacConfig *cac_tables,
+					    u16 vddc_max, u16 vddc_min, u16 vddc_step)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 leakage;
+	unsigned int i, j;
+	u32 smc_leakage;
+	u32 scaling_factor;
+	u16 voltage;
+
+	scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+	for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+		voltage = vddc_max - (vddc_step * j);
+
+		si_calculate_leakage_for_v(adev,
+					   &si_pi->powertune_data->leakage_coefficients,
+					   si_pi->powertune_data->fixed_kt,
+					   voltage,
+					   si_pi->dyn_powertune_data.cac_leakage,
+					   &leakage);
+
+		smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+		if (smc_leakage > 0xFFFF)
+			smc_leakage = 0xFFFF;
+
+		for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
+			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+				cpu_to_be16((u16)smc_leakage);
+	}
+	return 0;
+}
+
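+/*
+ * Build and upload the SMC CAC configuration: program the CAC
+ * window, derive the leakage LUT bounds, fill the LUT
+ * (temperature-dependent when DTE or driver-side leakage calculation
+ * is in use, simplified otherwise) and copy the whole table to SMC
+ * RAM. Any failure disables CAC and power containment.
+ */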
+static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	PP_SIslands_CacConfig *cac_tables = NULL;
+	u16 vddc_max, vddc_min, vddc_step;
+	u16 t0, t_step;
+	u32 load_line_slope, reg;
+	int ret = 0;
+	u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
+
+	if (!ni_pi->enable_cac)
+		return 0;
+
+	cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
+	if (!cac_tables)
+		return -ENOMEM;
+
+	reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
+	reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
+	WREG32(CG_CAC_CTRL, reg);
+
+	si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
+	si_pi->dyn_powertune_data.dc_pwr_value =
+		si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
+	si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
+	si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
+
+	si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
+
+	ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
+	if (ret)
+		goto done_free;
+
+	vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
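+	/* re-anchor vddc_min on the step grid so that
+	 * vddc_max == vddc_min + (NUM_OF_VOLT_ENTRIES - 1) * vddc_step
+	 */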
+	vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
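+	/* temperature axis of the LUT: t0 degrees C upward in t_step steps */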
+	t_step = 4;
+	t0 = 60;
+
+	if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
+		ret = si_init_dte_leakage_table(adev, cac_tables,
+						vddc_max, vddc_min, vddc_step,
+						t0, t_step);
+	else
+		ret = si_init_simplified_leakage_table(adev, cac_tables,
+						       vddc_max, vddc_min, vddc_step);
+	if (ret)
+		goto done_free;
+
+	load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
+
+	cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
+	cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
+	cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
+	cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
+	cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
+	cac_tables->R_LL = cpu_to_be32(load_line_slope);
+	cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
+	cac_tables->calculation_repeats = cpu_to_be32(2);
+	cac_tables->dc_cac = cpu_to_be32(0);
+	cac_tables->log2_PG_LKG_SCALE = 12;
+	cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
+	cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
+	cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
+
+	ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
+					  (u8 *)cac_tables,
+					  sizeof(PP_SIslands_CacConfig),
+					  si_pi->sram_end);
+
+	if (ret)
+		goto done_free;
+
+	ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
+
+done_free:
+	if (ret) {
+		ni_pi->enable_cac = false;
+		ni_pi->enable_power_containment = false;
+	}
+
+	kfree(cac_tables);
+
+	return ret;
+}
+
+static int si_program_cac_config_registers(struct amdgpu_device *adev,
+					   const struct si_cac_config_reg *cac_config_regs)
+{
+	const struct si_cac_config_reg *config_regs = cac_config_regs;
+	u32 data = 0, offset;
+
+	if (!config_regs)
+		return -EINVAL;
+
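+	/* the register list is terminated by an offset of 0xFFFFFFFF */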
+	while (config_regs->offset != 0xFFFFFFFF) {
+		switch (config_regs->type) {
+		case SISLANDS_CACCONFIG_CGIND:
+			offset = SMC_CG_IND_START + config_regs->offset;
+			if (offset < SMC_CG_IND_END)
+				data = RREG32_SMC(offset);
+			break;
+		default:
+			data = RREG32(config_regs->offset);
+			break;
+		}
+
+		data &= ~config_regs->mask;
+		data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+
+		switch (config_regs->type) {
+		case SISLANDS_CACCONFIG_CGIND:
+			offset = SMC_CG_IND_START + config_regs->offset;
+			if (offset < SMC_CG_IND_END)
+				WREG32_SMC(offset, data);
+			break;
+		default:
+			WREG32(config_regs->offset, data);
+			break;
+		}
+		config_regs++;
+	}
+	return 0;
+}
+
+static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int ret;
+
+	if (!ni_pi->enable_cac ||
+	    !ni_pi->cac_configuration_required)
+		return 0;
+
+	ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
+	if (ret)
+		return ret;
+	ret = si_program_cac_config_registers(adev, si_pi->cac_override);
+	if (ret)
+		return ret;
+	ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int si_enable_smc_cac(struct amdgpu_device *adev,
+			     struct amdgpu_ps *amdgpu_new_state,
+			     bool enable)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (ni_pi->enable_cac) {
+		if (enable) {
+			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+				if (ni_pi->support_cac_long_term_average) {
+					smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
+					if (smc_result != PPSMC_Result_OK)
+						ni_pi->support_cac_long_term_average = false;
+				}
+
+				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
+				if (smc_result != PPSMC_Result_OK) {
+					ret = -EINVAL;
+					ni_pi->cac_enabled = false;
+				} else {
+					ni_pi->cac_enabled = true;
+				}
+
+				if (si_pi->enable_dte) {
+					smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
+					if (smc_result != PPSMC_Result_OK)
+						ret = -EINVAL;
+				}
+			}
+		} else if (ni_pi->cac_enabled) {
+			if (si_pi->enable_dte)
+				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
+
+			smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
+
+			ni_pi->cac_enabled = false;
+
+			if (ni_pi->support_cac_long_term_average)
+				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
+		}
+	}
+	return ret;
+}
+
+static int si_init_smc_spll_table(struct amdgpu_device *adev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
+	SISLANDS_SMC_SCLK_VALUE sclk_params;
+	u32 fb_div, p_div;
+	u32 clk_s, clk_v;
+	u32 sclk = 0;
+	int ret = 0;
+	u32 tmp;
+	int i;
+
+	if (si_pi->spll_table_start == 0)
+		return -EINVAL;
+
+	spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
+	if (spll_table == NULL)
+		return -ENOMEM;
+
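+	/* build a 256-entry divider table; sclk is in 10 kHz units, so each
+	 * entry below advances the engine clock by 5.12 MHz
+	 */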
+	for (i = 0; i < 256; i++) {
+		ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
+		if (ret)
+			break;
+		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
+		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
+		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
+		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+
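+		/* mask off the low 13 bits of the feedback divider (the
+		 * fractional part) and rescale the fields to fit the packed
+		 * SMC table format checked below
+		 */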
+		fb_div &= ~0x00001FFF;
+		fb_div >>= 1;
+		clk_v >>= 6;
+
+		if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
+			ret = -EINVAL;
+		if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
+			ret = -EINVAL;
+		if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+			ret = -EINVAL;
+		if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
+			ret = -EINVAL;
+
+		if (ret)
+			break;
+
+		tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
+			((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
+		spll_table->freq[i] = cpu_to_be32(tmp);
+
+		tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
+			((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
+		spll_table->ss[i] = cpu_to_be32(tmp);
+
+		sclk += 512;
+	}
+
+	if (!ret)
+		ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
+						  (u8 *)spll_table,
+						  sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
+						  si_pi->sram_end);
+
+	if (ret)
+		ni_pi->enable_power_containment = false;
+
+	kfree(spll_table);
+
+	return ret;
+}
+
+struct si_dpm_quirk {
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+	u32 max_sclk;
+	u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+	{ 0, 0, 0, 0 },
+};
+
+static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
+						   u16 vce_voltage)
+{
+	u16 highest_leakage = 0;
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int i;
+
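+	/* clamp the requested VCE voltage so it does not exceed the highest
+	 * known leakage voltage
+	 */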
+	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+		if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
+			highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
+	}
+
+	if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
+		return highest_leakage;
+
+	return vce_voltage;
+}
+
+static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
+				    u32 evclk, u32 ecclk, u16 *voltage)
+{
+	u32 i;
+	int ret = -EINVAL;
+	struct amdgpu_vce_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+	if (((evclk == 0) && (ecclk == 0)) ||
+	    (table && (table->count == 0))) {
+		*voltage = 0;
+		return 0;
+	}
+
+	for (i = 0; i < table->count; i++) {
+		if ((evclk <= table->entries[i].evclk) &&
+		    (ecclk <= table->entries[i].ecclk)) {
+			*voltage = table->entries[i].v;
+			ret = 0;
+			break;
+		}
+	}
+
+	/* if no match return the highest voltage */
+	if (ret)
+		*voltage = table->entries[table->count - 1].v;
+
+	*voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
+
+	return ret;
+}
+
+static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
+{
+	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+	/* we never hit the non-gddr5 limit so disable it */
+	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+
+	return vblank_time < switch_limit;
+}
+
+static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
+				u32 arb_freq_src, u32 arb_freq_dest)
+{
+	u32 mc_arb_dram_timing;
+	u32 mc_arb_dram_timing2;
+	u32 burst_time;
+	u32 mc_cg_config;
+
+	switch (arb_freq_src) {
+	case MC_CG_ARB_FREQ_F0:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_1);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
+		break;
+	case MC_CG_ARB_FREQ_F2:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_2);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
+		break;
+	case MC_CG_ARB_FREQ_F3:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_3);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (arb_freq_dest) {
+	case MC_CG_ARB_FREQ_F0:
+		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
+		break;
+	case MC_CG_ARB_FREQ_F2:
+		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
+		break;
+	case MC_CG_ARB_FREQ_F3:
+		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
+	WREG32(MC_CG_CONFIG, mc_cg_config);
+	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
+
+	return 0;
+}
+
+static void ni_update_current_ps(struct amdgpu_device *adev,
+			  struct amdgpu_ps *rps)
+{
+	struct si_ps *new_ps = si_get_ps(rps);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+	eg_pi->current_rps = *rps;
+	ni_pi->current_ps = *new_ps;
+	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+}
+
+static void ni_update_requested_ps(struct amdgpu_device *adev,
+			    struct amdgpu_ps *rps)
+{
+	struct si_ps *new_ps = si_get_ps(rps);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+	eg_pi->requested_rps = *rps;
+	ni_pi->requested_ps = *new_ps;
+	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+}
+
+static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
+					   struct amdgpu_ps *new_ps,
+					   struct amdgpu_ps *old_ps)
+{
+	struct si_ps *new_state = si_get_ps(new_ps);
+	struct si_ps *current_state = si_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
+	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+		return;
+
+	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
+					  struct amdgpu_ps *new_ps,
+					  struct amdgpu_ps *old_ps)
+{
+	struct si_ps *new_state = si_get_ps(new_ps);
+	struct si_ps *current_state = si_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
+	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+		return;
+
+	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < table->count; i++)
+		if (voltage <= table->entries[i].value)
+			return table->entries[i].value;
+
+	return table->entries[table->count - 1].value;
+}
+
+static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
+		                u32 max_clock, u32 requested_clock)
+{
+	unsigned int i;
+
+	if ((clocks == NULL) || (clocks->count == 0))
+		return (requested_clock < max_clock) ? requested_clock : max_clock;
+
+	for (i = 0; i < clocks->count; i++) {
+		if (clocks->values[i] >= requested_clock)
+			return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
+	}
+
+	return (clocks->values[clocks->count - 1] < max_clock) ?
+		clocks->values[clocks->count - 1] : max_clock;
+}
+
+static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
+			      u32 max_mclk, u32 requested_mclk)
+{
+	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
+				    max_mclk, requested_mclk);
+}
+
+static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
+		              u32 max_sclk, u32 requested_sclk)
+{
+	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
+				    max_sclk, requested_sclk);
+}
+
+static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
+							    u32 *max_clock)
+{
+	u32 i, clock = 0;
+
+	if ((table == NULL) || (table->count == 0)) {
+		*max_clock = clock;
+		return;
+	}
+
+	for (i = 0; i < table->count; i++) {
+		if (clock < table->entries[i].clk)
+			clock = table->entries[i].clk;
+	}
+	*max_clock = clock;
+}
+
+static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
+					       u32 clock, u16 max_voltage, u16 *voltage)
+{
+	u32 i;
+
+	if ((table == NULL) || (table->count == 0))
+		return;
+
+	for (i = 0; i < table->count; i++) {
+		if (clock <= table->entries[i].clk) {
+			if (*voltage < table->entries[i].v)
+				*voltage = (u16)((table->entries[i].v < max_voltage) ?
+					   table->entries[i].v : max_voltage);
+			return;
+		}
+	}
+
+	*voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
+}
+
+static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
+					  const struct amdgpu_clock_and_voltage_limits *max_limits,
+					  struct rv7xx_pl *pl)
+{
+	if ((pl->mclk == 0) || (pl->sclk == 0))
+		return;
+
+	if (pl->mclk == pl->sclk)
+		return;
+
+	if (pl->mclk > pl->sclk) {
+		if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
+			pl->sclk = btc_get_valid_sclk(adev,
+						      max_limits->sclk,
+						      (pl->mclk +
+						      (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
+						      adev->pm.dpm.dyn_state.mclk_sclk_ratio);
+	} else {
+		if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
+			pl->mclk = btc_get_valid_mclk(adev,
+						      max_limits->mclk,
+						      pl->sclk -
+						      adev->pm.dpm.dyn_state.sclk_mclk_delta);
+	}
+}
+
+static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
+					  u16 max_vddc, u16 max_vddci,
+					  u16 *vddc, u16 *vddci)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	u16 new_voltage;
+
+	if ((*vddc == 0) || (*vddci == 0))
+		return;
+
+	if (*vddc > *vddci) {
+		if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+			new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
+						       (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+			*vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
+		}
+	} else {
+		if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+			new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
+						       (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+			*vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
+		}
+	}
+}
+
+static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
+					       u32 sys_mask,
+					       enum amdgpu_pcie_gen asic_gen,
+					       enum amdgpu_pcie_gen default_gen)
+{
+	switch (asic_gen) {
+	case AMDGPU_PCIE_GEN1:
+		return AMDGPU_PCIE_GEN1;
+	case AMDGPU_PCIE_GEN2:
+		return AMDGPU_PCIE_GEN2;
+	case AMDGPU_PCIE_GEN3:
+		return AMDGPU_PCIE_GEN3;
+	default:
+		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+			return AMDGPU_PCIE_GEN3;
+		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+			return AMDGPU_PCIE_GEN2;
+		else
+			return AMDGPU_PCIE_GEN1;
+	}
+	return AMDGPU_PCIE_GEN1;
+}
+
+static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+			    u32 *p, u32 *u)
+{
+	u32 b_c = 0;
+	u32 i_c;
+	u32 tmp;
+
+	i_c = (i * r_c) / 100;
+	tmp = i_c >> p_b;
+
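+	/* b_c ends up as the bit width of i_c >> p_b, giving
+	 * u = ceil(b_c / 2) and p = i_c / 4^u
+	 */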
+	while (tmp) {
+		b_c++;
+		tmp >>= 1;
+	}
+
+	*u = (b_c + 1) / 2;
+	*p = i_c / (1 << (2 * (*u)));
+}
+
+static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
+{
+	u32 k, a, ah, al;
+	u32 t1;
+
+	if ((fl == 0) || (fh == 0) || (fl > fh))
+		return -EINVAL;
+
+	k = (100 * fh) / fl;
+	t1 = (t * (k - 100));
+	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
+	a = (a + 5) / 10;
+	ah = ((a * t) + 5000) / 10000;
+	al = a - ah;
+
+	*th = t - ah;
+	*tl = t + al;
+
+	return 0;
+}
+
+static bool r600_is_uvd_state(u32 class, u32 class2)
+{
+	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+		return true;
+	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+		return true;
+	return false;
+}
+
+static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
+{
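+	/* the memory module index is reported in bits 23:16 of BIOS scratch 4 */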
+	return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
+}
+
+static void rv770_get_max_vddc(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	u16 vddc;
+
+	if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
+		pi->max_vddc = 0;
+	else
+		pi->max_vddc = vddc;
+}
+
+static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct amdgpu_atom_ss ss;
+
+	pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+						       ASIC_INTERNAL_ENGINE_SS, 0);
+	pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+						       ASIC_INTERNAL_MEMORY_SS, 0);
+
+	if (pi->sclk_ss || pi->mclk_ss)
+		pi->dynamic_ss = true;
+	else
+		pi->dynamic_ss = false;
+}
+
+static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+					struct amdgpu_ps *rps)
+{
+	struct si_ps *ps = si_get_ps(rps);
+	struct amdgpu_clock_and_voltage_limits *max_limits;
+	bool disable_mclk_switching = false;
+	bool disable_sclk_switching = false;
+	u32 mclk, sclk;
+	u16 vddc, vddci, min_vce_voltage = 0;
+	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+	u32 max_sclk = 0, max_mclk = 0;
+	int i;
+	struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+	/* Apply dpm quirks */
+	while (p && p->chip_device != 0) {
+		if (adev->pdev->vendor == p->chip_vendor &&
+		    adev->pdev->device == p->chip_device &&
+		    adev->pdev->subsystem_vendor == p->subsys_vendor &&
+		    adev->pdev->subsystem_device == p->subsys_device) {
+			max_sclk = p->max_sclk;
+			max_mclk = p->max_mclk;
+			break;
+		}
+		++p;
+	}
+
+	if (rps->vce_active) {
+		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
+		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
+		si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
+					 &min_vce_voltage);
+	} else {
+		rps->evclk = 0;
+		rps->ecclk = 0;
+	}
+
+	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+	    si_dpm_vblank_too_short(adev))
+		disable_mclk_switching = true;
+
+	if (rps->vclk || rps->dclk) {
+		disable_mclk_switching = true;
+		disable_sclk_switching = true;
+	}
+
+	if (adev->pm.dpm.ac_power)
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	for (i = ps->performance_level_count - 2; i >= 0; i--) {
+		if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
+			ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
+	}
+	if (!adev->pm.dpm.ac_power) {
+		for (i = 0; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].mclk > max_limits->mclk)
+				ps->performance_levels[i].mclk = max_limits->mclk;
+			if (ps->performance_levels[i].sclk > max_limits->sclk)
+				ps->performance_levels[i].sclk = max_limits->sclk;
+			if (ps->performance_levels[i].vddc > max_limits->vddc)
+				ps->performance_levels[i].vddc = max_limits->vddc;
+			if (ps->performance_levels[i].vddci > max_limits->vddci)
+				ps->performance_levels[i].vddci = max_limits->vddci;
+		}
+	}
+
+	/* limit clocks to max supported clocks based on voltage dependency tables */
+	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							&max_sclk_vddc);
+	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							&max_mclk_vddci);
+	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							&max_mclk_vddc);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (max_sclk_vddc) {
+			if (ps->performance_levels[i].sclk > max_sclk_vddc)
+				ps->performance_levels[i].sclk = max_sclk_vddc;
+		}
+		if (max_mclk_vddci) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddci)
+				ps->performance_levels[i].mclk = max_mclk_vddci;
+		}
+		if (max_mclk_vddc) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddc)
+				ps->performance_levels[i].mclk = max_mclk_vddc;
+		}
+		if (max_mclk) {
+			if (ps->performance_levels[i].mclk > max_mclk)
+				ps->performance_levels[i].mclk = max_mclk;
+		}
+		if (max_sclk) {
+			if (ps->performance_levels[i].sclk > max_sclk)
+				ps->performance_levels[i].sclk = max_sclk;
+		}
+	}
+
+	/* XXX validate the min clocks required for display */
+
+	if (disable_mclk_switching) {
+		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
+		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
+	} else {
+		mclk = ps->performance_levels[0].mclk;
+		vddci = ps->performance_levels[0].vddci;
+	}
+
+	if (disable_sclk_switching) {
+		sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+		vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+	} else {
+		sclk = ps->performance_levels[0].sclk;
+		vddc = ps->performance_levels[0].vddc;
+	}
+
+	if (rps->vce_active) {
+		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
+			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
+		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
+			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
+	}
+
+	/* adjusted low state */
+	ps->performance_levels[0].sclk = sclk;
+	ps->performance_levels[0].mclk = mclk;
+	ps->performance_levels[0].vddc = vddc;
+	ps->performance_levels[0].vddci = vddci;
+
+	if (disable_sclk_switching) {
+		sclk = ps->performance_levels[0].sclk;
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (sclk < ps->performance_levels[i].sclk)
+				sclk = ps->performance_levels[i].sclk;
+		}
+		for (i = 0; i < ps->performance_level_count; i++) {
+			ps->performance_levels[i].sclk = sclk;
+			ps->performance_levels[i].vddc = vddc;
+		}
+	} else {
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+				ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+			if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+				ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+		}
+	}
+
+	if (disable_mclk_switching) {
+		mclk = ps->performance_levels[0].mclk;
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (mclk < ps->performance_levels[i].mclk)
+				mclk = ps->performance_levels[i].mclk;
+		}
+		for (i = 0; i < ps->performance_level_count; i++) {
+			ps->performance_levels[i].mclk = mclk;
+			ps->performance_levels[i].vddci = vddci;
+		}
+	} else {
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
+				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
+			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
+				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
+		}
+	}
+
+	for (i = 0; i < ps->performance_level_count; i++)
+		btc_adjust_clock_combinations(adev, max_limits,
+					      &ps->performance_levels[i]);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (ps->performance_levels[i].vddc < min_vce_voltage)
+			ps->performance_levels[i].vddc = min_vce_voltage;
+		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+						   ps->performance_levels[i].sclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						   ps->performance_levels[i].mclk,
+						   max_limits->vddci, &ps->performance_levels[i].vddci);
+		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						   ps->performance_levels[i].mclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+						   adev->clock.current_dispclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+	}
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		btc_apply_voltage_delta_rules(adev,
+					      max_limits->vddc, max_limits->vddci,
+					      &ps->performance_levels[i].vddc,
+					      &ps->performance_levels[i].vddci);
+	}
+
+	ps->dc_compatible = true;
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
+			ps->dc_compatible = false;
+	}
+}
+
+#if 0
+static int si_read_smc_soft_register(struct amdgpu_device *adev,
+				     u16 reg_offset, u32 *value)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	return amdgpu_si_read_smc_sram_dword(adev,
+					     si_pi->soft_regs_start + reg_offset, value,
+					     si_pi->sram_end);
+}
+#endif
+
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+				      u16 reg_offset, u32 value)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	return amdgpu_si_write_smc_sram_dword(adev,
+					      si_pi->soft_regs_start + reg_offset,
+					      value, si_pi->sram_end);
+}
+
+static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
+{
+	bool ret = false;
+	u32 tmp, width, row, column, bank, density;
+	bool is_memory_gddr5, is_special;
+
+	tmp = RREG32(MC_SEQ_MISC0);
+	is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
+	is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT)) &&
+		     (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
+
+	WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
+	width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
+
+	tmp = RREG32(MC_ARB_RAMCFG);
+	row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
+	column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
+	bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+
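+	/* total density from the row/column/bank address bits and bus width;
+	 * a value of 0x400 identifies the special 1 GB configuration
+	 */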
+	density = (1 << (row + column - 20 + bank)) * width;
+
+	if ((adev->pdev->device == 0x6819) &&
+	    is_memory_gddr5 && is_special && (density == 0x400))
+		ret = true;
+
+	return ret;
+}
+
+static void si_get_leakage_vddc(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u16 vddc, count = 0;
+	int i, ret;
+
+	for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
+		ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
+
+		if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
+			si_pi->leakage_voltage.entries[count].voltage = vddc;
+			si_pi->leakage_voltage.entries[count].leakage_index =
+				SISLANDS_LEAKAGE_INDEX0 + i;
+			count++;
+		}
+	}
+	si_pi->leakage_voltage.count = count;
+}
+
+static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
+						     u32 index, u16 *leakage_voltage)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int i;
+
+	if (leakage_voltage == NULL)
+		return -EINVAL;
+
+	if ((index & 0xff00) != 0xff00)
+		return -EINVAL;
+
+	if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
+		return -EINVAL;
+
+	if (index < SISLANDS_LEAKAGE_INDEX0)
+		return -EINVAL;
+
+	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+		if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
+			*leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
+			return 0;
+		}
+	}
+	return -EAGAIN;
+}
+
+static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	bool want_thermal_protection;
+	enum amdgpu_dpm_event_src dpm_event_src;
+
+	switch (sources) {
+	case 0:
+	default:
+		want_thermal_protection = false;
+		break;
+	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+		want_thermal_protection = true;
+		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+		break;
+	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+		want_thermal_protection = true;
+		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+		break;
+	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+		want_thermal_protection = true;
+		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+		break;
+	}
+
+	if (want_thermal_protection) {
+		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+		if (pi->thermal_protection)
+			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+	} else {
+		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+	}
+}
+
+static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
+					   enum amdgpu_dpm_auto_throttle_src source,
+					   bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+	if (enable) {
+		if (!(pi->active_auto_throttle_sources & (1 << source))) {
+			pi->active_auto_throttle_sources |= 1 << source;
+			si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+		}
+	} else {
+		if (pi->active_auto_throttle_sources & (1 << source)) {
+			pi->active_auto_throttle_sources &= ~(1 << source);
+			si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+		}
+	}
+}
+
+static void si_start_dpm(struct amdgpu_device *adev)
+{
+	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_stop_dpm(struct amdgpu_device *adev)
+{
+	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
+{
+	if (enable)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+}
+
+#if 0
+static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
+					       u32 thermal_level)
+{
+	PPSMC_Result ret;
+
+	if (thermal_level == 0) {
+		ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+		if (ret == PPSMC_Result_OK)
+			return 0;
+		else
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
+{
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
+}
+#endif
+
+#if 0
+static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
+{
+	if (ac_power)
+		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
+			0 : -EINVAL;
+
+	return 0;
+}
+#endif
+
+static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+						      PPSMC_Msg msg, u32 parameter)
+{
+	WREG32(SMC_SCRATCH0, parameter);
+	return amdgpu_si_send_msg_to_smc(adev, msg);
+}
+
+static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
+{
+	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_dpm_force_performance_level(struct amdgpu_device *adev,
+				   enum amdgpu_dpm_forced_level level)
+{
+	struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
+	struct si_ps *ps = si_get_ps(rps);
+	u32 levels = ps->performance_level_count;
+
+	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
+			return -EINVAL;
+	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
+			return -EINVAL;
+	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	adev->pm.dpm.forced_level = level;
+
+	return 0;
+}
+
+#if 0
+static int si_set_boot_state(struct amdgpu_device *adev)
+{
+	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+#endif
+
+static int si_set_sw_state(struct amdgpu_device *adev)
+{
+	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_halt_smc(struct amdgpu_device *adev)
+{
+	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_resume_smc(struct amdgpu_device *adev)
+{
+	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static void si_dpm_start_smc(struct amdgpu_device *adev)
+{
+	amdgpu_si_program_jump_on_start(adev);
+	amdgpu_si_start_smc(adev);
+	amdgpu_si_smc_clock(adev, true);
+}
+
+static void si_dpm_stop_smc(struct amdgpu_device *adev)
+{
+	amdgpu_si_reset_smc(adev);
+	amdgpu_si_smc_clock(adev, false);
+}
+
+static int si_process_firmware_header(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+	int ret;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->state_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->soft_regs_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->mc_reg_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->fan_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->arb_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->cac_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->dte_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->spll_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->papm_cfg_table_start = tmp;
+
+	return ret;
+}
+
+static void si_read_clock_registers(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+	si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
+	si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
+	si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
+	si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
+	si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+	si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+	si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+	si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+	si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+	si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+	si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+	si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+	si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+	si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void si_enable_thermal_protection(struct amdgpu_device *adev,
+					  bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+	else
+		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+}
+
+static void si_enable_acpi_power_management(struct amdgpu_device *adev)
+{
+	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+}
+
+#if 0
+static int si_enter_ulp_state(struct amdgpu_device *adev)
+{
+	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+	udelay(25000);
+
+	return 0;
+}
+
+static int si_exit_ulp_state(struct amdgpu_device *adev)
+{
+	int i;
+
+	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+	udelay(7000);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(SMC_RESP_0) == 1)
+			break;
+		udelay(1000);
+	}
+
+	return 0;
+}
+#endif
+
+static int si_notify_smc_display_change(struct amdgpu_device *adev,
+					bool has_display)
+{
+	PPSMC_Msg msg = has_display ?
+		PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+	return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static void si_program_response_times(struct amdgpu_device *adev)
+{
+	u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+	u32 vddc_dly, acpi_dly, vbi_dly;
+	u32 reference_clock;
+
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
+
+	voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
+	backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
+
+	if (voltage_response_time == 0)
+		voltage_response_time = 1000;
+
+	acpi_delay_time = 15000;
+	vbi_time_out = 100000;
+
+	reference_clock = amdgpu_asic_get_xclk(adev);
+
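+	/* scale the response/timeout intervals by the reference clock to get
+	 * the delay values the SMC expects (xclk is in 10 kHz units)
+	 */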
+	vddc_dly = (voltage_response_time * reference_clock) / 100;
+	acpi_dly = (acpi_delay_time * reference_clock) / 100;
+	vbi_dly  = (vbi_time_out * reference_clock) / 100;
+
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg,  vddc_dly);
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi,  acpi_dly);
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
+}
+
+static void si_program_ds_registers(struct amdgpu_device *adev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	u32 tmp;
+
+	/* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
+	if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
+		tmp = 0x10;
+	else
+		tmp = 0x1;
+
+	if (eg_pi->sclk_deep_sleep) {
+		WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
+		WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
+			 ~AUTOSCALE_ON_SS_CLEAR);
+	}
+}
+
+static void si_program_display_gap(struct amdgpu_device *adev)
+{
+	u32 tmp, pipe;
+	int i;
+
+	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+	if (adev->pm.dpm.new_active_crtc_count > 0)
+		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+	else
+		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+	if (adev->pm.dpm.new_active_crtc_count > 1)
+		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+	else
+		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+
+	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
+	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
+
+	if ((adev->pm.dpm.new_active_crtc_count > 0) &&
+	    (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
+		/* find the first active crtc */
+		for (i = 0; i < adev->mode_info.num_crtc; i++) {
+			if (adev->pm.dpm.new_active_crtcs & (1 << i))
+				break;
+		}
+		if (i == adev->mode_info.num_crtc)
+			pipe = 0;
+		else
+			pipe = i;
+
+		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
+		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
+		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
+	}
+
+	/* Setting this to false forces the performance state to low if the crtcs are disabled.
+	 * This can be a problem on PowerXpress systems or if you want to use the card
+	 * for offscreen rendering or compute if there are no crtcs enabled.
+	 */
+	si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+}
+
+static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+	if (enable) {
+		if (pi->sclk_ss)
+			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+	} else {
+		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+	}
+}
+
+static void si_setup_bsp(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	u32 xclk = amdgpu_asic_get_xclk(adev);
+
+	r600_calculate_u_and_p(pi->asi,
+			       xclk,
+			       16,
+			       &pi->bsp,
+			       &pi->bsu);
+
+	r600_calculate_u_and_p(pi->pasi,
+			       xclk,
+			       16,
+			       &pi->pbsp,
+			       &pi->pbsu);
+
+	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+
+	WREG32(CG_BSP, pi->dsp);
+}
+
+static void si_program_git(struct amdgpu_device *adev)
+{
+	WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+}
+
+static void si_program_tp(struct amdgpu_device *adev)
+{
+	int i;
+	enum r600_td td = R600_TD_DFLT;
+
+	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
+		WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+
+	if (td == R600_TD_AUTO)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+
+	if (td == R600_TD_UP)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+
+	if (td == R600_TD_DOWN)
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+}
+
+static void si_program_tpp(struct amdgpu_device *adev)
+{
+	WREG32(CG_TPC, R600_TPC_DFLT);
+}
+
+static void si_program_sstp(struct amdgpu_device *adev)
+{
+	WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void si_enable_display_gap(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
+	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
+		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void si_program_vc(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+	WREG32(CG_FTV, pi->vrc);
+}
+
+static void si_clear_vc(struct amdgpu_device *adev)
+{
+	WREG32(CG_FTV, 0);
+}
+
+static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+{
+	u8 mc_para_index;
+
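+	/* memory_clock is in 10 kHz units: 100-800 MHz maps onto the 16
+	 * MC parameter index slots in 50 MHz steps
+	 */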
+	if (memory_clock < 10000)
+		mc_para_index = 0;
+	else if (memory_clock >= 80000)
+		mc_para_index = 0x0f;
+	else
+		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
+	return mc_para_index;
+}
+
+static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+{
+	u8 mc_para_index;
+
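+	/* memory_clock is in 10 kHz units; strobe mode covers 125-475 MHz in
+	 * 25 MHz steps, normal mode 650-1350 MHz in 50 MHz steps
+	 */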
+	if (strobe_mode) {
+		if (memory_clock < 12500)
+			mc_para_index = 0x00;
+		else if (memory_clock > 47500)
+			mc_para_index = 0x0f;
+		else
+			mc_para_index = (u8)((memory_clock - 10000) / 2500);
+	} else {
+		if (memory_clock < 65000)
+			mc_para_index = 0x00;
+		else if (memory_clock > 135000)
+			mc_para_index = 0x0f;
+		else
+			mc_para_index = (u8)((memory_clock - 60000) / 5000);
+	}
+	return mc_para_index;
+}
+
+static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	bool strobe_mode = false;
+	u8 result = 0;
+
+	if (mclk <= pi->mclk_strobe_mode_threshold)
+		strobe_mode = true;
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+		result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
+	else
+		result = si_get_ddr3_mclk_frequency_ratio(mclk);
+
+	if (strobe_mode)
+		result |= SISLANDS_SMC_STROBE_ENABLE;
+
+	return result;
+}
+
+static int si_upload_firmware(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	amdgpu_si_reset_smc(adev);
+	amdgpu_si_smc_clock(adev, false);
+
+	return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
+}
+
+static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
+					      const struct atom_voltage_table *table,
+					      const struct amdgpu_phase_shedding_limits_table *limits)
+{
+	u32 data, num_bits, num_levels;
+
+	if ((table == NULL) || (limits == NULL))
+		return false;
+
+	data = table->mask_low;
+
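+	/* each set bit in the GPIO mask doubles the number of selectable
+	 * phase-shedding levels; the voltage table must have one entry per
+	 * level and the limits table one per transition between levels
+	 */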
+	num_bits = hweight32(data);
+
+	if (num_bits == 0)
+		return false;
+
+	num_levels = (1 << num_bits);
+
+	if (table->count != num_levels)
+		return false;
+
+	if (limits->count != (num_levels - 1))
+		return false;
+
+	return true;
+}
+
+static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
+					      u32 max_voltage_steps,
+					      struct atom_voltage_table *voltage_table)
+{
+	unsigned int i, diff;
+
+	if (voltage_table->count <= max_voltage_steps)
+		return;
+
+	diff = voltage_table->count - max_voltage_steps;
+
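+	/* keep the highest max_voltage_steps entries, discarding from the
+	 * low-voltage end of the (ascending) table
+	 */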
+	for (i = 0; i < max_voltage_steps; i++)
+		voltage_table->entries[i] = voltage_table->entries[i + diff];
+
+	voltage_table->count = max_voltage_steps;
+}
+
+static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
+				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
+				     struct atom_voltage_table *voltage_table)
+{
+	u32 i;
+
+	if (voltage_dependency_table == NULL)
+		return -EINVAL;
+
+	voltage_table->mask_low = 0;
+	voltage_table->phase_delay = 0;
+
+	voltage_table->count = voltage_dependency_table->count;
+	for (i = 0; i < voltage_table->count; i++) {
+		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+		voltage_table->entries[i].smio_low = 0;
+	}
+
+	return 0;
+}
+
+static int si_construct_voltage_tables(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int ret;
+
+	if (pi->voltage_control) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+		if (ret)
+			return ret;
+
+		if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+			si_trim_voltage_table_to_fit_state_table(adev,
+								 SISLANDS_MAX_NO_VREG_STEPS,
+								 &eg_pi->vddc_voltage_table);
+	} else if (si_pi->voltage_control_svi2) {
+		ret = si_get_svi2_voltage_table(adev,
+						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						&eg_pi->vddc_voltage_table);
+		if (ret)
+			return ret;
+	} else {
+		return -EINVAL;
+	}
+
+	if (eg_pi->vddci_control) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
+						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+
+		if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+			si_trim_voltage_table_to_fit_state_table(adev,
+								 SISLANDS_MAX_NO_VREG_STEPS,
+								 &eg_pi->vddci_voltage_table);
+	}
+	if (si_pi->vddci_control_svi2) {
+		ret = si_get_svi2_voltage_table(adev,
+						&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						&eg_pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->mvdd_control) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
+						    VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
+
+		if (ret) {
+			pi->mvdd_control = false;
+			return ret;
+		}
+
+		if (si_pi->mvdd_voltage_table.count == 0) {
+			pi->mvdd_control = false;
+			return -EINVAL;
+		}
+
+		if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+			si_trim_voltage_table_to_fit_state_table(adev,
+								 SISLANDS_MAX_NO_VREG_STEPS,
+								 &si_pi->mvdd_voltage_table);
+	}
+
+	if (si_pi->vddc_phase_shed_control) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+						    VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
+		if (ret)
+			si_pi->vddc_phase_shed_control = false;
+
+		if ((si_pi->vddc_phase_shed_table.count == 0) ||
+		    (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
+			si_pi->vddc_phase_shed_control = false;
+	}
+
+	return 0;
+}
+
+static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
+					  const struct atom_voltage_table *voltage_table,
+					  SISLANDS_SMC_STATETABLE *table)
+{
+	unsigned int i;
+
+	for (i = 0; i < voltage_table->count; i++)
+		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
+}
+
+static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
+					  SISLANDS_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u8 i;
+
+	if (si_pi->voltage_control_svi2) {
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+			si_pi->svc_gpio_id);
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+			si_pi->svd_gpio_id);
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+					   2);
+	} else {
+		if (eg_pi->vddc_voltage_table.count) {
+			si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+				cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+			for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+				if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+					table->maxVDDCIndexInPPTable = i;
+					break;
+				}
+			}
+		}
+
+		if (eg_pi->vddci_voltage_table.count) {
+			si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
+
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+				cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+		}
+
+		if (si_pi->mvdd_voltage_table.count) {
+			si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
+
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+				cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+		}
+
+		if (si_pi->vddc_phase_shed_control) {
+			if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
+							      &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+				si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
+
+				table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+					cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+
+				si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+							   (u32)si_pi->vddc_phase_shed_table.phase_delay);
+			} else {
+				si_pi->vddc_phase_shed_control = false;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+				     const struct atom_voltage_table *table,
+				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < table->count; i++) {
+		if (value <= table->entries[i].value) {
+			voltage->index = (u8)i;
+			voltage->value = cpu_to_be16(table->entries[i].value);
+			break;
+		}
+	}
+
+	if (i >= table->count)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
+				  SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (pi->mvdd_control) {
+		if (mclk <= pi->mvdd_split_frequency)
+			voltage->index = 0;
+		else
+			voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
+
+		voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
+	}
+	return 0;
+}
+
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+				    u16 *std_voltage)
+{
+	u16 v_index;
+	bool voltage_found = false;
+
+	*std_voltage = be16_to_cpu(voltage->value);
+
+	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
+			if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+				return -EINVAL;
+
+			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+				if (be16_to_cpu(voltage->value) ==
+				    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+					voltage_found = true;
+					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+						*std_voltage =
+							adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+					else
+						*std_voltage =
+							adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+					break;
+				}
+			}
+
+			if (!voltage_found) {
+				for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+					if (be16_to_cpu(voltage->value) <=
+					    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+						voltage_found = true;
+						if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+							*std_voltage =
+								adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+						else
+							*std_voltage =
+								adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+						break;
+					}
+				}
+			}
+		} else {
+			if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+				*std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
+		}
+	}
+
+	return 0;
+}
+
+static int si_populate_std_voltage_value(struct amdgpu_device *adev,
+					 u16 value, u8 index,
+					 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	voltage->index = index;
+	voltage->value = cpu_to_be16(value);
+
+	return 0;
+}
+
+static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
+					    const struct amdgpu_phase_shedding_limits_table *limits,
+					    u16 voltage, u32 sclk, u32 mclk,
+					    SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < limits->count; i++) {
+		if ((voltage <= limits->entries[i].voltage) &&
+		    (sclk <= limits->entries[i].sclk) &&
+		    (mclk <= limits->entries[i].mclk))
+			break;
+	}
+
+	smc_voltage->phase_settings = (u8)i;
+
+	return 0;
+}
+
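+/*
+ * The top byte of the first dword of the MC arbitration table in SMC
+ * SRAM selects the active register set; start out on set F1.
+ */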
+static int si_init_arb_table_index(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+	int ret;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	tmp &= 0x00FFFFFF;
+	tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+	return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
+					      tmp, si_pi->sram_end);
+}
+
+static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
+{
+	return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int si_reset_to_default(struct amdgpu_device *adev)
+{
+	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+	int ret;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	tmp = (tmp >> 24) & 0xff;
+
+	if (tmp == MC_CG_ARB_FREQ_F0)
+		return 0;
+
+	return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
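+/*
+ * Derive the MC arbiter refresh rate for a given engine clock from
+ * the DRAM row count (MC_ARB_RAMCFG) and the refresh interval encoded
+ * in the low bits of MC_SEQ_MISC0.
+ */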
+static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
+					    u32 engine_clock)
+{
+	u32 dram_rows;
+	u32 dram_refresh_rate;
+	u32 mc_arb_rfsh_rate;
+	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+
+	if (tmp >= 4)
+		dram_rows = 16384;
+	else
+		dram_rows = 1 << (tmp + 10);
+
+	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
+	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
+
+	return mc_arb_rfsh_rate;
+}
+
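+/*
+ * Have ATOM program the DRAM timings for this sclk/mclk pair, then
+ * snapshot MC_ARB_DRAM_TIMING/TIMING2 and the state-0 burst time into
+ * a big-endian register set for the SMC.
+ */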
+static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
+						struct rv7xx_pl *pl,
+						SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
+{
+	u32 dram_timing;
+	u32 dram_timing2;
+	u32 burst_time;
+
+	arb_regs->mc_arb_rfsh_rate =
+		(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
+
+	amdgpu_atombios_set_engine_dram_timings(adev, pl->sclk, pl->mclk);
+
+	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+	arb_regs->mc_arb_dram_timing  = cpu_to_be32(dram_timing);
+	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
+	arb_regs->mc_arb_burst_time = (u8)burst_time;
+
+	return 0;
+}
+
+static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
+						  struct amdgpu_ps *amdgpu_state,
+						  unsigned int first_arb_set)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ps *state = si_get_ps(amdgpu_state);
+	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+	int i, ret = 0;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
+		if (ret)
+			break;
+		ret = amdgpu_si_copy_bytes_to_smc(adev,
+						  si_pi->arb_table_start +
+						  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
+						  (u8 *)&arb_regs,
+						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+						  si_pi->sram_end);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
+					       struct amdgpu_ps *amdgpu_new_state)
+{
+	return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
+						      SISLANDS_DRIVER_STATE_ARB_INDEX);
+}
+
+static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
+					  SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (pi->mvdd_control)
+		return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
+						 si_pi->mvdd_bootup_value, voltage);
+
+	return 0;
+}
+
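+/*
+ * Build level 0 of the boot ("initial") state from the clock
+ * registers saved at init and the boot performance level.  The DPM2
+ * safety limits are zeroed and the SQ power throttles are masked off.
+ */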
+static int si_populate_smc_initial_state(struct amdgpu_device *adev,
+					 struct amdgpu_ps *amdgpu_initial_state,
+					 SISLANDS_SMC_STATETABLE *table)
+{
+	struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 reg;
+	int ret;
+
+	table->initialState.levels[0].mclk.vDLL_CNTL =
+		cpu_to_be32(si_pi->clock_registers.dll_cntl);
+	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
+	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
+	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
+	table->initialState.levels[0].mclk.vMPLL_SS =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+	table->initialState.levels[0].mclk.vMPLL_SS2 =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+	table->initialState.levels[0].mclk.mclk_value =
+		cpu_to_be32(initial_state->performance_levels[0].mclk);
+
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2  =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
+
+	table->initialState.levels[0].sclk.sclk_value =
+		cpu_to_be32(initial_state->performance_levels[0].sclk);
+
+	table->initialState.levels[0].arbRefreshState =
+		SISLANDS_INITIAL_STATE_ARB_INDEX;
+
+	table->initialState.levels[0].ACIndex = 0;
+
+	ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+					initial_state->performance_levels[0].vddc,
+					&table->initialState.levels[0].vddc);
+
+	if (!ret) {
+		u16 std_vddc;
+
+		ret = si_get_std_voltage_value(adev,
+					       &table->initialState.levels[0].vddc,
+					       &std_vddc);
+		if (!ret)
+			si_populate_std_voltage_value(adev, std_vddc,
+						      table->initialState.levels[0].vddc.index,
+						      &table->initialState.levels[0].std_vddc);
+	}
+
+	if (eg_pi->vddci_control)
+		si_populate_voltage_value(adev,
+					  &eg_pi->vddci_voltage_table,
+					  initial_state->performance_levels[0].vddci,
+					  &table->initialState.levels[0].vddci);
+
+	if (si_pi->vddc_phase_shed_control)
+		si_populate_phase_shedding_value(adev,
+						 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+						 initial_state->performance_levels[0].vddc,
+						 initial_state->performance_levels[0].sclk,
+						 initial_state->performance_levels[0].mclk,
+						 &table->initialState.levels[0].vddc);
+
+	si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
+
+	reg = CG_R(0xffff) | CG_L(0);
+	table->initialState.levels[0].aT = cpu_to_be32(reg);
+	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+	table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+		table->initialState.levels[0].strobeMode =
+			si_get_strobe_mode_settings(adev,
+						    initial_state->performance_levels[0].mclk);
+
+		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
+			table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+		else
+			table->initialState.levels[0].mcFlags = 0;
+	}
+
+	table->initialState.levelCount = 1;
+
+	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	table->initialState.levels[0].dpm2.MaxPS = 0;
+	table->initialState.levels[0].dpm2.NearTDPDec = 0;
+	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
+	table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+	table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+	reg = MIN_POWER_MASK | MAX_POWER_MASK;
+	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+	return 0;
+}
+
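+/*
+ * The ACPI state is a copy of the initial state with both clocks
+ * forced to zero, the memory DLLs reset and powered down, and the
+ * sclk source mux switched away from the SPLL (SCLK_MUX_SEL(4)).
+ */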
+static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
+				      SISLANDS_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+	u32 reg;
+	int ret;
+
+	table->ACPIState = table->initialState;
+
+	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (pi->acpi_vddc) {
+		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+		if (!ret) {
+			u16 std_vddc;
+
+			ret = si_get_std_voltage_value(adev,
+						       &table->ACPIState.levels[0].vddc, &std_vddc);
+			if (!ret)
+				si_populate_std_voltage_value(adev, std_vddc,
+							      table->ACPIState.levels[0].vddc.index,
+							      &table->ACPIState.levels[0].std_vddc);
+		}
+		table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+
+		if (si_pi->vddc_phase_shed_control) {
+			si_populate_phase_shedding_value(adev,
+							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+							 pi->acpi_vddc,
+							 0,
+							 0,
+							 &table->ACPIState.levels[0].vddc);
+		}
+	} else {
+		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+						pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+		if (!ret) {
+			u16 std_vddc;
+
+			ret = si_get_std_voltage_value(adev,
+						       &table->ACPIState.levels[0].vddc, &std_vddc);
+
+			if (!ret)
+				si_populate_std_voltage_value(adev, std_vddc,
+							      table->ACPIState.levels[0].vddc.index,
+							      &table->ACPIState.levels[0].std_vddc);
+		}
+		table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
+										    si_pi->sys_pcie_mask,
+										    si_pi->boot_pcie_gen,
+										    AMDGPU_PCIE_GEN1);
+
+		if (si_pi->vddc_phase_shed_control)
+			si_populate_phase_shedding_value(adev,
+							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+							 pi->min_vddc_in_table,
+							 0,
+							 0,
+							 &table->ACPIState.levels[0].vddc);
+	}
+
+	if (pi->acpi_vddc && eg_pi->acpi_vddci)
+		si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+					  eg_pi->acpi_vddci,
+					  &table->ACPIState.levels[0].vddci);
+
+	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+	table->ACPIState.levels[0].mclk.vDLL_CNTL =
+		cpu_to_be32(dll_cntl);
+	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(mclk_pwrmgt_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+		cpu_to_be32(mpll_ad_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+		cpu_to_be32(mpll_dq_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+		cpu_to_be32(mpll_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+		cpu_to_be32(mpll_func_cntl_1);
+	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+		cpu_to_be32(mpll_func_cntl_2);
+	table->ACPIState.levels[0].mclk.vMPLL_SS =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+	table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(spll_func_cntl);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(spll_func_cntl_2);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(spll_func_cntl_3);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+		cpu_to_be32(spll_func_cntl_4);
+
+	table->ACPIState.levels[0].mclk.mclk_value = 0;
+	table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+	si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
+
+	if (eg_pi->dynamic_ac_timing)
+		table->ACPIState.levels[0].ACIndex = 0;
+
+	table->ACPIState.levels[0].dpm2.MaxPS = 0;
+	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
+	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
+	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+	table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+	reg = MIN_POWER_MASK | MAX_POWER_MASK;
+	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+	return 0;
+}
+
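+/*
+ * Convert the ULV performance level into SMC form and tag it with
+ * deep-sleep flags, its own arbiter refresh index and AC-timing
+ * index; optionally drop the link to a single PCIE lane.
+ */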
+static int si_populate_ulv_state(struct amdgpu_device *adev,
+				 SISLANDS_SMC_SWSTATE *state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	u32 sclk_in_sr = 1350; /* ??? */
+	int ret;
+
+	ret = si_convert_power_level_to_smc(adev, &ulv->pl,
+					    &state->levels[0]);
+	if (!ret) {
+		if (eg_pi->sclk_deep_sleep) {
+			if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+			else
+				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+		}
+		if (ulv->one_pcie_lane_in_ulv)
+			state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
+		state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+		state->levels[0].ACIndex = 1;
+		state->levels[0].std_vddc = state->levels[0].vddc;
+		state->levelCount = 1;
+
+		state->flags |= PPSMC_SWSTATE_FLAG_DC;
+	}
+
+	return ret;
+}
+
+static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+	int ret;
+
+	ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
+						   &arb_regs);
+	if (ret)
+		return ret;
+
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
+				   ulv->volt_change_delay);
+
+	ret = amdgpu_si_copy_bytes_to_smc(adev,
+					  si_pi->arb_table_start +
+					  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
+					  (u8 *)&arb_regs,
+					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+					  si_pi->sram_end);
+
+	return ret;
+}
+
+static void si_get_mvdd_configuration(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+	pi->mvdd_split_frequency = 30000;
+}
+
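+/*
+ * Build the complete SMC state table: voltage tables, thermal
+ * protection type, platform flags and the initial/ACPI/ULV states
+ * (plus the initial-state and ULV arbiter timings), then copy it into
+ * SMC SRAM.
+ */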
+static int si_init_smc_table(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
+	const struct si_ulv_param *ulv = &si_pi->ulv;
+	SISLANDS_SMC_STATETABLE  *table = &si_pi->smc_statetable;
+	int ret;
+	u32 lane_width;
+	u32 vr_hot_gpio;
+
+	si_populate_smc_voltage_tables(adev, table);
+
+	switch (adev->pm.int_thermal_type) {
+	case THERMAL_TYPE_SI:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+		break;
+	case THERMAL_TYPE_NONE:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+		break;
+	default:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+		break;
+	}
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
+		if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
+			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
+	}
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
+		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
+		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
+		vr_hot_gpio = adev->pm.dpm.backbias_response_time;
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
+					   vr_hot_gpio);
+	}
+
+	ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
+	if (ret)
+		return ret;
+
+	ret = si_populate_smc_acpi_state(adev, table);
+	if (ret)
+		return ret;
+
+	table->driverState = table->initialState;
+
+	ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
+						     SISLANDS_INITIAL_STATE_ARB_INDEX);
+	if (ret)
+		return ret;
+
+	if (ulv->supported && ulv->pl.vddc) {
+		ret = si_populate_ulv_state(adev, &table->ULVState);
+		if (ret)
+			return ret;
+
+		ret = si_program_ulv_memory_timing_parameters(adev);
+		if (ret)
+			return ret;
+
+		WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
+		WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+
+		lane_width = amdgpu_get_pcie_lanes(adev);
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+	} else {
+		table->ULVState = table->initialState;
+	}
+
+	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
+					   (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
+					   si_pi->sram_end);
+}
+
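+/*
+ * Compute the SPLL settings for the requested engine clock.  The
+ * feedback divider is kept in fractional form,
+ *   fbdiv = engine_clock * (ref_div + 1) * post_div * 16384 / ref_clock,
+ * and spread spectrum, when enabled for the resulting VCO frequency,
+ * is folded into the CLK_S/CLK_V fields.
+ */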
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+				    u32 engine_clock,
+				    SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct atom_clock_dividers dividers;
+	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
+	u64 tmp;
+	u32 reference_clock = adev->clock.spll.reference_freq;
+	u32 reference_divider;
+	u32 fbdiv;
+	int ret;
+
+	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+					     engine_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	reference_divider = 1 + dividers.ref_div;
+
+	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
+	do_div(tmp, reference_clock);
+	fbdiv = (u32) tmp;
+
+	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
+	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+	spll_func_cntl_3 |= SPLL_DITHEN;
+
+	if (pi->sclk_ss) {
+		struct amdgpu_atom_ss ss;
+		u32 vco_freq = engine_clock * dividers.post_div;
+
+		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum &= ~CLK_S_MASK;
+			cg_spll_spread_spectrum |= CLK_S(clk_s);
+			cg_spll_spread_spectrum |= SSEN;
+
+			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+		}
+	}
+
+	sclk->sclk_value = engine_clock;
+	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
+	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
+	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
+	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
+	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
+	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
+
+	return 0;
+}
+
+static int si_populate_sclk_value(struct amdgpu_device *adev,
+				  u32 engine_clock,
+				  SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+	SISLANDS_SMC_SCLK_VALUE sclk_tmp;
+	int ret;
+
+	ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
+	if (!ret) {
+		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
+		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
+		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
+		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
+		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
+		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
+		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
+	}
+
+	return ret;
+}
+
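+/*
+ * Compute the MPLL settings for the requested memory clock from the
+ * ATOM dividers, fold in memory spread spectrum when enabled, and
+ * record the DLL speed and power state; everything is stored
+ * big-endian for the SMC.
+ */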
+static int si_populate_mclk_value(struct amdgpu_device *adev,
+				  u32 engine_clock,
+				  u32 memory_clock,
+				  SISLANDS_SMC_MCLK_VALUE *mclk,
+				  bool strobe_mode,
+				  bool dll_state_on)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+	u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
+	u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
+	struct atom_mpll_param mpll_param;
+	int ret;
+
+	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
+	if (ret)
+		return ret;
+
+	mpll_func_cntl &= ~BWCTRL_MASK;
+	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+			YCLK_POST_DIV(mpll_param.post_div);
+	}
+
+	if (pi->mclk_ss) {
+		struct amdgpu_atom_ss ss;
+		u32 freq_nom;
+		u32 tmp;
+		u32 reference_clock = adev->clock.mpll.reference_freq;
+
+		if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+			freq_nom = memory_clock * 4;
+		else
+			freq_nom = memory_clock * 2;
+
+		tmp = freq_nom / reference_clock;
+		tmp = tmp * tmp;
+		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+			u32 clks = reference_clock * 5 / ss.rate;
+			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+			mpll_ss1 &= ~CLKV_MASK;
+			mpll_ss1 |= CLKV(clkv);
+
+			mpll_ss2 &= ~CLKS_MASK;
+			mpll_ss2 |= CLKS(clks);
+		}
+	}
+
+	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+	if (dll_state_on)
+		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+	else
+		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+	mclk->mclk_value = cpu_to_be32(memory_clock);
+	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
+	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
+	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
+	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
+	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
+	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+	return 0;
+}
+
+static void si_populate_smc_sp(struct amdgpu_device *adev,
+			       struct amdgpu_ps *amdgpu_state,
+			       SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct si_ps *ps = si_get_ps(amdgpu_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	int i;
+
+	for (i = 0; i < ps->performance_level_count - 1; i++)
+		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
+
+	smc_state->levels[ps->performance_level_count - 1].bSP =
+		cpu_to_be32(pi->psp);
+}
+
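+/*
+ * Translate one driver performance level into an SMC hardware level:
+ * PCIE gen, sclk/mclk PLL settings, stutter/EDC memory flags, strobe
+ * and DLL mode, and the VDDC/VDDCI/MVDD voltages (with phase shedding
+ * when enabled).
+ */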
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+					 struct rv7xx_pl *pl,
+					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int ret;
+	bool dll_state_on;
+	u16 std_vddc;
+	bool gmc_pg = false;
+
+	if (eg_pi->pcie_performance_request &&
+	    (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
+		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
+	else
+		level->gen2PCIE = (u8)pl->pcie_gen;
+
+	ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
+	if (ret)
+		return ret;
+
+	level->mcFlags = 0;
+
+	if (pi->mclk_stutter_mode_threshold &&
+	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
+	    !eg_pi->uvd_enabled &&
+	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+	    (adev->pm.dpm.new_active_crtc_count <= 2)) {
+		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
+
+		if (gmc_pg)
+			level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
+	}
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+		if (pl->mclk > pi->mclk_edc_enable_threshold)
+			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
+
+		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
+			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
+
+		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
+
+		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
+			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
+			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
+			else
+				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
+		} else {
+			dll_state_on = false;
+		}
+	} else {
+		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
+
+		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
+	}
+
+	ret = si_populate_mclk_value(adev,
+				     pl->sclk,
+				     pl->mclk,
+				     &level->mclk,
+				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
+	if (ret)
+		return ret;
+
+	ret = si_populate_voltage_value(adev,
+					&eg_pi->vddc_voltage_table,
+					pl->vddc, &level->vddc);
+	if (ret)
+		return ret;
+
+	ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
+	if (ret)
+		return ret;
+
+	ret = si_populate_std_voltage_value(adev, std_vddc,
+					    level->vddc.index, &level->std_vddc);
+	if (ret)
+		return ret;
+
+	if (eg_pi->vddci_control) {
+		ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+						pl->vddci, &level->vddci);
+		if (ret)
+			return ret;
+	}
+
+	if (si_pi->vddc_phase_shed_control) {
+		ret = si_populate_phase_shedding_value(adev,
+						       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+						       pl->vddc,
+						       pl->sclk,
+						       pl->mclk,
+						       &level->vddc);
+		if (ret)
+			return ret;
+	}
+
+	level->MaxPoweredUpCU = si_pi->max_cu;
+
+	ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
+
+	return ret;
+}
+
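+/*
+ * Fill in the per-level aT fields that tell the SMC when to move
+ * between adjacent levels: t_l/t_h come from r600_calculate_at() on
+ * each pair of sclks and are scaled by the bsp/pbsp time base.
+ */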
+static int si_populate_smc_t(struct amdgpu_device *adev,
+			     struct amdgpu_ps *amdgpu_state,
+			     SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct si_ps *state = si_get_ps(amdgpu_state);
+	u32 a_t;
+	u32 t_l, t_h;
+	u32 high_bsp;
+	int i, ret;
+
+	if (state->performance_level_count >= 9)
+		return -EINVAL;
+
+	if (state->performance_level_count < 2) {
+		a_t = CG_R(0xffff) | CG_L(0);
+		smc_state->levels[0].aT = cpu_to_be32(a_t);
+		return 0;
+	}
+
+	smc_state->levels[0].aT = cpu_to_be32(0);
+
+	for (i = 0; i <= state->performance_level_count - 2; i++) {
+		ret = r600_calculate_at(
+			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
+			100 * R600_AH_DFLT,
+			state->performance_levels[i + 1].sclk,
+			state->performance_levels[i].sclk,
+			&t_l,
+			&t_h);
+
+		if (ret) {
+			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
+			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
+		}
+
+		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
+		a_t |= CG_R(t_l * pi->bsp / 20000);
+		smc_state->levels[i].aT = cpu_to_be32(a_t);
+
+		high_bsp = (i == state->performance_level_count - 2) ?
+			pi->pbsp : pi->bsp;
+		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
+	}
+
+	return 0;
+}
+
+static int si_disable_ulv(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+
+	if (ulv->supported)
+		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+			0 : -EINVAL;
+
+	return 0;
+}
+
+static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
+				       struct amdgpu_ps *amdgpu_state)
+{
+	const struct si_power_info *si_pi = si_get_pi(adev);
+	const struct si_ulv_param *ulv = &si_pi->ulv;
+	const struct si_ps *state = si_get_ps(amdgpu_state);
+	int i;
+
+	if (state->performance_levels[0].mclk != ulv->pl.mclk)
+		return false;
+
+	/* XXX validate against display requirements! */
+
+	for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
+		if (adev->clock.current_dispclk <=
+		    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
+			if (ulv->pl.vddc <
+			    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
+				return false;
+		}
+	}
+
+	if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
+		return false;
+
+	return true;
+}
+
+static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
+						       struct amdgpu_ps *amdgpu_new_state)
+{
+	const struct si_power_info *si_pi = si_get_pi(adev);
+	const struct si_ulv_param *ulv = &si_pi->ulv;
+
+	if (ulv->supported) {
+		if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
+			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+				0 : -EINVAL;
+	}
+	return 0;
+}
+
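+/*
+ * Convert a full power state: set the UVD/DC flags, build every
+ * hardware level (deep-sleep flags, display watermark, AC-timing
+ * index), then program the watermark threshold and the power
+ * containment and SQ-ramping values.  The latter two are disabled on
+ * failure rather than failing the whole state.
+ */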
+static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
+					 struct amdgpu_ps *amdgpu_state,
+					 SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ps *state = si_get_ps(amdgpu_state);
+	int i, ret;
+	u32 threshold;
+	u32 sclk_in_sr = 1350; /* ??? */
+
+	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
+		return -EINVAL;
+
+	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
+
+	if (amdgpu_state->vclk && amdgpu_state->dclk) {
+		eg_pi->uvd_enabled = true;
+		if (eg_pi->smu_uvd_hs)
+			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
+	} else {
+		eg_pi->uvd_enabled = false;
+	}
+
+	if (state->dc_compatible)
+		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	smc_state->levelCount = 0;
+	for (i = 0; i < state->performance_level_count; i++) {
+		if (eg_pi->sclk_deep_sleep) {
+			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
+				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+				else
+					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+			}
+		}
+
+		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
+						    &smc_state->levels[i]);
+		smc_state->levels[i].arbRefreshState =
+			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
+
+		if (ret)
+			return ret;
+
+		if (ni_pi->enable_power_containment)
+			smc_state->levels[i].displayWatermark =
+				(state->performance_levels[i].sclk < threshold) ?
+				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+		else
+			smc_state->levels[i].displayWatermark = (i < 2) ?
+				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+
+		if (eg_pi->dynamic_ac_timing)
+			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
+		else
+			smc_state->levels[i].ACIndex = 0;
+
+		smc_state->levelCount++;
+	}
+
+	si_write_smc_soft_register(adev,
+				   SI_SMC_SOFT_REGISTER_watermark_threshold,
+				   threshold / 512);
+
+	si_populate_smc_sp(adev, amdgpu_state, smc_state);
+
+	ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
+	if (ret)
+		ni_pi->enable_power_containment = false;
+
+	ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
+	if (ret)
+		ni_pi->enable_sq_ramping = false;
+
+	return si_populate_smc_t(adev, amdgpu_state, smc_state);
+}
+
+static int si_upload_sw_state(struct amdgpu_device *adev,
+			      struct amdgpu_ps *amdgpu_new_state)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+	int ret;
+	u32 address = si_pi->state_table_start +
+		offsetof(SISLANDS_SMC_STATETABLE, driverState);
+	u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
+		((new_state->performance_level_count - 1) *
+		 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
+	SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
+
+	memset(smc_state, 0, state_size);
+
+	ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
+	if (ret)
+		return ret;
+
+	return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+					   state_size, si_pi->sram_end);
+}
+
+static int si_upload_ulv_state(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	int ret = 0;
+
+	if (ulv->supported && ulv->pl.vddc) {
+		u32 address = si_pi->state_table_start +
+			offsetof(SISLANDS_SMC_STATETABLE, ULVState);
+		SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
+		u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+
+		memset(smc_state, 0, state_size);
+
+		ret = si_populate_ulv_state(adev, smc_state);
+		if (!ret)
+			ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+							  state_size, si_pi->sram_end);
+	}
+
+	return ret;
+}
+
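+/*
+ * Tell the SMC which CRTC to watch for memory clock switching and the
+ * window, derived from the watermarks and line time, in which an mclk
+ * change may start.  All writes are best-effort; a failure simply
+ * leaves the previous values in place.
+ */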
+static int si_upload_smc_data(struct amdgpu_device *adev)
+{
+	struct amdgpu_crtc *amdgpu_crtc = NULL;
+	int i;
+
+	if (adev->pm.dpm.new_active_crtc_count == 0)
+		return 0;
+
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
+			amdgpu_crtc = adev->mode_info.crtcs[i];
+			break;
+		}
+	}
+
+	if (amdgpu_crtc == NULL)
+		return 0;
+
+	if (amdgpu_crtc->line_time <= 0)
+		return 0;
+
+	if (si_write_smc_soft_register(adev,
+				       SI_SMC_SOFT_REGISTER_crtc_index,
+				       amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
+		return 0;
+
+	if (si_write_smc_soft_register(adev,
+				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+				       amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+		return 0;
+
+	if (si_write_smc_soft_register(adev,
+				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+				       amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+		return 0;
+
+	return 0;
+}
+
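+/*
+ * Append derived entries for the *_LP shadow registers: the EMRS/MRS
+ * (and, for MC_SEQ_RESERVE_M rows, MRS1) values are recombined from
+ * the live MC_PMG command registers and the per-entry data, with an
+ * extra MC_PMG_AUTO_CMD row for non-GDDR5 memory.
+ */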
+static int si_set_mc_special_registers(struct amdgpu_device *adev,
+				       struct si_mc_reg_table *table)
+{
+	u8 i, j, k;
+	u32 temp_reg;
+
+	for (i = 0, j = table->last; i < table->last; i++) {
+		if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+			return -EINVAL;
+		switch (table->mc_reg_address[i].s1) {
+		case MC_SEQ_MISC1:
+			temp_reg = RREG32(MC_PMG_CMD_EMRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
+			for (k = 0; k < table->num_entries; k++)
+				table->mc_reg_table_entry[k].mc_data[j] =
+					((temp_reg & 0xffff0000)) |
+					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+			j++;
+			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			temp_reg = RREG32(MC_PMG_CMD_MRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+			}
+			j++;
+			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
+				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
+				for (k = 0; k < table->num_entries; k++)
+					table->mc_reg_table_entry[k].mc_data[j] =
+						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+				j++;
+				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+					return -EINVAL;
+			}
+			break;
+		case MC_SEQ_RESERVE_M:
+			temp_reg = RREG32(MC_PMG_CMD_MRS1);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
+			for (k = 0; k < table->num_entries; k++)
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+			j++;
+			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			break;
+		default:
+			break;
+		}
+	}
+
+	table->last = j;
+
+	return 0;
+}
+
+static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+	bool result = true;
+
+	switch (in_reg) {
+	case  MC_SEQ_RAS_TIMING:
+		*out_reg = MC_SEQ_RAS_TIMING_LP;
+		break;
+	case MC_SEQ_CAS_TIMING:
+		*out_reg = MC_SEQ_CAS_TIMING_LP;
+		break;
+	case MC_SEQ_MISC_TIMING:
+		*out_reg = MC_SEQ_MISC_TIMING_LP;
+		break;
+	case MC_SEQ_MISC_TIMING2:
+		*out_reg = MC_SEQ_MISC_TIMING2_LP;
+		break;
+	case MC_SEQ_RD_CTL_D0:
+		*out_reg = MC_SEQ_RD_CTL_D0_LP;
+		break;
+	case MC_SEQ_RD_CTL_D1:
+		*out_reg = MC_SEQ_RD_CTL_D1_LP;
+		break;
+	case MC_SEQ_WR_CTL_D0:
+		*out_reg = MC_SEQ_WR_CTL_D0_LP;
+		break;
+	case MC_SEQ_WR_CTL_D1:
+		*out_reg = MC_SEQ_WR_CTL_D1_LP;
+		break;
+	case MC_PMG_CMD_EMRS:
+		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
+		break;
+	case MC_PMG_CMD_MRS:
+		*out_reg = MC_SEQ_PMG_CMD_MRS_LP;
+		break;
+	case MC_PMG_CMD_MRS1:
+		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
+		break;
+	case MC_SEQ_PMG_TIMING:
+		*out_reg = MC_SEQ_PMG_TIMING_LP;
+		break;
+	case MC_PMG_CMD_MRS2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
+		break;
+	case MC_SEQ_WR_CTL_2:
+		*out_reg = MC_SEQ_WR_CTL_2_LP;
+		break;
+	default:
+		result = false;
+		break;
+	}
+
+	return result;
+}
+
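+/*
+ * Mark only those registers whose value actually differs between
+ * entries; constant columns are skipped when the table is converted
+ * for the SMC.
+ */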
+static void si_set_valid_flag(struct si_mc_reg_table *table)
+{
+	u8 i, j;
+
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
+				table->valid_flag |= 1 << i;
+				break;
+			}
+		}
+	}
+}
+
+static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
+{
+	u32 i;
+	u16 address;
+
+	for (i = 0; i < table->last; i++)
+		table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+			address : table->mc_reg_address[i].s1;
+}
+
+static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
+				      struct si_mc_reg_table *si_table)
+{
+	u8 i, j;
+
+	if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+		return -EINVAL;
+	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+		return -EINVAL;
+
+	for (i = 0; i < table->last; i++)
+		si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+	si_table->last = table->last;
+
+	for (i = 0; i < table->num_entries; i++) {
+		si_table->mc_reg_table_entry[i].mclk_max =
+			table->mc_reg_table_entry[i].mclk_max;
+		for (j = 0; j < table->last; j++) {
+			si_table->mc_reg_table_entry[i].mc_data[j] =
+				table->mc_reg_table_entry[i].mc_data[j];
+		}
+	}
+	si_table->num_entries = table->num_entries;
+
+	return 0;
+}
+
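+/*
+ * Mirror the live MC timing registers into their *_LP shadows, pull
+ * the memory module's register table from the VBIOS, and post-process
+ * it: S0 shadow addresses, derived special registers and the
+ * valid-column mask.
+ */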
+static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct atom_mc_reg_table *table;
+	struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
+	u8 module_index = rv770_get_memory_module_index(adev);
+	int ret;
+
+	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
+	if (ret)
+		goto init_mc_done;
+
+	ret = si_copy_vbios_mc_reg_table(table, si_table);
+	if (ret)
+		goto init_mc_done;
+
+	si_set_s0_mc_reg_index(si_table);
+
+	ret = si_set_mc_special_registers(adev, si_table);
+	if (ret)
+		goto init_mc_done;
+
+	si_set_valid_flag(si_table);
+
+init_mc_done:
+	kfree(table);
+
+	return ret;
+}
+
+static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
+					 SMC_SIslands_MCRegisters *mc_reg_table)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 i, j;
+
+	for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
+		if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
+			if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				break;
+			mc_reg_table->address[i].s0 =
+				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 =
+				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+	mc_reg_table->last = (u8)i;
+}
+
+static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
+				    SMC_SIslands_MCRegisterSet *data,
+				    u32 num_entries, u32 valid_flag)
+{
+	u32 i, j;
+
+	for (i = 0, j = 0; j < num_entries; j++) {
+		if (valid_flag & (1 << j)) {
+			data->value[i] = cpu_to_be32(entry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
+static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
+						 struct rv7xx_pl *pl,
+						 SMC_SIslands_MCRegisterSet *mc_reg_table_data)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 i;
+
+	for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
+		if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+			break;
+	}
+
+	if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
+				mc_reg_table_data, si_pi->mc_reg_table.last,
+				si_pi->mc_reg_table.valid_flag);
+}
+
+static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
+					   struct amdgpu_ps *amdgpu_state,
+					   SMC_SIslands_MCRegisters *mc_reg_table)
+{
+	struct si_ps *state = si_get_ps(amdgpu_state);
+	int i;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		si_convert_mc_reg_table_entry_to_smc(adev,
+						     &state->performance_levels[i],
+						     &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
+	}
+}
+
+static int si_populate_mc_reg_table(struct amdgpu_device *adev,
+				    struct amdgpu_ps *amdgpu_boot_state)
+{
+	struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
+
+	si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
+
+	si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
+					     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
+
+	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+				&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
+				si_pi->mc_reg_table.last,
+				si_pi->mc_reg_table.valid_flag);
+
+	if (ulv->supported && ulv->pl.vddc != 0)
+		si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
+						     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
+	else
+		si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+					&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
+					si_pi->mc_reg_table.last,
+					si_pi->mc_reg_table.valid_flag);
+
+	si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
+
+	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
+					   (u8 *)smc_mc_reg_table,
+					   sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
+}
+
+static int si_upload_mc_reg_table(struct amdgpu_device *adev,
+				  struct amdgpu_ps *amdgpu_new_state)
+{
+	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 address = si_pi->mc_reg_table_start +
+		offsetof(SMC_SIslands_MCRegisters,
+			 data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
+	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+	si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
+
+	return amdgpu_si_copy_bytes_to_smc(adev, address,
+					   (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
+					   sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
+					   si_pi->sram_end);
+}
+
+static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+}
+
+static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
+						      struct amdgpu_ps *amdgpu_state)
+{
+	struct si_ps *state = si_get_ps(amdgpu_state);
+	int i;
+	u16 pcie_speed, max_speed = 0;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		pcie_speed = state->performance_levels[i].pcie_gen;
+		if (max_speed < pcie_speed)
+			max_speed = pcie_speed;
+	}
+	return max_speed;
+}
+
+static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
+{
+	u32 speed_cntl;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+	return (u16)speed_cntl;
+}
+
+static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
+							     struct amdgpu_ps *amdgpu_new_state,
+							     struct amdgpu_ps *amdgpu_current_state)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+	enum amdgpu_pcie_gen current_link_speed;
+
+	if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+		current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
+	else
+		current_link_speed = si_pi->force_pcie_gen;
+
+	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+	si_pi->pspp_notify_required = false;
+	if (target_link_speed > current_link_speed) {
+		switch (target_link_speed) {
+#if defined(CONFIG_ACPI)
+		case AMDGPU_PCIE_GEN3:
+			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+				break;
+			si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
+			if (current_link_speed == AMDGPU_PCIE_GEN2)
+				break;
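+			/* fall through */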
+		case AMDGPU_PCIE_GEN2:
+			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+				break;
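+			/* fall through */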
+#endif
+		default:
+			si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
+			break;
+		}
+	} else {
+		if (target_link_speed < current_link_speed)
+			si_pi->pspp_notify_required = true;
+	}
+}
+
+static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
+							   struct amdgpu_ps *amdgpu_new_state,
+							   struct amdgpu_ps *amdgpu_current_state)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+	u8 request;
+
+	if (si_pi->pspp_notify_required) {
+		if (target_link_speed == AMDGPU_PCIE_GEN3)
+			request = PCIE_PERF_REQ_PECI_GEN3;
+		else if (target_link_speed == AMDGPU_PCIE_GEN2)
+			request = PCIE_PERF_REQ_PECI_GEN2;
+		else
+			request = PCIE_PERF_REQ_PECI_GEN1;
+
+		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+		    (si_get_current_pcie_speed(adev) > 0))
+			return;
+
+#if defined(CONFIG_ACPI)
+		amdgpu_acpi_pcie_performance_request(adev, request, false);
+#endif
+	}
+}
+
+#if 0
+static int si_ds_request(struct amdgpu_device *adev,
+			 bool ds_status_on, u32 count_write)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+
+	if (eg_pi->sclk_deep_sleep) {
+		if (ds_status_on)
+			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
+				PPSMC_Result_OK) ?
+				0 : -EINVAL;
+		else
+			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
+				PPSMC_Result_OK) ? 0 : -EINVAL;
+	}
+	return 0;
+}
+#endif
+
+static void si_set_max_cu_value(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (adev->asic_type == CHIP_VERDE) {
+		switch (adev->pdev->device) {
+		case 0x6820:
+		case 0x6825:
+		case 0x6821:
+		case 0x6823:
+		case 0x6827:
+			si_pi->max_cu = 10;
+			break;
+		case 0x682D:
+		case 0x6824:
+		case 0x682F:
+		case 0x6826:
+			si_pi->max_cu = 8;
+			break;
+		case 0x6828:
+		case 0x6830:
+		case 0x6831:
+		case 0x6838:
+		case 0x6839:
+		case 0x683D:
+			si_pi->max_cu = 10;
+			break;
+		case 0x683B:
+		case 0x683F:
+		case 0x6829:
+			si_pi->max_cu = 8;
+			break;
+		default:
+			si_pi->max_cu = 0;
+			break;
+		}
+	} else {
+		si_pi->max_cu = 0;
+	}
+}
+
+static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
+							     struct amdgpu_clock_voltage_dependency_table *table)
+{
+	u32 i;
+	int j;
+	u16 leakage_voltage;
+
+	if (table) {
+		for (i = 0; i < table->count; i++) {
+			switch (si_get_leakage_voltage_from_leakage_index(adev,
+									  table->entries[i].v,
+									  &leakage_voltage)) {
+			case 0:
+				table->entries[i].v = leakage_voltage;
+				break;
+			case -EAGAIN:
+				return -EINVAL;
+			case -EINVAL:
+			default:
+				break;
+			}
+		}
+
+		for (j = (table->count - 2); j >= 0; j--) {
+			table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
+				table->entries[j].v : table->entries[j + 1].v;
+		}
+	}
+	return 0;
+}
+
+static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
+{
+	int ret = 0;
+
+	ret = si_patch_single_dependency_table_based_on_leakage(adev,
+								&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+	if (ret)
+		DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
+	ret = si_patch_single_dependency_table_based_on_leakage(adev,
+								&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+	if (ret)
+		DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
+	ret = si_patch_single_dependency_table_based_on_leakage(adev,
+								&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+	if (ret)
+		DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
+	return ret;
+}
+
+static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
+					  struct amdgpu_ps *amdgpu_new_state,
+					  struct amdgpu_ps *amdgpu_current_state)
+{
+	u32 lane_width;
+	u32 new_lane_width =
+		(amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+	u32 current_lane_width =
+		(amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+
+	if (new_lane_width != current_lane_width) {
+		amdgpu_set_pcie_lanes(adev, new_lane_width);
+		lane_width = amdgpu_get_pcie_lanes(adev);
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+	}
+}
+
+static void si_dpm_setup_asic(struct amdgpu_device *adev)
+{
+	si_read_clock_registers(adev);
+	si_enable_acpi_power_management(adev);
+}
+
+static int si_thermal_enable_alert(struct amdgpu_device *adev,
+				   bool enable)
+{
+	u32 thermal_int = RREG32(CG_THERMAL_INT);
+
+	if (enable) {
+		PPSMC_Result result;
+
+		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+		WREG32(CG_THERMAL_INT, thermal_int);
+		result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+		if (result != PPSMC_Result_OK) {
+			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+			return -EINVAL;
+		}
+	} else {
+		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+		WREG32(CG_THERMAL_INT, thermal_int);
+	}
+
+	return 0;
+}
+
+static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
+					    int min_temp, int max_temp)
+{
+	int low_temp = 0 * 1000;
+	int high_temp = 255 * 1000;
+
+	if (low_temp < min_temp)
+		low_temp = min_temp;
+	if (high_temp > max_temp)
+		high_temp = max_temp;
+	if (high_temp < low_temp) {
+		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		return -EINVAL;
+	}
+
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
+	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+
+	adev->pm.dpm.thermal.min_temp = low_temp;
+	adev->pm.dpm.thermal.max_temp = high_temp;
+
+	return 0;
+}
+
+static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+
+	if (si_pi->fan_ctrl_is_in_default_mode) {
+		tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+		si_pi->fan_ctrl_default_mode = tmp;
+		tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+		si_pi->t_min = tmp;
+		si_pi->fan_ctrl_is_in_default_mode = false;
+	}
+
+	tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+	tmp |= TMIN(0);
+	WREG32(CG_FDO_CTRL2, tmp);
+
+	tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+	tmp |= FDO_PWM_MODE(mode);
+	WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
+	u32 duty100;
+	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	u16 fdo_min, slope1, slope2;
+	u32 reference_clock, tmp;
+	int ret;
+	u64 tmp64;
+
+	if (!si_pi->fan_table_start) {
+		adev->pm.dpm.fan.ucode_fan_control = false;
+		return 0;
+	}
+
+	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+	if (duty100 == 0) {
+		adev->pm.dpm.fan.ucode_fan_control = false;
+		return 0;
+	}
+
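+	/*
+	 * pwm_min appears to be given in 0.01% units; scale it to the
+	 * controller's 0..duty100 duty-cycle range
+	 */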
+	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (u16)tmp64;
+
+	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
+	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
+
+	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
+	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
+
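+	/* fan-curve slopes in fixed point; +50 before /100 rounds to nearest */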
+	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
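+	/*
+	 * the trip temperatures appear to be stored in 0.01 degrees C;
+	 * round them to whole degrees for the SMC table
+	 */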
+	fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
+	fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
+	fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
+	fan_table.slope1 = cpu_to_be16(slope1);
+	fan_table.slope2 = cpu_to_be16(slope2);
+	fan_table.fdo_min = cpu_to_be16(fdo_min);
+	fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
+	fan_table.hys_up = cpu_to_be16(1);
+	fan_table.hys_slope = cpu_to_be16(1);
+	fan_table.temp_resp_lim = cpu_to_be16(5);
+	reference_clock = amdgpu_asic_get_xclk(adev);
+
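+	/* fan table refresh period, derived from the reference clock */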
+	fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
+						reference_clock) / 1600);
+	fan_table.fdo_max = cpu_to_be16((u16)duty100);
+
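+	/* pass the currently selected temperature source through to the SMC */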
+	tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+	fan_table.temp_src = (uint8_t)tmp;
+
+	ret = amdgpu_si_copy_bytes_to_smc(adev,
+					  si_pi->fan_table_start,
+					  (u8 *)(&fan_table),
+					  sizeof(fan_table),
+					  si_pi->sram_end);
+
+	if (ret) {
+		DRM_ERROR("Failed to load fan table to the SMC.\n");
+		adev->pm.dpm.fan.ucode_fan_control = false;
+	}
+
+	return ret;
+}
+
+static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	PPSMC_Result ret;
+
+	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
+	if (ret == PPSMC_Result_OK) {
+		si_pi->fan_is_controlled_by_smc = true;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	PPSMC_Result ret;
+
+	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
+
+	if (ret == PPSMC_Result_OK) {
+		si_pi->fan_is_controlled_by_smc = false;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+					u32 *speed)
+{
+	u32 duty, duty100;
+	u64 tmp64;
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+	duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+	if (duty100 == 0)
+		return -EINVAL;
+
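+	/* report the current duty cycle as a percentage of the maximum */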
+	tmp64 = (u64)duty * 100;
+	do_div(tmp64, duty100);
+	*speed = (u32)tmp64;
+
+	if (*speed > 100)
+		*speed = 100;
+
+	return 0;
+}
+
+static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+					u32 speed)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+	u32 duty, duty100;
+	u64 tmp64;
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	if (si_pi->fan_is_controlled_by_smc)
+		return -EINVAL;
+
+	if (speed > 100)
+		return -EINVAL;
+
+	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+	if (duty100 == 0)
+		return -EINVAL;
+
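+	/* convert the requested percentage into a static duty-cycle value */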
+	tmp64 = (u64)speed * duty100;
+	do_div(tmp64, 100);
+	duty = (u32)tmp64;
+
+	tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+	tmp |= FDO_STATIC_DUTY(duty);
+	WREG32(CG_FDO_CTRL0, tmp);
+
+	return 0;
+}
+
+static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+{
+	if (mode) {
+		/* stop auto-manage */
+		if (adev->pm.dpm.fan.ucode_fan_control)
+			si_fan_ctrl_stop_smc_fan_control(adev);
+		si_fan_ctrl_set_static_mode(adev, mode);
+	} else {
+		/* restart auto-manage */
+		if (adev->pm.dpm.fan.ucode_fan_control)
+			si_thermal_start_smc_fan_control(adev);
+		else
+			si_fan_ctrl_set_default_mode(adev);
+	}
+}
+
+static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+
+	if (si_pi->fan_is_controlled_by_smc)
+		return 0;
+
+	tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+	return (tmp >> FDO_PWM_MODE_SHIFT);
+}
+
+#if 0
+static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
+					 u32 *speed)
+{
+	u32 tach_period;
+	u32 xclk = amdgpu_asic_get_xclk(adev);
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	if (adev->pm.fan_pulses_per_revolution == 0)
+		return -ENOENT;
+
+	tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+	if (tach_period == 0)
+		return -ENOENT;
+
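+	/* tach period counts xclk ticks (xclk is in 10 kHz units) */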
+	*speed = 60 * xclk * 10000 / tach_period;
+
+	return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
+					 u32 speed)
+{
+	u32 tach_period, tmp;
+	u32 xclk = amdgpu_asic_get_xclk(adev);
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	if (adev->pm.fan_pulses_per_revolution == 0)
+		return -ENOENT;
+
+	if ((speed < adev->pm.fan_min_rpm) ||
+	    (speed > adev->pm.fan_max_rpm))
+		return -EINVAL;
+
+	if (adev->pm.dpm.fan.ucode_fan_control)
+		si_fan_ctrl_stop_smc_fan_control(adev);
+
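+	/*
+	 * translate RPM into a target tach period; the factor of 8
+	 * presumably matches the tach edges counted per revolution
+	 */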
+	tach_period = 60 * xclk * 10000 / (8 * speed);
+	tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+	tmp |= TARGET_PERIOD(tach_period);
+	WREG32(CG_TACH_CTRL, tmp);
+
+	si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
+
+	return 0;
+}
+#endif
+
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+
+	if (!si_pi->fan_ctrl_is_in_default_mode) {
+		tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+		tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
+		WREG32(CG_FDO_CTRL2, tmp);
+
+		tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+		tmp |= TMIN(si_pi->t_min);
+		WREG32(CG_FDO_CTRL2, tmp);
+		si_pi->fan_ctrl_is_in_default_mode = true;
+	}
+}
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
+{
+	if (adev->pm.dpm.fan.ucode_fan_control) {
+		si_fan_ctrl_start_smc_fan_control(adev);
+		si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
+	}
+}
+
+static void si_thermal_initialize(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	if (adev->pm.fan_pulses_per_revolution) {
+		tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+		tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution - 1);
+		WREG32(CG_TACH_CTRL, tmp);
+	}
+
+	tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+	tmp |= TACH_PWM_RESP_RATE(0x28);
+	WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
+{
+	int ret;
+
+	si_thermal_initialize(adev);
+	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = si_thermal_enable_alert(adev, true);
+	if (ret)
+		return ret;
+	if (adev->pm.dpm.fan.ucode_fan_control) {
+		ret = si_halt_smc(adev);
+		if (ret)
+			return ret;
+		ret = si_thermal_setup_fan_table(adev);
+		if (ret)
+			return ret;
+		ret = si_resume_smc(adev);
+		if (ret)
+			return ret;
+		si_thermal_start_smc_fan_control(adev);
+	}
+
+	return 0;
+}
+
+static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
+{
+	if (!adev->pm.no_fan) {
+		si_fan_ctrl_set_default_mode(adev);
+		si_fan_ctrl_stop_smc_fan_control(adev);
+	}
+}
+
+static int si_dpm_enable(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+	int ret;
+
+	if (amdgpu_si_is_smc_running(adev))
+		return -EINVAL;
+	if (pi->voltage_control || si_pi->voltage_control_svi2)
+		si_enable_voltage_control(adev, true);
+	if (pi->mvdd_control)
+		si_get_mvdd_configuration(adev);
+	if (pi->voltage_control || si_pi->voltage_control_svi2) {
+		ret = si_construct_voltage_tables(adev);
+		if (ret) {
+			DRM_ERROR("si_construct_voltage_tables failed\n");
+			return ret;
+		}
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = si_initialize_mc_reg_table(adev);
+		if (ret)
+			eg_pi->dynamic_ac_timing = false;
+	}
+	if (pi->dynamic_ss)
+		si_enable_spread_spectrum(adev, true);
+	if (pi->thermal_protection)
+		si_enable_thermal_protection(adev, true);
+	si_setup_bsp(adev);
+	si_program_git(adev);
+	si_program_tp(adev);
+	si_program_tpp(adev);
+	si_program_sstp(adev);
+	si_enable_display_gap(adev);
+	si_program_vc(adev);
+	ret = si_upload_firmware(adev);
+	if (ret) {
+		DRM_ERROR("si_upload_firmware failed\n");
+		return ret;
+	}
+	ret = si_process_firmware_header(adev);
+	if (ret) {
+		DRM_ERROR("si_process_firmware_header failed\n");
+		return ret;
+	}
+	ret = si_initial_switch_from_arb_f0_to_f1(adev);
+	if (ret) {
+		DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
+		return ret;
+	}
+	ret = si_init_smc_table(adev);
+	if (ret) {
+		DRM_ERROR("si_init_smc_table failed\n");
+		return ret;
+	}
+	ret = si_init_smc_spll_table(adev);
+	if (ret) {
+		DRM_ERROR("si_init_smc_spll_table failed\n");
+		return ret;
+	}
+	ret = si_init_arb_table_index(adev);
+	if (ret) {
+		DRM_ERROR("si_init_arb_table_index failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = si_populate_mc_reg_table(adev, boot_ps);
+		if (ret) {
+			DRM_ERROR("si_populate_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = si_initialize_smc_cac_tables(adev);
+	if (ret) {
+		DRM_ERROR("si_initialize_smc_cac_tables failed\n");
+		return ret;
+	}
+	ret = si_initialize_hardware_cac_manager(adev);
+	if (ret) {
+		DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
+		return ret;
+	}
+	ret = si_initialize_smc_dte_tables(adev);
+	if (ret) {
+		DRM_ERROR("si_initialize_smc_dte_tables failed\n");
+		return ret;
+	}
+	ret = si_populate_smc_tdp_limits(adev, boot_ps);
+	if (ret) {
+		DRM_ERROR("si_populate_smc_tdp_limits failed\n");
+		return ret;
+	}
+	ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
+	if (ret) {
+		DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
+		return ret;
+	}
+	si_program_response_times(adev);
+	si_program_ds_registers(adev);
+	si_dpm_start_smc(adev);
+	ret = si_notify_smc_display_change(adev, false);
+	if (ret) {
+		DRM_ERROR("si_notify_smc_display_change failed\n");
+		return ret;
+	}
+	si_enable_sclk_control(adev, true);
+	si_start_dpm(adev);
+
+	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+	si_thermal_start_thermal_controller(adev);
+	ni_update_current_ps(adev, boot_ps);
+
+	return 0;
+}
+
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+	int ret;
+
+	ret = si_thermal_enable_alert(adev, false);
+	if (ret)
+		return ret;
+	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = si_thermal_enable_alert(adev, true);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void si_dpm_disable(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+
+	if (!amdgpu_si_is_smc_running(adev))
+		return;
+	si_thermal_stop_thermal_controller(adev);
+	si_disable_ulv(adev);
+	si_clear_vc(adev);
+	if (pi->thermal_protection)
+		si_enable_thermal_protection(adev, false);
+	si_enable_power_containment(adev, boot_ps, false);
+	si_enable_smc_cac(adev, boot_ps, false);
+	si_enable_spread_spectrum(adev, false);
+	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+	si_stop_dpm(adev);
+	si_reset_to_default(adev);
+	si_dpm_stop_smc(adev);
+	si_force_switch_to_arb_f0(adev);
+
+	ni_update_current_ps(adev, boot_ps);
+}
+
+static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+	struct amdgpu_ps *new_ps = &requested_ps;
+
+	ni_update_requested_ps(adev, new_ps);
+	si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
+
+	return 0;
+}
+
+static int si_power_control_set_level(struct amdgpu_device *adev)
+{
+	struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
+	int ret;
+
+	ret = si_restrict_performance_levels_before_switch(adev);
+	if (ret)
+		return ret;
+	ret = si_halt_smc(adev);
+	if (ret)
+		return ret;
+	ret = si_populate_smc_tdp_limits(adev, new_ps);
+	if (ret)
+		return ret;
+	ret = si_populate_smc_tdp_limits_2(adev, new_ps);
+	if (ret)
+		return ret;
+	ret = si_resume_smc(adev);
+	if (ret)
+		return ret;
+	ret = si_set_sw_state(adev);
+	if (ret)
+		return ret;
+	return 0;
+}
+
+static int si_dpm_set_power_state(struct amdgpu_device *adev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+	struct amdgpu_ps *old_ps = &eg_pi->current_rps;
+	int ret;
+
+	ret = si_disable_ulv(adev);
+	if (ret) {
+		DRM_ERROR("si_disable_ulv failed\n");
+		return ret;
+	}
+	ret = si_restrict_performance_levels_before_switch(adev);
+	if (ret) {
+		DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
+		return ret;
+	}
+	if (eg_pi->pcie_performance_request)
+		si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
+	ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
+	ret = si_enable_power_containment(adev, new_ps, false);
+	if (ret) {
+		DRM_ERROR("si_enable_power_containment failed\n");
+		return ret;
+	}
+	ret = si_enable_smc_cac(adev, new_ps, false);
+	if (ret) {
+		DRM_ERROR("si_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = si_halt_smc(adev);
+	if (ret) {
+		DRM_ERROR("si_halt_smc failed\n");
+		return ret;
+	}
+	ret = si_upload_sw_state(adev, new_ps);
+	if (ret) {
+		DRM_ERROR("si_upload_sw_state failed\n");
+		return ret;
+	}
+	ret = si_upload_smc_data(adev);
+	if (ret) {
+		DRM_ERROR("si_upload_smc_data failed\n");
+		return ret;
+	}
+	ret = si_upload_ulv_state(adev);
+	if (ret) {
+		DRM_ERROR("si_upload_ulv_state failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = si_upload_mc_reg_table(adev, new_ps);
+		if (ret) {
+			DRM_ERROR("si_upload_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = si_program_memory_timing_parameters(adev, new_ps);
+	if (ret) {
+		DRM_ERROR("si_program_memory_timing_parameters failed\n");
+		return ret;
+	}
+	si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
+
+	ret = si_resume_smc(adev);
+	if (ret) {
+		DRM_ERROR("si_resume_smc failed\n");
+		return ret;
+	}
+	ret = si_set_sw_state(adev);
+	if (ret) {
+		DRM_ERROR("si_set_sw_state failed\n");
+		return ret;
+	}
+	ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
+	if (eg_pi->pcie_performance_request)
+		si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
+	ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
+	if (ret) {
+		DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
+		return ret;
+	}
+	ret = si_enable_smc_cac(adev, new_ps, true);
+	if (ret) {
+		DRM_ERROR("si_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = si_enable_power_containment(adev, new_ps, true);
+	if (ret) {
+		DRM_ERROR("si_enable_power_containment failed\n");
+		return ret;
+	}
+
+	ret = si_power_control_set_level(adev);
+	if (ret) {
+		DRM_ERROR("si_power_control_set_level failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+
+	ni_update_current_ps(adev, new_ps);
+}
+
+#if 0
+void si_dpm_reset_asic(struct amdgpu_device *adev)
+{
+	si_restrict_performance_levels_before_switch(adev);
+	si_disable_ulv(adev);
+	si_set_boot_state(adev);
+}
+#endif
+
+static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+	si_program_display_gap(adev);
+}
+
+static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+					  struct amdgpu_ps *rps,
+					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					  u8 table_rev)
+{
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
+		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+		adev->pm.dpm.boot_ps = rps;
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		adev->pm.dpm.uvd_ps = rps;
+}
+
+static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
+				      struct amdgpu_ps *rps, int index,
+				      union pplib_clock_info *clock_info)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ps *ps = si_get_ps(rps);
+	u16 leakage_voltage;
+	struct rv7xx_pl *pl = &ps->performance_levels[index];
+	int ret;
+
+	ps->performance_level_count = index + 1;
+
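+	/* clocks are split into a 16-bit low word and an 8-bit high byte */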
+	pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+	pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
+	pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+	pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
+
+	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
+	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
+	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
+	pl->pcie_gen = r600_get_pcie_gen_support(adev,
+						 si_pi->sys_pcie_mask,
+						 si_pi->boot_pcie_gen,
+						 clock_info->si.ucPCIEGen);
+
+	/* patch up vddc if necessary */
+	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
+							&leakage_voltage);
+	if (ret == 0)
+		pl->vddc = leakage_voltage;
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+		pi->acpi_vddc = pl->vddc;
+		eg_pi->acpi_vddci = pl->vddci;
+		si_pi->acpi_pcie_gen = pl->pcie_gen;
+	}
+
+	if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
+	    index == 0) {
+		/* XXX disable for A0 tahiti */
+		si_pi->ulv.supported = false;
+		si_pi->ulv.pl = *pl;
+		si_pi->ulv.one_pcie_lane_in_ulv = false;
+		si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
+		si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
+		si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
+	}
+
+	if (pi->min_vddc_in_table > pl->vddc)
+		pi->min_vddc_in_table = pl->vddc;
+
+	if (pi->max_vddc_in_table < pl->vddc)
+		pi->max_vddc_in_table = pl->vddc;
+
+	/* patch up boot state */
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		u16 vddc, vddci, mvdd;
+		amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
+		pl->mclk = adev->clock.default_mclk;
+		pl->sclk = adev->clock.default_sclk;
+		pl->vddc = vddc;
+		pl->vddci = vddci;
+		si_pi->mvdd_bootup_value = mvdd;
+	}
+
+	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
+	}
+}
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static int si_parse_power_table(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, k, non_clock_array_index, clock_array_index;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+	struct si_ps *ps;
+
+	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	amdgpu_add_thermal_controller(adev);
+
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+	adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
+				  state_array->ucNumEntries, GFP_KERNEL);
+	if (!adev->pm.dpm.ps)
+		return -ENOMEM;
+	power_state_offset = (u8 *)state_array->states;
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		u8 *idx;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
+		if (ps == NULL) {
+			/* unwind the partially built array */
+			while (i--)
+				kfree(adev->pm.dpm.ps[i].ps_priv);
+			kfree(adev->pm.dpm.ps);
+			adev->pm.dpm.ps = NULL;
+			return -ENOMEM;
+		}
+		adev->pm.dpm.ps[i].ps_priv = ps;
+		si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+					      non_clock_info,
+					      non_clock_info_array->ucEntrySize);
+		k = 0;
+		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = idx[j];
+			if (clock_array_index >= clock_info_array->ucNumEntries)
+				continue;
+			if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
+				break;
+			clock_info = (union pplib_clock_info *)
+				((u8 *)&clock_info_array->clockInfo[0] +
+				 (clock_array_index * clock_info_array->ucEntrySize));
+			si_parse_pplib_clock_info(adev,
+						  &adev->pm.dpm.ps[i], k,
+						  clock_info);
+			k++;
+		}
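+		/* a v2 state entry is 2 bytes plus one index per DPM level */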
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+	/* fill in the vce power states */
+	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+		u32 sclk, mclk;
+		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
+		clock_info = (union pplib_clock_info *)
+			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+		sclk |= clock_info->si.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+		mclk |= clock_info->si.ucMemoryClockHigh << 16;
+		adev->pm.dpm.vce_states[i].sclk = sclk;
+		adev->pm.dpm.vce_states[i].mclk = mclk;
+	}
+
+	return 0;
+}
+
+static int si_dpm_init(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi;
+	struct evergreen_power_info *eg_pi;
+	struct ni_power_info *ni_pi;
+	struct si_power_info *si_pi;
+	struct atom_clock_dividers dividers;
+	int ret;
+	u32 mask;
+
+	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
+	if (si_pi == NULL)
+		return -ENOMEM;
+	adev->pm.dpm.priv = si_pi;
+	ni_pi = &si_pi->ni;
+	eg_pi = &ni_pi->eg;
+	pi = &eg_pi->rv7xx;
+
+	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+	if (ret)
+		si_pi->sys_pcie_mask = 0;
+	else
+		si_pi->sys_pcie_mask = mask;
+	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
+
+	si_set_max_cu_value(adev);
+
+	rv770_get_max_vddc(adev);
+	si_get_leakage_vddc(adev);
+	si_patch_dependency_tables_based_on_leakage(adev);
+
+	pi->acpi_vddc = 0;
+	eg_pi->acpi_vddci = 0;
+	pi->min_vddc_in_table = 0;
+	pi->max_vddc_in_table = 0;
+
+	ret = amdgpu_get_platform_caps(adev);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_parse_extended_power_table(adev);
+	if (ret)
+		return ret;
+
+	ret = si_parse_power_table(adev);
+	if (ret)
+		return ret;
+
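+	/* default dispclk/vddc points; clocks appear to be in 10 kHz units */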
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+		kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+		amdgpu_free_extended_power_table(adev);
+		return -ENOMEM;
+	}
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+	if (adev->pm.dpm.voltage_response_time == 0)
+		adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+	if (adev->pm.dpm.backbias_response_time == 0)
+		adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+					     0, false, &dividers);
+	/* fall back to the default reference divider if the query failed */
+	if (ret)
+		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+	else
+		pi->ref_div = dividers.ref_div + 1;
+
+	eg_pi->smu_uvd_hs = false;
+
+	pi->mclk_strobe_mode_threshold = 40000;
+	if (si_is_special_1gb_platform(adev))
+		pi->mclk_stutter_mode_threshold = 0;
+	else
+		pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
+	pi->mclk_edc_enable_threshold = 40000;
+	eg_pi->mclk_edc_wr_enable_threshold = 40000;
+
+	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
+
+	pi->voltage_control =
+		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					    VOLTAGE_OBJ_GPIO_LUT);
+	if (!pi->voltage_control) {
+		si_pi->voltage_control_svi2 =
+			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+						    VOLTAGE_OBJ_SVID2);
+		if (si_pi->voltage_control_svi2)
+			amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+						  &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+	}
+
+	pi->mvdd_control =
+		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+					    VOLTAGE_OBJ_GPIO_LUT);
+
+	eg_pi->vddci_control =
+		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+					    VOLTAGE_OBJ_GPIO_LUT);
+	if (!eg_pi->vddci_control)
+		si_pi->vddci_control_svi2 =
+			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+						    VOLTAGE_OBJ_SVID2);
+
+	si_pi->vddc_phase_shed_control =
+		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					    VOLTAGE_OBJ_PHASE_LUT);
+
+	rv770_get_engine_memory_ss(adev);
+
+	pi->asi = RV770_ASI_DFLT;
+	pi->pasi = CYPRESS_HASI_DFLT;
+	pi->vrc = SISLANDS_VRC_DFLT;
+
+	pi->gfx_clock_gating = true;
+
+	eg_pi->sclk_deep_sleep = true;
+	si_pi->sclk_deep_sleep_above_low = false;
+
+	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+		pi->thermal_protection = true;
+	else
+		pi->thermal_protection = false;
+
+	eg_pi->dynamic_ac_timing = true;
+
+	eg_pi->light_sleep = true;
+#if defined(CONFIG_ACPI)
+	eg_pi->pcie_performance_request =
+		amdgpu_acpi_is_pcie_performance_request_supported(adev);
+#else
+	eg_pi->pcie_performance_request = false;
+#endif
+
+	si_pi->sram_end = SMC_RAM_END;
+
+	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+	si_initialize_powertune_defaults(adev);
+
+	/* make sure dc limits are valid */
+	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+	si_pi->fan_ctrl_is_in_default_mode = true;
+
+	return 0;
+}
+
+static void si_dpm_fini(struct amdgpu_device *adev)
+{
+	int i;
+
+	if (adev->pm.dpm.ps)
+		for (i = 0; i < adev->pm.dpm.num_ps; i++)
+			kfree(adev->pm.dpm.ps[i].ps_priv);
+	kfree(adev->pm.dpm.ps);
+	kfree(adev->pm.dpm.priv);
+	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+	amdgpu_free_extended_power_table(adev);
+}
+
+static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+							   struct seq_file *m)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct amdgpu_ps *rps = &eg_pi->current_rps;
+	struct si_ps *ps = si_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+		CURRENT_STATE_INDEX_SHIFT;
+
+	if (current_index >= ps->performance_level_count) {
+		seq_printf(m, "invalid dpm profile %u\n", current_index);
+	} else {
+		pl = &ps->performance_levels[current_index];
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+	}
+}
+
+static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      unsigned type,
+				      enum amdgpu_interrupt_state state)
+{
+	u32 cg_thermal_int;
+
+	switch (type) {
+	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+			cg_thermal_int |= THERM_INT_MASK_HIGH;
+			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+			cg_thermal_int &= ~THERM_INT_MASK_HIGH;
+			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+			break;
+		default:
+			break;
+		}
+		break;
+
+	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+			cg_thermal_int |= THERM_INT_MASK_LOW;
+			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+			cg_thermal_int &= ~THERM_INT_MASK_LOW;
+			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+			break;
+		default:
+			break;
+		}
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int si_dpm_process_interrupt(struct amdgpu_device *adev,
+				    struct amdgpu_irq_src *source,
+				    struct amdgpu_iv_entry *entry)
+{
+	bool queue_thermal = false;
+
+	if (entry == NULL)
+		return -EINVAL;
+
+	switch (entry->src_id) {
+	case 230: /* thermal low to high */
+		DRM_DEBUG("IH: thermal low to high\n");
+		adev->pm.dpm.thermal.high_to_low = false;
+		queue_thermal = true;
+		break;
+	case 231: /* thermal high to low */
+		DRM_DEBUG("IH: thermal high to low\n");
+		adev->pm.dpm.thermal.high_to_low = true;
+		queue_thermal = true;
+		break;
+	default:
+		break;
+	}
+
+	if (queue_thermal)
+		schedule_work(&adev->pm.dpm.thermal.work);
+
+	return 0;
+}
+
+static int si_dpm_late_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!amdgpu_dpm)
+		return 0;
+
+	/* init the sysfs and debugfs files late */
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		return ret;
+
+	ret = si_set_temperature_range(adev);
+	if (ret)
+		return ret;
+#if 0 /* TODO? */
+	si_dpm_powergate_uvd(adev, true);
+#endif
+	return 0;
+}
+
+/**
+ * si_dpm_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dpm_init_microcode(struct amdgpu_device *adev)
+{
+	const char *chip_name;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		chip_name = "tahiti";
+		break;
+	case CHIP_PITCAIRN:
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->device == 0x6810) ||
+		    (adev->pdev->device == 0x6811) ||
+		    (adev->pdev->device == 0x6816) ||
+		    (adev->pdev->device == 0x6817) ||
+		    (adev->pdev->device == 0x6806))
+			chip_name = "pitcairn_k";
+		else
+			chip_name = "pitcairn";
+		break;
+	case CHIP_VERDE:
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0x87) ||
+		    (adev->pdev->device == 0x6820) ||
+		    (adev->pdev->device == 0x6821) ||
+		    (adev->pdev->device == 0x6822) ||
+		    (adev->pdev->device == 0x6823) ||
+		    (adev->pdev->device == 0x682A) ||
+		    (adev->pdev->device == 0x682B))
+			chip_name = "verde_k";
+		else
+			chip_name = "verde";
+		break;
+	case CHIP_OLAND:
+		if ((adev->pdev->revision == 0xC7) ||
+		    (adev->pdev->revision == 0x80) ||
+		    (adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->device == 0x6604) ||
+		    (adev->pdev->device == 0x6605))
+			chip_name = "oland_k";
+		else
+			chip_name = "oland";
+		break;
+	case CHIP_HAINAN:
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0xC3) ||
+		    (adev->pdev->device == 0x6664) ||
+		    (adev->pdev->device == 0x6665) ||
+		    (adev->pdev->device == 0x6667))
+			chip_name = "hainan_k";
+		else
+			chip_name = "hainan";
+		break;
+	default:
+		BUG();
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+	if (err) {
+		DRM_ERROR("si_smc: Failed to load firmware \"%s\" (err = %d)\n",
+			  fw_name, err);
+		release_firmware(adev->pm.fw);
+		adev->pm.fw = NULL;
+	}
+	return err;
+}
+
+static int si_dpm_sw_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+	if (ret)
+		return ret;
+
+	/* default to balanced state */
+	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+	adev->pm.default_sclk = adev->clock.default_sclk;
+	adev->pm.default_mclk = adev->clock.default_mclk;
+	adev->pm.current_sclk = adev->clock.default_sclk;
+	adev->pm.current_mclk = adev->clock.default_mclk;
+	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+	if (amdgpu_dpm == 0)
+		return 0;
+
+	ret = si_dpm_init_microcode(adev);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
+	mutex_lock(&adev->pm.mutex);
+	ret = si_dpm_init(adev);
+	if (ret)
+		goto dpm_failed;
+	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+	if (amdgpu_dpm == 1)
+		amdgpu_pm_print_power_states(adev);
+	mutex_unlock(&adev->pm.mutex);
+	DRM_INFO("amdgpu: dpm initialized\n");
+
+	return 0;
+
+dpm_failed:
+	si_dpm_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+	DRM_ERROR("amdgpu: dpm initialization failed\n");
+	return ret;
+}
+
+static int si_dpm_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	mutex_lock(&adev->pm.mutex);
+	amdgpu_pm_sysfs_fini(adev);
+	si_dpm_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+
+	return 0;
+}
+
+static int si_dpm_hw_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!amdgpu_dpm)
+		return 0;
+
+	mutex_lock(&adev->pm.mutex);
+	si_dpm_setup_asic(adev);
+	ret = si_dpm_enable(adev);
+	if (ret)
+		adev->pm.dpm_enabled = false;
+	else
+		adev->pm.dpm_enabled = true;
+	mutex_unlock(&adev->pm.mutex);
+
+	return ret;
+}
+
+static int si_dpm_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		si_dpm_disable(adev);
+		mutex_unlock(&adev->pm.mutex);
+	}
+
+	return 0;
+}
+
+static int si_dpm_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		/* disable dpm */
+		si_dpm_disable(adev);
+		/* reset the power state */
+		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+		mutex_unlock(&adev->pm.mutex);
+	}
+	return 0;
+}
+
+static int si_dpm_resume(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pm.dpm_enabled) {
+		/* asic init will reset to the boot state */
+		mutex_lock(&adev->pm.mutex);
+		si_dpm_setup_asic(adev);
+		ret = si_dpm_enable(adev);
+		if (ret)
+			adev->pm.dpm_enabled = false;
+		else
+			adev->pm.dpm_enabled = true;
+		mutex_unlock(&adev->pm.mutex);
+		if (adev->pm.dpm_enabled)
+			amdgpu_pm_compute_clocks(adev);
+	}
+	return 0;
+}
+
+static bool si_dpm_is_idle(void *handle)
+{
+	/* XXX */
+	return true;
+}
+
+static int si_dpm_wait_for_idle(void *handle)
+{
+	/* XXX */
+	return 0;
+}
+
+static int si_dpm_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static int si_dpm_set_clockgating_state(void *handle,
+					enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int si_dpm_set_powergating_state(void *handle,
+					enum amd_powergating_state state)
+{
+	return 0;
+}
+
+/* get temperature in millidegrees */
+static int si_dpm_get_temp(struct amdgpu_device *adev)
+{
+	u32 temp;
+	int actual_temp = 0;
+
+	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+		CTF_TEMP_SHIFT;
+
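+	/* bit 9 set flags an out-of-range reading; clamp to the 255 C max */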
+	if (temp & 0x200)
+		actual_temp = 255;
+	else
+		actual_temp = temp & 0x1ff;
+
+	actual_temp = actual_temp * 1000;
+
+	return actual_temp;
+}
+
+static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+	if (low)
+		return requested_state->performance_levels[0].sclk;
+	else
+		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+	if (low)
+		return requested_state->performance_levels[0].mclk;
+	else
+		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
+
+static void si_dpm_print_power_state(struct amdgpu_device *adev,
+				     struct amdgpu_ps *rps)
+{
+	struct si_ps *ps = si_get_ps(rps);
+	struct rv7xx_pl *pl;
+	int i;
+
+	amdgpu_dpm_print_class_info(rps->class, rps->class2);
+	amdgpu_dpm_print_cap_info(rps->caps);
+	DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	for (i = 0; i < ps->performance_level_count; i++) {
+		pl = &ps->performance_levels[i];
+		if (adev->asic_type >= CHIP_TAHITI)
+			DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+				 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+		else
+			DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+				 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+	}
+	amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static int si_dpm_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	si_dpm_set_dpm_funcs(adev);
+	si_dpm_set_irq_funcs(adev);
+	return 0;
+}
+
+const struct amd_ip_funcs si_dpm_ip_funcs = {
+	.name = "si_dpm",
+	.early_init = si_dpm_early_init,
+	.late_init = si_dpm_late_init,
+	.sw_init = si_dpm_sw_init,
+	.sw_fini = si_dpm_sw_fini,
+	.hw_init = si_dpm_hw_init,
+	.hw_fini = si_dpm_hw_fini,
+	.suspend = si_dpm_suspend,
+	.resume = si_dpm_resume,
+	.is_idle = si_dpm_is_idle,
+	.wait_for_idle = si_dpm_wait_for_idle,
+	.soft_reset = si_dpm_soft_reset,
+	.set_clockgating_state = si_dpm_set_clockgating_state,
+	.set_powergating_state = si_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs si_dpm_funcs = {
+	.get_temperature = &si_dpm_get_temp,
+	.pre_set_power_state = &si_dpm_pre_set_power_state,
+	.set_power_state = &si_dpm_set_power_state,
+	.post_set_power_state = &si_dpm_post_set_power_state,
+	.display_configuration_changed = &si_dpm_display_configuration_changed,
+	.get_sclk = &si_dpm_get_sclk,
+	.get_mclk = &si_dpm_get_mclk,
+	.print_power_state = &si_dpm_print_power_state,
+	.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
+	.force_performance_level = &si_dpm_force_performance_level,
+	.vblank_too_short = &si_dpm_vblank_too_short,
+	.set_fan_control_mode = &si_dpm_set_fan_control_mode,
+	.get_fan_control_mode = &si_dpm_get_fan_control_mode,
+	.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
+	.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+};
+
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
+{
+	if (adev->pm.funcs == NULL)
+		adev->pm.funcs = &si_dpm_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
+	.set = si_dpm_set_interrupt_state,
+	.process = si_dpm_process_interrupt,
+};
+
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
+	adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
new file mode 100644
index 0000000..51ce21c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -0,0 +1,1015 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SI_DPM_H__
+#define __SI_DPM_H__
+
+#include "amdgpu_atombios.h"
+#include "sislands_smc.h"
+
+#define MC_CG_CONFIG                                    0x96f
+#define MC_ARB_CG                                       0x9fa
+#define		CG_ARB_REQ(x)				((x) << 0)
+#define		CG_ARB_REQ_MASK				(0xff << 0)
+
+#define	MC_ARB_DRAM_TIMING_1				0x9fc
+#define	MC_ARB_DRAM_TIMING_2				0x9fd
+#define	MC_ARB_DRAM_TIMING_3				0x9fe
+#define	MC_ARB_DRAM_TIMING2_1				0x9ff
+#define	MC_ARB_DRAM_TIMING2_2				0xa00
+#define	MC_ARB_DRAM_TIMING2_3				0xa01
+
+#define MAX_NO_OF_MVDD_VALUES 2
+#define MAX_NO_VREG_STEPS 32
+#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+#define RV770_ASI_DFLT                                1000
+#define CYPRESS_HASI_DFLT                               400000
+#define PCIE_PERF_REQ_PECI_GEN1         2
+#define PCIE_PERF_REQ_PECI_GEN2         3
+#define PCIE_PERF_REQ_PECI_GEN3         4
+#define RV770_DEFAULT_VCLK_FREQ  53300 /* 10 kHz */
+#define RV770_DEFAULT_DCLK_FREQ  40000 /* 10 kHz */
+
+#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
+
+#define RV770_SMC_TABLE_ADDRESS 0xB000
+#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE    3
+
+#define SMC_STROBE_RATIO    0x0F
+#define SMC_STROBE_ENABLE   0x10
+
+#define SMC_MC_EDC_RD_FLAG  0x01
+#define SMC_MC_EDC_WR_FLAG  0x02
+#define SMC_MC_RTT_ENABLE   0x04
+#define SMC_MC_STUTTER_EN   0x08
+
+#define RV770_SMC_VOLTAGEMASK_VDDC 0
+#define RV770_SMC_VOLTAGEMASK_MVDD 1
+#define RV770_SMC_VOLTAGEMASK_VDDCI 2
+#define RV770_SMC_VOLTAGEMASK_MAX  4
+
+#define NISLANDS_SMC_STROBE_RATIO    0x0F
+#define NISLANDS_SMC_STROBE_ENABLE   0x10
+
+#define NISLANDS_SMC_MC_EDC_RD_FLAG  0x01
+#define NISLANDS_SMC_MC_EDC_WR_FLAG  0x02
+#define NISLANDS_SMC_MC_RTT_ENABLE   0x04
+#define NISLANDS_SMC_MC_STUTTER_EN   0x08
+
+#define NISLANDS_SMC_VOLTAGEMASK_VDDC  0
+#define NISLANDS_SMC_VOLTAGEMASK_MVDD  1
+#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define NISLANDS_SMC_VOLTAGEMASK_MAX   4
+
+#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT               0
+#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT                  1
+#define SISLANDS_MCREGISTERTABLE_ULV_SLOT                   2
+#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT     3
+
+#define SISLANDS_LEAKAGE_INDEX0     0xff01
+#define SISLANDS_MAX_LEAKAGE_COUNT  4
+
+#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
+#define SISLANDS_INITIAL_STATE_ARB_INDEX    0
+#define SISLANDS_ACPI_STATE_ARB_INDEX       1
+#define SISLANDS_ULV_STATE_ARB_INDEX        2
+#define SISLANDS_DRIVER_STATE_ARB_INDEX     3
+
+#define SISLANDS_DPM2_MAX_PULSE_SKIP        256
+
+#define SISLANDS_DPM2_NEAR_TDP_DEC          10
+#define SISLANDS_DPM2_ABOVE_SAFE_INC        5
+#define SISLANDS_DPM2_BELOW_SAFE_INC        20
+
+#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT            80
+
+#define SISLANDS_DPM2_MAXPS_PERCENT_H                   99
+#define SISLANDS_DPM2_MAXPS_PERCENT_M                   99
+
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER                 0x3FFF
+#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER                 0x12
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA           0x15
+#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE                  0x1E
+#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO                 0xF
+
+#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN         10
+
+#define SISLANDS_VRC_DFLT                               0xC000B3
+#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT             1687
+#define SISLANDS_CGULVPARAMETER_DFLT                    0x00040035
+#define SISLANDS_CGULVCONTROL_DFLT                      0x1f007550
+
+#define SI_ASI_DFLT                                10000
+#define SI_BSP_DFLT                                0x41EB
+#define SI_BSU_DFLT                                0x2
+#define SI_AH_DFLT                                 5
+#define SI_RLP_DFLT                                25
+#define SI_RMP_DFLT                                65
+#define SI_LHP_DFLT                                40
+#define SI_LMP_DFLT                                15
+#define SI_TD_DFLT                                 0
+#define SI_UTC_DFLT_00                             0x24
+#define SI_UTC_DFLT_01                             0x22
+#define SI_UTC_DFLT_02                             0x22
+#define SI_UTC_DFLT_03                             0x22
+#define SI_UTC_DFLT_04                             0x22
+#define SI_UTC_DFLT_05                             0x22
+#define SI_UTC_DFLT_06                             0x22
+#define SI_UTC_DFLT_07                             0x22
+#define SI_UTC_DFLT_08                             0x22
+#define SI_UTC_DFLT_09                             0x22
+#define SI_UTC_DFLT_10                             0x22
+#define SI_UTC_DFLT_11                             0x22
+#define SI_UTC_DFLT_12                             0x22
+#define SI_UTC_DFLT_13                             0x22
+#define SI_UTC_DFLT_14                             0x22
+#define SI_DTC_DFLT_00                             0x24
+#define SI_DTC_DFLT_01                             0x22
+#define SI_DTC_DFLT_02                             0x22
+#define SI_DTC_DFLT_03                             0x22
+#define SI_DTC_DFLT_04                             0x22
+#define SI_DTC_DFLT_05                             0x22
+#define SI_DTC_DFLT_06                             0x22
+#define SI_DTC_DFLT_07                             0x22
+#define SI_DTC_DFLT_08                             0x22
+#define SI_DTC_DFLT_09                             0x22
+#define SI_DTC_DFLT_10                             0x22
+#define SI_DTC_DFLT_11                             0x22
+#define SI_DTC_DFLT_12                             0x22
+#define SI_DTC_DFLT_13                             0x22
+#define SI_DTC_DFLT_14                             0x22
+#define SI_VRC_DFLT                                0x0000C003
+#define SI_VOLTAGERESPONSETIME_DFLT                1000
+#define SI_BACKBIASRESPONSETIME_DFLT               1000
+#define SI_VRU_DFLT                                0x3
+#define SI_SPLLSTEPTIME_DFLT                       0x1000
+#define SI_SPLLSTEPUNIT_DFLT                       0x3
+#define SI_TPU_DFLT                                0
+#define SI_TPC_DFLT                                0x200
+#define SI_SSTU_DFLT                               0
+#define SI_SST_DFLT                                0x00C8
+#define SI_GICST_DFLT                              0x200
+#define SI_FCT_DFLT                                0x0400
+#define SI_FCTU_DFLT                               0
+#define SI_CTXCGTT3DRPHC_DFLT                      0x20
+#define SI_CTXCGTT3DRSDC_DFLT                      0x40
+#define SI_VDDC3DOORPHC_DFLT                       0x100
+#define SI_VDDC3DOORSDC_DFLT                       0x7
+#define SI_VDDC3DOORSU_DFLT                        0
+#define SI_MPLLLOCKTIME_DFLT                       100
+#define SI_MPLLRESETTIME_DFLT                      150
+#define SI_VCOSTEPPCT_DFLT                          20
+#define SI_ENDINGVCOSTEPPCT_DFLT                    5
+#define SI_REFERENCEDIVIDER_DFLT                    4
+
+#define SI_PM_NUMBER_OF_TC 15
+#define SI_PM_NUMBER_OF_SCLKS 20
+#define SI_PM_NUMBER_OF_MCLKS 4
+#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define SI_TEMP_RANGE_MIN (90 * 1000)
+#define SI_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum ni_dc_cac_level
+{
+	NISLANDS_DCCAC_LEVEL_0 = 0,
+	NISLANDS_DCCAC_LEVEL_1,
+	NISLANDS_DCCAC_LEVEL_2,
+	NISLANDS_DCCAC_LEVEL_3,
+	NISLANDS_DCCAC_LEVEL_4,
+	NISLANDS_DCCAC_LEVEL_5,
+	NISLANDS_DCCAC_LEVEL_6,
+	NISLANDS_DCCAC_LEVEL_7,
+	NISLANDS_DCCAC_MAX_LEVELS
+};
+
+enum si_cac_config_reg_type
+{
+	SISLANDS_CACCONFIG_MMR = 0,
+	SISLANDS_CACCONFIG_CGIND,
+	SISLANDS_CACCONFIG_MAX
+};
+
+enum si_power_level {
+	SI_POWER_LEVEL_LOW = 0,
+	SI_POWER_LEVEL_MEDIUM = 1,
+	SI_POWER_LEVEL_HIGH = 2,
+	SI_POWER_LEVEL_CTXSW = 3,
+};
+
+enum si_td {
+	SI_TD_AUTO,
+	SI_TD_UP,
+	SI_TD_DOWN,
+};
+
+enum si_display_watermark {
+	SI_DISPLAY_WATERMARK_LOW = 0,
+	SI_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum si_display_gap
+{
+    SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+    SI_PM_DISPLAY_GAP_VBLANK       = 1,
+    SI_PM_DISPLAY_GAP_WATERMARK    = 2,
+    SI_PM_DISPLAY_GAP_IGNORE       = 3,
+};
+
+extern const struct amd_ip_funcs si_dpm_ip_funcs;
+
+struct ni_leakage_coeffients
+{
+	u32 at;
+	u32 bt;
+	u32 av;
+	u32 bv;
+	s32 t_slope;
+	s32 t_intercept;
+	u32 t_ref;
+};
+
+struct SMC_Evergreen_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
+
+struct evergreen_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct evergreen_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_Evergreen_MCRegisterSet
+{
+    uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
+
+struct SMC_Evergreen_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_Evergreen_MCRegisterAddress     address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+    SMC_Evergreen_MCRegisterSet         data[5];
+};
+
+typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
+
+struct SMC_NIslands_MCRegisterSet
+{
+    uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
+
+struct ni_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_NIslands_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
+
+struct SMC_NIslands_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_NIslands_MCRegisterAddress      address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+    SMC_NIslands_MCRegisterSet          data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
+
+struct evergreen_ulv_param {
+	bool supported;
+	struct rv7xx_pl *pl;
+};
+
+struct evergreen_arb_registers {
+	u32 mc_arb_dram_timing;
+	u32 mc_arb_dram_timing2;
+	u32 mc_arb_rfsh_rate;
+	u32 mc_arb_burst_time;
+};
+
+struct at {
+	u32 rlp;
+	u32 rmp;
+	u32 lhp;
+	u32 lmp;
+};
+
+struct ni_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct RV770_SMC_SCLK_VALUE
+{
+    uint32_t        vCG_SPLL_FUNC_CNTL;
+    uint32_t        vCG_SPLL_FUNC_CNTL_2;
+    uint32_t        vCG_SPLL_FUNC_CNTL_3;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t        sclk_value;
+};
+
+typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
+
+struct RV770_SMC_MCLK_VALUE
+{
+    uint32_t        vMPLL_AD_FUNC_CNTL;
+    uint32_t        vMPLL_AD_FUNC_CNTL_2;
+    uint32_t        vMPLL_DQ_FUNC_CNTL;
+    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
+
+struct RV730_SMC_MCLK_VALUE
+{
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL2;
+    uint32_t        vMPLL_FUNC_CNTL3;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
+
+struct RV770_SMC_VOLTAGE_VALUE
+{
+    uint16_t             value;
+    uint8_t              index;
+    uint8_t              padding;
+};
+
+typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
+
+union RV7XX_SMC_MCLK_VALUE
+{
+    RV770_SMC_MCLK_VALUE    mclk770;
+    RV730_SMC_MCLK_VALUE    mclk730;
+};
+
+typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
+
+struct RV770_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                 arbValue;
+    union{
+        uint8_t             seqValue;
+        uint8_t             ACIndex;
+    };
+    uint8_t                 displayWatermark;
+    uint8_t                 gen2PCIE;
+    uint8_t                 gen2XSP;
+    uint8_t                 backbias;
+    uint8_t                 strobeMode;
+    uint8_t                 mcFlags;
+    uint32_t                aT;
+    uint32_t                bSP;
+    RV770_SMC_SCLK_VALUE    sclk;
+    RV7XX_SMC_MCLK_VALUE    mclk;
+    RV770_SMC_VOLTAGE_VALUE vddc;
+    RV770_SMC_VOLTAGE_VALUE mvdd;
+    RV770_SMC_VOLTAGE_VALUE vddci;
+    uint8_t                 reserved1;
+    uint8_t                 reserved2;
+    uint8_t                 stateFlags;
+    uint8_t                 padding;
+};
+
+typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
+
+struct RV770_SMC_SWSTATE
+{
+    uint8_t           flags;
+    uint8_t           padding1;
+    uint8_t           padding2;
+    uint8_t           padding3;
+    RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
+
+struct RV770_SMC_VOLTAGEMASKTABLE
+{
+    uint8_t  highMask[RV770_SMC_VOLTAGEMASK_MAX];
+    uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
+
+struct RV770_SMC_STATETABLE
+{
+    uint8_t             thermalProtectType;
+    uint8_t             systemFlags;
+    uint8_t             maxVDDCIndexInPPTable;
+    uint8_t             extraFlags;
+    uint8_t             highSMIO[MAX_NO_VREG_STEPS];
+    uint32_t            lowSMIO[MAX_NO_VREG_STEPS];
+    RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+    RV770_SMC_SWSTATE   initialState;
+    RV770_SMC_SWSTATE   ACPIState;
+    RV770_SMC_SWSTATE   driverState;
+    RV770_SMC_SWSTATE   ULVState;
+};
+
+typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
+
+struct vddc_table_entry {
+	u16 vddc;
+	u8 vddc_index;
+	u8 high_smio;
+	u32 low_smio;
+};
+
+struct rv770_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct rv730_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+	u32 mpll_func_cntl;
+	u32 mpll_func_cntl2;
+	u32 mpll_func_cntl3;
+	u32 mpll_ss;
+	u32 mpll_ss2;
+};
+
+union r7xx_clock_registers {
+	struct rv770_clock_registers rv770;
+	struct rv730_clock_registers rv730;
+};
+
+struct rv7xx_power_info {
+	/* flags */
+	bool mem_gddr5;
+	bool pcie_gen2;
+	bool dynamic_pcie_gen2;
+	bool acpi_pcie_gen2;
+	bool boot_in_gen2;
+	bool voltage_control; /* vddc */
+	bool mvdd_control;
+	bool sclk_ss;
+	bool mclk_ss;
+	bool dynamic_ss;
+	bool gfx_clock_gating;
+	bool mg_clock_gating;
+	bool mgcgtssm;
+	bool power_gating;
+	bool thermal_protection;
+	bool display_gap;
+	bool dcodt;
+	bool ulps;
+	/* registers */
+	union r7xx_clock_registers clk_regs;
+	u32 s0_vid_lower_smio_cntl;
+	/* voltage */
+	u32 vddc_mask_low;
+	u32 mvdd_mask_low;
+	u32 mvdd_split_frequency;
+	u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
+	u16 max_vddc;
+	u16 max_vddc_in_table;
+	u16 min_vddc_in_table;
+	struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
+	u8 valid_vddc_entries;
+	/* dc odt */
+	u32 mclk_odt_threshold;
+	u8 odt_value_0[2];
+	u8 odt_value_1[2];
+	/* stored values */
+	u32 boot_sclk;
+	u16 acpi_vddc;
+	u32 ref_div;
+	u32 active_auto_throttle_sources;
+	u32 mclk_stutter_mode_threshold;
+	u32 mclk_strobe_mode_threshold;
+	u32 mclk_edc_enable_threshold;
+	u32 bsp;
+	u32 bsu;
+	u32 pbsp;
+	u32 pbsu;
+	u32 dsp;
+	u32 psp;
+	u32 asi;
+	u32 pasi;
+	u32 vrc;
+	u32 restricted_levels;
+	u32 rlp;
+	u32 rmp;
+	u32 lhp;
+	u32 lmp;
+	/* smc offsets */
+	u16 state_table_start;
+	u16 soft_regs_start;
+	u16 sram_end;
+	/* scratch structs */
+	RV770_SMC_STATETABLE smc_statetable;
+};
+
+struct rv7xx_pl {
+	u32 sclk;
+	u32 mclk;
+	u16 vddc;
+	u16 vddci; /* eg+ only */
+	u32 flags;
+	enum amdgpu_pcie_gen pcie_gen; /* si+ only */
+};
+
+struct rv7xx_ps {
+	struct rv7xx_pl high;
+	struct rv7xx_pl medium;
+	struct rv7xx_pl low;
+	bool dc_compatible;
+};
+
+struct si_ps {
+	u16 performance_level_count;
+	bool dc_compatible;
+	struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+struct ni_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ni_cac_data
+{
+	struct ni_leakage_coeffients leakage_coefficients;
+	u32 i_leakage;
+	s32 leakage_minimum_temperature;
+	u32 pwr_const;
+	u32 dc_cac_value;
+	u32 bif_cac_value;
+	u32 lkge_pwr;
+	u8 mc_wr_weight;
+	u8 mc_rd_weight;
+	u8 allow_ovrflw;
+	u8 num_win_tdp;
+	u8 l2num_win_tdp;
+	u8 lts_truncate_n;
+};
+
+struct evergreen_power_info {
+	/* must be first! */
+	struct rv7xx_power_info rv7xx;
+	/* flags */
+	bool vddci_control;
+	bool dynamic_ac_timing;
+	bool abm;
+	bool mcls;
+	bool light_sleep;
+	bool memory_transition;
+	bool pcie_performance_request;
+	bool pcie_performance_request_registered;
+	bool sclk_deep_sleep;
+	bool dll_default_on;
+	bool ls_clock_gating;
+	bool smu_uvd_hs;
+	bool uvd_enabled;
+	/* stored values */
+	u16 acpi_vddci;
+	u8 mvdd_high_index;
+	u8 mvdd_low_index;
+	u32 mclk_edc_wr_enable_threshold;
+	struct evergreen_mc_reg_table mc_reg_table;
+	struct atom_voltage_table vddc_voltage_table;
+	struct atom_voltage_table vddci_voltage_table;
+	struct evergreen_arb_registers bootup_arb_registers;
+	struct evergreen_ulv_param ulv;
+	struct at ats[2];
+	/* smc offsets */
+	u16 mc_reg_table_start;
+	struct amdgpu_ps current_rps;
+	struct rv7xx_ps current_ps;
+	struct amdgpu_ps requested_rps;
+	struct rv7xx_ps requested_ps;
+};
+
+struct PP_NIslands_Dpm2PerfLevel
+{
+    uint8_t     MaxPS;
+    uint8_t     TgtAct;
+    uint8_t     MaxPS_StepInc;
+    uint8_t     MaxPS_StepDec;
+    uint8_t     PSST;
+    uint8_t     NearTDPDec;
+    uint8_t     AboveSafeInc;
+    uint8_t     BelowSafeInc;
+    uint8_t     PSDeltaLimit;
+    uint8_t     PSDeltaWin;
+    uint8_t     Reserved[6];
+};
+
+typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
+
+struct PP_NIslands_DPM2Parameters
+{
+    uint32_t    TDPLimit;
+    uint32_t    NearTDPLimit;
+    uint32_t    SafePowerLimit;
+    uint32_t    PowerBoostLimit;
+};
+typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
+
+struct NISLANDS_SMC_SCLK_VALUE
+{
+    uint32_t        vCG_SPLL_FUNC_CNTL;
+    uint32_t        vCG_SPLL_FUNC_CNTL_2;
+    uint32_t        vCG_SPLL_FUNC_CNTL_3;
+    uint32_t        vCG_SPLL_FUNC_CNTL_4;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t        sclk_value;
+};
+
+typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
+
+struct NISLANDS_SMC_MCLK_VALUE
+{
+    uint32_t        vMPLL_FUNC_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL_1;
+    uint32_t        vMPLL_FUNC_CNTL_2;
+    uint32_t        vMPLL_AD_FUNC_CNTL;
+    uint32_t        vMPLL_AD_FUNC_CNTL_2;
+    uint32_t        vMPLL_DQ_FUNC_CNTL;
+    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
+
+struct NISLANDS_SMC_VOLTAGE_VALUE
+{
+    uint16_t             value;
+    uint8_t              index;
+    uint8_t              padding;
+};
+
+typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
+
+struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                     arbValue;
+    uint8_t                     ACIndex;
+    uint8_t                     displayWatermark;
+    uint8_t                     gen2PCIE;
+    uint8_t                     reserved1;
+    uint8_t                     reserved2;
+    uint8_t                     strobeMode;
+    uint8_t                     mcFlags;
+    uint32_t                    aT;
+    uint32_t                    bSP;
+    NISLANDS_SMC_SCLK_VALUE     sclk;
+    NISLANDS_SMC_MCLK_VALUE     mclk;
+    NISLANDS_SMC_VOLTAGE_VALUE  vddc;
+    NISLANDS_SMC_VOLTAGE_VALUE  mvdd;
+    NISLANDS_SMC_VOLTAGE_VALUE  vddci;
+    NISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
+    uint32_t                    powergate_en;
+    uint8_t                     hUp;
+    uint8_t                     hDown;
+    uint8_t                     stateFlags;
+    uint8_t                     arbRefreshState;
+    uint32_t                    SQPowerThrottle;
+    uint32_t                    SQPowerThrottle_2;
+    uint32_t                    reserved[2];
+    PP_NIslands_Dpm2PerfLevel   dpm2;
+};
+
+typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
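+/* levels[] is an old-style variable-length tail: levelCount gives the
+ * number of valid entries, the nominal [1] bound is only a placeholder.
+ */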
+struct NISLANDS_SMC_SWSTATE
+{
+    uint8_t                             flags;
+    uint8_t                             levelCount;
+    uint8_t                             padding2;
+    uint8_t                             padding3;
+    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
+};
+
+typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
+
+struct NISLANDS_SMC_VOLTAGEMASKTABLE
+{
+    uint8_t  highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+    uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define NISLANDS_MAX_NO_VREG_STEPS 32
+
+struct NISLANDS_SMC_STATETABLE
+{
+    uint8_t                             thermalProtectType;
+    uint8_t                             systemFlags;
+    uint8_t                             maxVDDCIndexInPPTable;
+    uint8_t                             extraFlags;
+    uint8_t                             highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+    uint32_t                            lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+    NISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+    PP_NIslands_DPM2Parameters          dpm2Params;
+    NISLANDS_SMC_SWSTATE                initialState;
+    NISLANDS_SMC_SWSTATE                ACPIState;
+    NISLANDS_SMC_SWSTATE                ULVState;
+    NISLANDS_SMC_SWSTATE                driverState;
+    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
+
+struct ni_power_info {
+	/* must be first! */
+	struct evergreen_power_info eg;
+	struct ni_clock_registers clock_registers;
+	struct ni_mc_reg_table mc_reg_table;
+	u32 mclk_rtt_mode_threshold;
+	/* flags */
+	bool use_power_boost_limit;
+	bool support_cac_long_term_average;
+	bool cac_enabled;
+	bool cac_configuration_required;
+	bool driver_calculate_cac_leakage;
+	bool pc_enabled;
+	bool enable_power_containment;
+	bool enable_cac;
+	bool enable_sq_ramping;
+	/* smc offsets */
+	u16 arb_table_start;
+	u16 fan_table_start;
+	u16 cac_table_start;
+	u16 spll_table_start;
+	/* CAC stuff */
+	struct ni_cac_data cac_data;
+	u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
+	const struct ni_cac_weights *cac_weights;
+	u8 lta_window_size;
+	u8 lts_truncate;
+	struct si_ps current_ps;
+	struct si_ps requested_ps;
+	/* scratch structs */
+	SMC_NIslands_MCRegisters smc_mc_reg_table;
+	NISLANDS_SMC_STATETABLE smc_statetable;
+};
+
+struct si_cac_config_reg
+{
+	u32 offset;
+	u32 mask;
+	u32 shift;
+	u32 value;
+	enum si_cac_config_reg_type type;
+};
+
+struct si_powertune_data
+{
+	u32 cac_window;
+	u32 l2_lta_window_size_default;
+	u8 lts_truncate_default;
+	u8 shift_n_default;
+	u8 operating_temp;
+	struct ni_leakage_coeffients leakage_coefficients;
+	u32 fixed_kt;
+	u32 lkge_lut_v0_percent;
+	u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
+	bool enable_powertune_by_default;
+};
+
+struct si_dyn_powertune_data
+{
+	u32 cac_leakage;
+	s32 leakage_minimum_temperature;
+	u32 wintime;
+	u32 l2_lta_window_size;
+	u8 lts_truncate;
+	u8 shift_n;
+	u8 dc_pwr_value;
+	bool disable_uvd_powertune;
+};
+
+struct si_dte_data
+{
+	u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+	u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+	u32 k;
+	u32 t0;
+	u32 max_t;
+	u8 window_size;
+	u8 temp_select;
+	u8 dte_mode;
+	u8 tdep_count;
+	u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+	u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+	u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+	u32 t_threshold;
+	bool enable_dte_by_default;
+};
+
+struct si_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 dll_cntl;
+	u32 mclk_pwrmgt_cntl;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_func_cntl;
+	u32 mpll_func_cntl_1;
+	u32 mpll_func_cntl_2;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct si_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_leakage_voltage_entry
+{
+	u16 voltage;
+	u16 leakage_index;
+};
+
+struct si_leakage_voltage
+{
+	u16 count;
+	struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+struct si_ulv_param {
+	bool supported;
+	u32 cg_ulv_control;
+	u32 cg_ulv_parameter;
+	u32 volt_change_delay;
+	struct rv7xx_pl pl;
+	bool one_pcie_lane_in_ulv;
+};
+
+struct si_power_info {
+	/* must be first! */
+	struct ni_power_info ni;
+	struct si_clock_registers clock_registers;
+	struct si_mc_reg_table mc_reg_table;
+	struct atom_voltage_table mvdd_voltage_table;
+	struct atom_voltage_table vddc_phase_shed_table;
+	struct si_leakage_voltage leakage_voltage;
+	u16 mvdd_bootup_value;
+	struct si_ulv_param ulv;
+	u32 max_cu;
+	/* pcie gen */
+	enum amdgpu_pcie_gen force_pcie_gen;
+	enum amdgpu_pcie_gen boot_pcie_gen;
+	enum amdgpu_pcie_gen acpi_pcie_gen;
+	u32 sys_pcie_mask;
+	/* flags */
+	bool enable_dte;
+	bool enable_ppm;
+	bool vddc_phase_shed_control;
+	bool pspp_notify_required;
+	bool sclk_deep_sleep_above_low;
+	bool voltage_control_svi2;
+	bool vddci_control_svi2;
+	/* smc offsets */
+	u32 sram_end;
+	u32 state_table_start;
+	u32 soft_regs_start;
+	u32 mc_reg_table_start;
+	u32 arb_table_start;
+	u32 cac_table_start;
+	u32 dte_table_start;
+	u32 spll_table_start;
+	u32 papm_cfg_table_start;
+	u32 fan_table_start;
+	/* CAC stuff */
+	const struct si_cac_config_reg *cac_weights;
+	const struct si_cac_config_reg *lcac_config;
+	const struct si_cac_config_reg *cac_override;
+	const struct si_powertune_data *powertune_data;
+	struct si_dyn_powertune_data dyn_powertune_data;
+	/* DTE stuff */
+	struct si_dte_data dte_data;
+	/* scratch structs */
+	SMC_SIslands_MCRegisters smc_mc_reg_table;
+	SISLANDS_SMC_STATETABLE smc_statetable;
+	PP_SIslands_PAPMParameters papm_parm;
+	/* SVI2 */
+	u8 svd_gpio_id;
+	u8 svc_gpio_id;
+	/* fan control */
+	bool fan_ctrl_is_in_default_mode;
+	u32 t_min;
+	u32 fan_ctrl_default_mode;
+	bool fan_is_controlled_by_smc;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
new file mode 100644
index 0000000..8fae3d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "si/sid.h"
+#include "si_ih.h"
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+static void si_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	adev->irq.ih.enabled = true;
+}
+
+static void si_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	adev->irq.ih.enabled = false;
+	adev->irq.ih.rptr = 0;
+}
+
+static int si_ih_irq_init(struct amdgpu_device *adev)
+{
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+	u64 wptr_off;
+
+	si_ih_disable_interrupts(adev);
+	WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+
+	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
+		     IH_WPTR_OVERFLOW_CLEAR |
+		     (rb_bufsz << 1) |
+		     IH_WPTR_WRITEBACK_ENABLE;
+
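+	/* the updated wptr is written back to system memory so the CPU
+	 * can read it without a register access
+	 */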
+	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+	if (adev->irq.msi_enabled)
+		ih_cntl |= RPTR_REARM;
+	WREG32(IH_CNTL, ih_cntl);
+
+	pci_set_master(adev->pdev);
+	si_ih_enable_interrupts(adev);
+
+	return 0;
+}
+
+static void si_ih_irq_disable(struct amdgpu_device *adev)
+{
+	si_ih_disable_interrupts(adev);
+	mdelay(1);
+}
+
+static u32 si_ih_get_wptr(struct amdgpu_device *adev)
+{
+	u32 wptr, tmp;
+
+	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
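+	/* when the ring buffer overflows, start reading from the last
+	 * vector that was not overwritten (wptr + 16) and acknowledge by
+	 * setting WPTR_OVERFLOW_CLEAR
+	 */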
+	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
+		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
+		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & adev->irq.ih.ptr_mask);
+}
+
+static void si_ih_decode_iv(struct amdgpu_device *adev,
+			     struct amdgpu_iv_entry *entry)
+{
+	u32 ring_index = adev->irq.ih.rptr >> 2;
+	uint32_t dw[4];
+
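+	/* each IV ring entry is four dwords: source id, source data,
+	 * ring/vm id, and a fourth dword that is not used here
+	 */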
+	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+	entry->src_id = dw[0] & 0xff;
+	entry->src_data = dw[1] & 0xfffffff;
+	entry->ring_id = dw[2] & 0xff;
+	entry->vm_id = (dw[2] >> 8) & 0xff;
+
+	adev->irq.ih.rptr += 16;
+}
+
+static void si_ih_set_rptr(struct amdgpu_device *adev)
+{
+	WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int si_ih_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	si_ih_set_interrupt_funcs(adev);
+
+	return 0;
+}
+
+static int si_ih_sw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+	if (r)
+		return r;
+
+	return amdgpu_irq_init(adev);
+}
+
+static int si_ih_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_irq_fini(adev);
+	amdgpu_ih_ring_fini(adev);
+
+	return 0;
+}
+
+static int si_ih_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_ih_irq_init(adev);
+}
+
+static int si_ih_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	si_ih_irq_disable(adev);
+
+	return 0;
+}
+
+static int si_ih_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_ih_hw_fini(adev);
+}
+
+static int si_ih_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_ih_hw_init(adev);
+}
+
+static bool si_ih_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+		return false;
+
+	return true;
+}
+
+static int si_ih_wait_for_idle(void *handle)
+{
+	unsigned i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (si_ih_is_idle(handle))
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static int si_ih_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+	u32 tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
+
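+	/* pulse the reset bit: set it, wait, then clear it again; the
+	 * read-backs ensure the writes have posted
+	 */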
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+	}
+
+	return 0;
+}
+
+static int si_ih_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int si_ih_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs si_ih_ip_funcs = {
+	.name = "si_ih",
+	.early_init = si_ih_early_init,
+	.late_init = NULL,
+	.sw_init = si_ih_sw_init,
+	.sw_fini = si_ih_sw_fini,
+	.hw_init = si_ih_hw_init,
+	.hw_fini = si_ih_hw_fini,
+	.suspend = si_ih_suspend,
+	.resume = si_ih_resume,
+	.is_idle = si_ih_is_idle,
+	.wait_for_idle = si_ih_wait_for_idle,
+	.soft_reset = si_ih_soft_reset,
+	.set_clockgating_state = si_ih_set_clockgating_state,
+	.set_powergating_state = si_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs si_ih_funcs = {
+	.get_wptr = si_ih_get_wptr,
+	.decode_iv = si_ih_decode_iv,
+	.set_rptr = si_ih_set_rptr
+};
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+	if (adev->irq.ih_funcs == NULL)
+		adev->irq.ih_funcs = &si_ih_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
similarity index 71%
copy from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
copy to drivers/gpu/drm/amd/amdgpu/si_ih.h
index d24b888..f3e3a95 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -20,16 +20,10 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
 
-#include "hwmgr.h"
+#ifndef __SI_IH_H__
+#define __SI_IH_H__
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-				struct pp_power_state *, void *, uint32_t));
+extern const struct amd_ip_funcs si_ih_ip_funcs;
 
 #endif
-
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
new file mode 100644
index 0000000..668ba99
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_smc.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "si/sid.h"
+#include "ppsmc.h"
+#include "amdgpu_ucode.h"
+#include "sislands_smc.h"
+
+static int si_set_smc_sram_address(struct amdgpu_device *adev,
+				   u32 smc_address, u32 limit)
+{
+	if (smc_address & 3)
+		return -EINVAL;
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
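+	/* program the indirect index with auto-increment disabled; callers
+	 * hold smc_idx_lock around this and the following data access
+	 */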
+	WREG32(SMC_IND_INDEX_0, smc_address);
+	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+	return 0;
+}
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+				u32 smc_start_address,
+				const u8 *src, u32 byte_count, u32 limit)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 data, original_data, addr, extra_shift;
+
+	if (smc_start_address & 3)
+		return -EINVAL;
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	while (byte_count >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		ret = si_set_smc_sram_address(adev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(SMC_IND_DATA_0, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	/* RMW for the final 1-3 bytes: pack them big-endian at the top of
+	 * the dword and preserve the original low-order bytes
+	 */
+	if (byte_count > 0) {
+		data = 0;
+
+		ret = si_set_smc_sram_address(adev, addr, limit);
+		if (ret)
+			goto done;
+
+		original_data = RREG32(SMC_IND_DATA_0);
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			/* SMC address space is BE */
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+		data |= (original_data & ~((~0UL) << extra_shift));
+
+		ret = si_set_smc_sram_address(adev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(SMC_IND_DATA_0, data);
+	}
+
+done:
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return ret;
+}
+
+void amdgpu_si_start_smc(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+	tmp &= ~RST_REG;
+
+	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void amdgpu_si_reset_smc(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
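+	/* dummy reads, presumably to flush any posted writes before
+	 * asserting the reset
+	 */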
+	RREG32(CB_CGTT_SCLK_CTRL);
+	RREG32(CB_CGTT_SCLK_CTRL);
+	RREG32(CB_CGTT_SCLK_CTRL);
+	RREG32(CB_CGTT_SCLK_CTRL);
+
+	tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
+	      RST_REG;
+	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
+{
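+	/* 4-byte boot stub written at SMC address 0, assumed to encode a
+	 * jump to the firmware entry point
+	 */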
+	static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
+
+	return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+	if (enable)
+		tmp &= ~CK_DISABLE;
+	else
+		tmp |= CK_DISABLE;
+
+	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
+{
+	u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+	u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+	if (!(rst & RST_REG) && !(clk & CK_DISABLE))
+		return true;
+
+	return false;
+}
+
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
+				       PPSMC_Msg msg)
+{
+	u32 tmp;
+	int i;
+
+	if (!amdgpu_si_is_smc_running(adev))
+		return PPSMC_Result_Failed;
+
+	WREG32(SMC_MESSAGE_0, msg);
+
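+	/* poll for a non-zero response; on timeout the last value read
+	 * from SMC_RESP_0 (possibly still zero) is returned
+	 */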
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(SMC_RESP_0);
+		if (tmp != 0)
+			break;
+		udelay(1);
+	}
+
+	return (PPSMC_Result)RREG32(SMC_RESP_0);
+}
+
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+	u32 tmp;
+	int i;
+
+	if (!amdgpu_si_is_smc_running(adev))
+		return PPSMC_Result_OK;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+		if ((tmp & CKEN) == 0)
+			break;
+		udelay(1);
+	}
+
+	return PPSMC_Result_OK;
+}
+
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
+{
+	const struct smc_firmware_header_v1_0 *hdr;
+	unsigned long flags;
+	u32 ucode_start_address;
+	u32 ucode_size;
+	const u8 *src;
+	u32 data;
+
+	if (!adev->pm.fw)
+		return -EINVAL;
+
+	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+
+	amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+	src = (const u8 *)
+		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+	if (ucode_size & 3)
+		return -EINVAL;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(SMC_IND_INDEX_0, ucode_start_address);
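+	/* enable auto-increment so each data write below advances the SMC
+	 * SRAM address by one dword
+	 */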
+	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+	while (ucode_size >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		WREG32(SMC_IND_DATA_0, data);
+
+		src += 4;
+		ucode_size -= 4;
+	}
+	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return 0;
+}
+
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+				  u32 *value, u32 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	ret = si_set_smc_sram_address(adev, smc_address, limit);
+	if (ret == 0)
+		*value = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return ret;
+}
+
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+				   u32 value, u32 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	ret = si_set_smc_sram_address(adev, smc_address, limit);
+	if (ret == 0)
+		WREG32(SMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
new file mode 100644
index 0000000..ee4b846
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
@@ -0,0 +1,422 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef PP_SISLANDS_SMC_H
+#define PP_SISLANDS_SMC_H
+
+#include "ppsmc.h"
+
+#pragma pack(push, 1)
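+/* Everything below mirrors the SMC firmware's view of memory, hence the
+ * byte packing.
+ */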
+
+#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+
+struct PP_SIslands_Dpm2PerfLevel
+{
+    uint8_t MaxPS;
+    uint8_t TgtAct;
+    uint8_t MaxPS_StepInc;
+    uint8_t MaxPS_StepDec;
+    uint8_t PSSamplingTime;
+    uint8_t NearTDPDec;
+    uint8_t AboveSafeInc;
+    uint8_t BelowSafeInc;
+    uint8_t PSDeltaLimit;
+    uint8_t PSDeltaWin;
+    uint16_t PwrEfficiencyRatio;
+    uint8_t Reserved[4];
+};
+
+typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
+
+struct PP_SIslands_DPM2Status
+{
+    uint32_t    dpm2Flags;
+    uint8_t     CurrPSkip;
+    uint8_t     CurrPSkipPowerShift;
+    uint8_t     CurrPSkipTDP;
+    uint8_t     CurrPSkipOCP;
+    uint8_t     MaxSPLLIndex;
+    uint8_t     MinSPLLIndex;
+    uint8_t     CurrSPLLIndex;
+    uint8_t     InfSweepMode;
+    uint8_t     InfSweepDir;
+    uint8_t     TDPexceeded;
+    uint8_t     reserved;
+    uint8_t     SwitchDownThreshold;
+    uint32_t    SwitchDownCounter;
+    uint32_t    SysScalingFactor;
+};
+
+typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
+
+struct PP_SIslands_DPM2Parameters
+{
+    uint32_t    TDPLimit;
+    uint32_t    NearTDPLimit;
+    uint32_t    SafePowerLimit;
+    uint32_t    PowerBoostLimit;
+    uint32_t    MinLimitDelta;
+};
+typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
+
+struct PP_SIslands_PAPMStatus
+{
+    uint32_t    EstimatedDGPU_T;
+    uint32_t    EstimatedDGPU_P;
+    uint32_t    EstimatedAPU_T;
+    uint32_t    EstimatedAPU_P;
+    uint8_t     dGPU_T_Limit_Exceeded;
+    uint8_t     reserved[3];
+};
+typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
+
+struct PP_SIslands_PAPMParameters
+{
+    uint32_t    NearTDPLimitTherm;
+    uint32_t    NearTDPLimitPAPM;
+    uint32_t    PlatformPowerLimit;
+    uint32_t    dGPU_T_Limit;
+    uint32_t    dGPU_T_Warning;
+    uint32_t    dGPU_T_Hysteresis;
+};
+typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
+
+struct SISLANDS_SMC_SCLK_VALUE
+{
+    uint32_t    vCG_SPLL_FUNC_CNTL;
+    uint32_t    vCG_SPLL_FUNC_CNTL_2;
+    uint32_t    vCG_SPLL_FUNC_CNTL_3;
+    uint32_t    vCG_SPLL_FUNC_CNTL_4;
+    uint32_t    vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t    vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t    sclk_value;
+};
+
+typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
+
+struct SISLANDS_SMC_MCLK_VALUE
+{
+    uint32_t    vMPLL_FUNC_CNTL;
+    uint32_t    vMPLL_FUNC_CNTL_1;
+    uint32_t    vMPLL_FUNC_CNTL_2;
+    uint32_t    vMPLL_AD_FUNC_CNTL;
+    uint32_t    vMPLL_DQ_FUNC_CNTL;
+    uint32_t    vMCLK_PWRMGT_CNTL;
+    uint32_t    vDLL_CNTL;
+    uint32_t    vMPLL_SS;
+    uint32_t    vMPLL_SS2;
+    uint32_t    mclk_value;
+};
+
+typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
+
+struct SISLANDS_SMC_VOLTAGE_VALUE
+{
+    uint16_t    value;
+    uint8_t     index;
+    uint8_t     phase_settings;
+};
+
+typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
+
+struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                     ACIndex;
+    uint8_t                     displayWatermark;
+    uint8_t                     gen2PCIE;
+    uint8_t                     UVDWatermark;
+    uint8_t                     VCEWatermark;
+    uint8_t                     strobeMode;
+    uint8_t                     mcFlags;
+    uint8_t                     padding;
+    uint32_t                    aT;
+    uint32_t                    bSP;
+    SISLANDS_SMC_SCLK_VALUE     sclk;
+    SISLANDS_SMC_MCLK_VALUE     mclk;
+    SISLANDS_SMC_VOLTAGE_VALUE  vddc;
+    SISLANDS_SMC_VOLTAGE_VALUE  mvdd;
+    SISLANDS_SMC_VOLTAGE_VALUE  vddci;
+    SISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
+    uint8_t                     hysteresisUp;
+    uint8_t                     hysteresisDown;
+    uint8_t                     stateFlags;
+    uint8_t                     arbRefreshState;
+    uint32_t                    SQPowerThrottle;
+    uint32_t                    SQPowerThrottle_2;
+    uint32_t                    MaxPoweredUpCU;
+    SISLANDS_SMC_VOLTAGE_VALUE  high_temp_vddc;
+    SISLANDS_SMC_VOLTAGE_VALUE  low_temp_vddc;
+    uint32_t                    reserved[2];
+    PP_SIslands_Dpm2PerfLevel   dpm2;
+};
+
+#define SISLANDS_SMC_STROBE_RATIO    0x0F
+#define SISLANDS_SMC_STROBE_ENABLE   0x10
+
+#define SISLANDS_SMC_MC_EDC_RD_FLAG  0x01
+#define SISLANDS_SMC_MC_EDC_WR_FLAG  0x02
+#define SISLANDS_SMC_MC_RTT_ENABLE   0x04
+#define SISLANDS_SMC_MC_STUTTER_EN   0x08
+#define SISLANDS_SMC_MC_PG_EN        0x10
+
+typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct SISLANDS_SMC_SWSTATE
+{
+    uint8_t                             flags;
+    uint8_t                             levelCount;
+    uint8_t                             padding2;
+    uint8_t                             padding3;
+    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
+};
+
+typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
+
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
+#define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
+#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_MAX   4
+
+struct SISLANDS_SMC_VOLTAGEMASKTABLE
+{
+    uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define SISLANDS_MAX_NO_VREG_STEPS 32
+
+struct SISLANDS_SMC_STATETABLE
+{
+    uint8_t                             thermalProtectType;
+    uint8_t                             systemFlags;
+    uint8_t                             maxVDDCIndexInPPTable;
+    uint8_t                             extraFlags;
+    uint32_t                            lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+    SISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+    SISLANDS_SMC_VOLTAGEMASKTABLE       phaseMaskTable;
+    PP_SIslands_DPM2Parameters          dpm2Params;
+    SISLANDS_SMC_SWSTATE                initialState;
+    SISLANDS_SMC_SWSTATE                ACPIState;
+    SISLANDS_SMC_SWSTATE                ULVState;
+    SISLANDS_SMC_SWSTATE                driverState;
+    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
+
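+/* Byte offsets of the driver-visible soft registers, presumably relative
+ * to the soft_regs_start offset read from the firmware header.
+ */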
+#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout         0x0
+#define SI_SMC_SOFT_REGISTER_delay_vreg               0xC
+#define SI_SMC_SOFT_REGISTER_delay_acpi               0x28
+#define SI_SMC_SOFT_REGISTER_seq_index                0x5C
+#define SI_SMC_SOFT_REGISTER_mvdd_chg_time            0x60
+#define SI_SMC_SOFT_REGISTER_mclk_switch_lim          0x70
+#define SI_SMC_SOFT_REGISTER_watermark_threshold      0x78
+#define SI_SMC_SOFT_REGISTER_phase_shedding_delay     0x88
+#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay    0x8C
+#define SI_SMC_SOFT_REGISTER_mc_block_delay           0x98
+#define SI_SMC_SOFT_REGISTER_ticks_per_us             0xA8
+#define SI_SMC_SOFT_REGISTER_crtc_index               0xC4
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
+#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width  0xF4
+#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen   0xFC
+#define SI_SMC_SOFT_REGISTER_vr_hot_gpio              0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type     0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd   0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc   0x120
+
+struct PP_SIslands_FanTable
+{
+	uint8_t  fdo_mode;
+	uint8_t  padding;
+	int16_t  temp_min;
+	int16_t  temp_med;
+	int16_t  temp_max;
+	int16_t  slope1;
+	int16_t  slope2;
+	int16_t  fdo_min;
+	int16_t  hys_up;
+	int16_t  hys_down;
+	int16_t  hys_slope;
+	int16_t  temp_resp_lim;
+	int16_t  temp_curr;
+	int16_t  slope_curr;
+	int16_t  pwm_curr;
+	uint32_t refresh_period;
+	int16_t  fdo_max;
+	uint8_t  temp_src;
+	int8_t  padding2;
+};
+
+typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
+
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
+
+#define SMC_SISLANDS_SCALE_I  7
+#define SMC_SISLANDS_SCALE_R 12
+
+struct PP_SIslands_CacConfig
+{
+    uint16_t   cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
+    uint32_t   lkge_lut_V0;
+    uint32_t   lkge_lut_Vstep;
+    uint32_t   WinTime;
+    uint32_t   R_LL;
+    uint32_t   calculation_repeats;
+    uint32_t   l2numWin_TDP;
+    uint32_t   dc_cac;
+    uint8_t    lts_truncate_n;
+    uint8_t    SHIFT_N;
+    uint8_t    log2_PG_LKG_SCALE;
+    uint8_t    cac_temp;
+    uint32_t   lkge_lut_T0;
+    uint32_t   lkge_lut_Tstep;
+};
+
+typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
+
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+
+struct SMC_SIslands_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
+
+struct SMC_SIslands_MCRegisterSet
+{
+    uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
+
+struct SMC_SIslands_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_SIslands_MCRegisterAddress      address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+    SMC_SIslands_MCRegisterSet          data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
+
+struct SMC_SIslands_MCArbDramTimingRegisterSet
+{
+    uint32_t mc_arb_dram_timing;
+    uint32_t mc_arb_dram_timing2;
+    uint8_t  mc_arb_rfsh_rate;
+    uint8_t  mc_arb_burst_time;
+    uint8_t  padding[2];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
+
+struct SMC_SIslands_MCArbDramTimingRegisters
+{
+    uint8_t                                     arb_current;
+    uint8_t                                     reserved[3];
+    SMC_SIslands_MCArbDramTimingRegisterSet     data[16];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
+
+struct SMC_SISLANDS_SPLL_DIV_TABLE
+{
+    uint32_t    freq[256];
+    uint32_t    ss[256];
+};
+
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK  0x01ffffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK   0xfe000000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT  25
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK   0x000fffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT  0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK   0xfff00000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT  20
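+/* Presumably freq[] entries are decoded with the FBDIV/PDIV fields above
+ * and ss[] entries with the CLKV/CLKS fields.
+ */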
+
+typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
+
+#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
+
+#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
+
+struct Smc_SIslands_DTE_Configuration
+{
+    uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+    uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+    uint32_t K;
+    uint32_t T0;
+    uint32_t MaxT;
+    uint8_t  WindowSize;
+    uint8_t  Tdep_count;
+    uint8_t  temp_select;
+    uint8_t  DTE_mode;
+    uint8_t  T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tthreshold;
+};
+
+typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
+
+#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
+
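+/* Byte offsets of the table pointers within the firmware header. */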
+#define SISLANDS_SMC_FIRMWARE_HEADER_version                   0x0
+#define SISLANDS_SMC_FIRMWARE_HEADER_flags                     0x4
+#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters             0xC
+#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable                0x10
+#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable                  0x14
+#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable            0x18
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable           0x24
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
+#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable                 0x38
+#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration          0x40
+#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters            0x48
+
+#pragma pack(pop)
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+				u32 smc_start_address,
+				const u8 *src, u32 byte_count, u32 limit);
+void amdgpu_si_start_smc(struct amdgpu_device *adev);
+void amdgpu_si_reset_smc(struct amdgpu_device *adev);
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev);
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable);
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev);
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev);
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+				  u32 *value, u32 limit);
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+				   u32 value, u32 limit);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index c920558..d127d59 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,10 +373,10 @@
 	return -ETIMEDOUT;
 }
 
-static int tonga_ih_soft_reset(void *handle)
+static int tonga_ih_check_soft_reset(void *handle)
 {
-	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
 	u32 tmp = RREG32(mmSRBM_STATUS);
 
 	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -384,6 +384,48 @@
 						SOFT_RESET_IH, 1);
 
 	if (srbm_soft_reset) {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
+		adev->irq.srbm_soft_reset = srbm_soft_reset;
+	} else {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
+		adev->irq.srbm_soft_reset = 0;
+	}
+
+	return 0;
+}
+
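+/* The IH reset flow is staged: check_soft_reset() records whether the
+ * block is hung, pre_soft_reset() tears it down, soft_reset() pulses the
+ * SRBM reset bit, and post_soft_reset() brings it back up.
+ */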
+static int tonga_ih_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+		return 0;
+
+	return tonga_ih_hw_fini(adev);
+}
+
+static int tonga_ih_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+		return 0;
+
+	return tonga_ih_hw_init(adev);
+}
+
+static int tonga_ih_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+		return 0;
+	srbm_soft_reset = adev->irq.srbm_soft_reset;
+
+	if (srbm_soft_reset) {
+		u32 tmp;
+
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -427,7 +469,10 @@
 	.resume = tonga_ih_resume,
 	.is_idle = tonga_ih_is_idle,
 	.wait_for_idle = tonga_ih_wait_for_idle,
+	.check_soft_reset = tonga_ih_check_soft_reset,
+	.pre_soft_reset = tonga_ih_pre_soft_reset,
 	.soft_reset = tonga_ih_soft_reset,
+	.post_soft_reset = tonga_ih_post_soft_reset,
 	.set_clockgating_state = tonga_ih_set_clockgating_state,
 	.set_powergating_state = tonga_ih_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 132e613..f6c9415 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,7 +116,7 @@
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
 			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
 	return r;
@@ -526,6 +526,20 @@
 	amdgpu_ring_write(ring, ib->length_dw);
 }
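+/* Worst-case ring usage in dwords per IB emit and per frame; presumably
+ * used by the ring layer to reserve space before emitting.
+ */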
 
+static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		4; /* uvd_v4_2_ring_emit_ib */
+}
+
+static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		2 + /* uvd_v4_2_ring_emit_hdp_flush */
+		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+		14; /* uvd_v4_2_ring_emit_fence  x1 no user fence */
+}
+
 /**
  * uvd_v4_2_mc_resume - memory controller programming
  *
@@ -756,6 +770,8 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
+	.get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
+	.get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 101de136..400c16f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -112,7 +112,7 @@
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
 			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
 	return r;
@@ -577,6 +577,20 @@
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
+static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		6; /* uvd_v5_0_ring_emit_ib */
+}
+
+static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		2 + /* uvd_v5_0_ring_emit_hdp_flush */
+		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+		14; /* uvd_v5_0_ring_emit_fence  x1 no user fence */
+}
+
 static bool uvd_v5_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -807,6 +821,8 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
+	.get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7f21102..e0fd9f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -116,7 +116,7 @@
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
 			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
 	return r;
@@ -396,21 +396,14 @@
 
 	uvd_v6_0_mc_resume(adev);
 
-	/* Set dynamic clock gating in S/W control mode */
-	if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
-		uvd_v6_0_set_sw_clock_gating(adev);
-	} else {
-		/* disable clock gating */
-		uint32_t data = RREG32(mmUVD_CGC_CTRL);
-		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-		WREG32(mmUVD_CGC_CTRL, data);
-	}
+	/* disable clock gating */
+	WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);
 
 	/* disable interupt */
-	WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
+	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
 
 	/* stall UMC and register bus before resetting VCPU */
-	WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
 	mdelay(1);
 
 	/* put LMI, VCPU, RBC etc... into reset */
@@ -426,7 +419,7 @@
 	mdelay(5);
 
 	/* take UVD block out of reset */
-	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
 	mdelay(5);
 
 	/* initialize UVD memory controller */
@@ -461,7 +454,7 @@
 	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
 
 	/* enable UMC */
-	WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);
 
 	/* boot up the VCPU */
 	WREG32(mmUVD_SOFT_RESET, 0);
@@ -481,11 +474,9 @@
 			break;
 
 		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
-		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
-				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
 		mdelay(10);
-		WREG32_P(mmUVD_SOFT_RESET, 0,
-			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
 		mdelay(10);
 		r = -1;
 	}
@@ -502,15 +493,14 @@
 	/* clear the bit 4 of UVD_STATUS */
 	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
 
+	/* force RBC into idle state */
 	rb_bufsz = order_base_2(ring->ring_size);
-	tmp = 0;
-	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
-	/* force RBC into idle state */
 	WREG32(mmUVD_RBC_RB_CNTL, tmp);
 
 	/* set the write pointer delay */
@@ -531,7 +521,7 @@
 	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
 	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
 
-	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
 
 	return 0;
 }
@@ -735,6 +725,31 @@
 	amdgpu_ring_write(ring, 0xE);
 }
 
+static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		8; /* uvd_v6_0_ring_emit_ib */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+	return
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		20 + /* uvd_v6_0_ring_emit_vm_flush */
+		14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
+}
+
 static bool uvd_v6_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -748,20 +763,82 @@
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
+		if (uvd_v6_0_is_idle(handle))
 			return 0;
 	}
 	return -ETIMEDOUT;
 }
 
-static int uvd_v6_0_soft_reset(void *handle)
+#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
+static int uvd_v6_0_check_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+	u32 tmp = RREG32(mmSRBM_STATUS);
+
+	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
+	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
+	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
+
+	if (srbm_soft_reset) {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
+		adev->uvd.srbm_soft_reset = srbm_soft_reset;
+	} else {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
+		adev->uvd.srbm_soft_reset = 0;
+	}
+	return 0;
+}
+
+static int uvd_v6_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	uvd_v6_0_stop(adev);
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+		return 0;
 
-	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
-			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+	uvd_v6_0_stop(adev);
+	return 0;
+}
+
+static int uvd_v6_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+		return 0;
+	srbm_soft_reset = adev->uvd.srbm_soft_reset;
+
+	if (srbm_soft_reset) {
+		u32 tmp;
+
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		/* Wait a little for things to settle down */
+		udelay(50);
+	}
+
+	return 0;
+}
+
+static int uvd_v6_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+		return 0;
+
 	mdelay(5);
 
 	return uvd_v6_0_start(adev);
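
Splitting the reset path into check/pre/soft/post hooks, with the verdict cached in ip_block_status[...].hang, lets the core reset only the blocks that actually reported a hang. The driver-core loop that sequences the hooks is not part of this diff, so the sketch below is an assumption about the intended orchestration, with stubbed hooks standing in for the UVD callbacks.

#include <stdbool.h>
#include <stdio.h>

struct ip_reset_hooks {
	const char *name;
	bool hang;                             /* mirrors ip_block_status[...].hang */
	int (*check)(struct ip_reset_hooks *); /* latch hang state                  */
	int (*pre)(struct ip_reset_hooks *);   /* quiesce, e.g. uvd_v6_0_stop()     */
	int (*reset)(struct ip_reset_hooks *); /* pulse the SRBM soft-reset bits    */
	int (*post)(struct ip_reset_hooks *);  /* re-init, e.g. uvd_v6_0_start()    */
};

static int uvd_check(struct ip_reset_hooks *h) { h->hang = true; return 0; }
static int uvd_pre(struct ip_reset_hooks *h)   { (void)h; return 0; }
static int uvd_reset(struct ip_reset_hooks *h) { (void)h; return 0; }
static int uvd_post(struct ip_reset_hooks *h)  { h->hang = false; return 0; }

/* Only blocks whose check() flagged a hang go through the full
 * pre -> reset -> post sequence; idle blocks are left alone, which is
 * exactly what the early 'if (!...hang) return 0;' tests implement. */
static int reset_hung_blocks(struct ip_reset_hooks *b, int n)
{
	int i, r;

	for (i = 0; i < n; i++)
		b[i].check(&b[i]);

	for (i = 0; i < n; i++) {
		if (!b[i].hang)
			continue;
		if ((r = b[i].pre(&b[i])) ||
		    (r = b[i].reset(&b[i])) ||
		    (r = b[i].post(&b[i])))
			return r;
		printf("%s: soft reset completed\n", b[i].name);
	}
	return 0;
}

int main(void)
{
	struct ip_reset_hooks uvd = {
		"uvd", false, uvd_check, uvd_pre, uvd_reset, uvd_post
	};

	return reset_hung_blocks(&uvd, 1);
}
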
@@ -902,21 +979,15 @@
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-	static int curstate = -1;
 
 	if (adev->asic_type == CHIP_FIJI ||
-			adev->asic_type == CHIP_POLARIS10)
-		uvd_v6_set_bypass_mode(adev, enable);
+	    adev->asic_type == CHIP_POLARIS10)
+		uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE);
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 
-	if (curstate == state)
-		return 0;
-
-	curstate = state;
-	if (enable) {
+	if (state == AMD_CG_STATE_GATE) {
 		/* disable HW gating and enable Sw gating */
 		uvd_v6_0_set_sw_clock_gating(adev);
 	} else {
@@ -946,6 +1017,8 @@
 	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
 		return 0;
 
+	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
+
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v6_0_stop(adev);
 		return 0;
@@ -966,7 +1039,10 @@
 	.resume = uvd_v6_0_resume,
 	.is_idle = uvd_v6_0_is_idle,
 	.wait_for_idle = uvd_v6_0_wait_for_idle,
+	.check_soft_reset = uvd_v6_0_check_soft_reset,
+	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
 	.soft_reset = uvd_v6_0_soft_reset,
+	.post_soft_reset = uvd_v6_0_post_soft_reset,
 	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
 	.set_powergating_state = uvd_v6_0_set_powergating_state,
 };
@@ -986,6 +1062,8 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
+	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
@@ -1005,6 +1083,8 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
+	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 80a37a6..76e64ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -30,16 +30,17 @@
 #include "amdgpu.h"
 #include "amdgpu_vce.h"
 #include "cikd.h"
-
 #include "vce/vce_2_0_d.h"
 #include "vce/vce_2_0_sh_mask.h"
-
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
 #define VCE_V2_0_FW_SIZE	(256 * 1024)
 #define VCE_V2_0_STACK_SIZE	(64 * 1024)
 #define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
 
 static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -96,6 +97,49 @@
 		WREG32(mmVCE_RB_WPTR2, ring->wptr);
 }
 
+static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			uint32_t status = RREG32(mmVCE_LMI_STATUS);
+
+			if (status & 0x337f)
+				return 0;
+			mdelay(10);
+		}
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			uint32_t status = RREG32(mmVCE_STATUS);
+
+			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+				return 0;
+			mdelay(10);
+		}
+
+		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+		WREG32_P(mmVCE_SOFT_RESET,
+			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+		WREG32_P(mmVCE_SOFT_RESET, 0,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+	}
+
+	return -ETIMEDOUT;
+}
+
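
Both new helpers are instances of the same bounded-poll shape, ten outer retries around one hundred 10 ms polls, with vce_v2_0_firmware_loaded() additionally pulsing the ECPU reset between outer retries. A generic sketch of that shape, under the assumption that a predicate-based helper would be acceptable (the in-tree code keeps the loops open-coded):

#include <stdbool.h>
#include <stdio.h>

/* Bounded poll: call check() up to 'tries' times with a delay between
 * attempts; returns 0 on success and -1 on exhaustion, which callers
 * would map to -ETIMEDOUT. */
static int poll_until(bool (*check)(void *), void *arg, int tries,
		      void (*delay)(int), int ms)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (check(arg))
			return 0;
		delay(ms);
	}
	return -1;
}

static bool third_try(void *arg) { return --*(int *)arg == 0; }
static void no_delay(int ms)     { (void)ms; /* stub for the sketch */ }

int main(void)
{
	int countdown = 3;

	printf("poll result: %d\n",
	       poll_until(third_try, &countdown, 10 * 100, no_delay, 10));
	return 0;
}
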
 /**
  * vce_v2_0_start - start VCE block
  *
@@ -106,7 +150,7 @@
 static int vce_v2_0_start(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	int i, j, r;
+	int r;
 
 	vce_v2_0_mc_resume(adev);
 
@@ -127,36 +171,12 @@
 	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
-	WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-
-	WREG32_P(mmVCE_SOFT_RESET,
-		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
+	WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
+	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
 	mdelay(100);
+	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
 
-	WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-	for (i = 0; i < 10; ++i) {
-		uint32_t status;
-		for (j = 0; j < 100; ++j) {
-			status = RREG32(mmVCE_STATUS);
-			if (status & 2)
-				break;
-			mdelay(10);
-		}
-		r = 0;
-		if (status & 2)
-			break;
-
-		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-		mdelay(10);
-		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-		mdelay(10);
-		r = -1;
-	}
+	r = vce_v2_0_firmware_loaded(adev);
 
 	/* clear BUSY flag */
 	WREG32_P(mmVCE_STATUS, 0, ~1);
@@ -173,6 +193,8 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->vce.num_rings = 2;
+
 	vce_v2_0_set_ring_funcs(adev);
 	vce_v2_0_set_irq_funcs(adev);
 
@@ -182,7 +204,7 @@
 static int vce_v2_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
-	int r;
+	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* VCE */
@@ -199,19 +221,14 @@
 	if (r)
 		return r;
 
-	ring = &adev->vce.ring[0];
-	sprintf(ring->name, "vce0");
-	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-	if (r)
-		return r;
-
-	ring = &adev->vce.ring[1];
-	sprintf(ring->name, "vce1");
-	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-	if (r)
-		return r;
+	for (i = 0; i < adev->vce.num_rings; i++) {
+		ring = &adev->vce.ring[i];
+		sprintf(ring->name, "vce%d", i);
+		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+		if (r)
+			return r;
+	}
 
 	return r;
 }
@@ -234,29 +251,23 @@
 
 static int vce_v2_0_hw_init(void *handle)
 {
-	struct amdgpu_ring *ring;
-	int r;
+	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	r = vce_v2_0_start(adev);
+	/* this error means the VCPU is not in a running state, so just skip the ring test instead of failing driver init */
 	if (r)
-/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
 		return 0;
 
-	ring = &adev->vce.ring[0];
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
-		return r;
-	}
+	for (i = 0; i < adev->vce.num_rings; i++)
+		adev->vce.ring[i].ready = false;
 
-	ring = &adev->vce.ring[1];
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
-		return r;
+	for (i = 0; i < adev->vce.num_rings; i++) {
+		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		if (r)
+			return r;
+		adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
@@ -338,47 +349,50 @@
 
 static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
 {
-	u32 orig, tmp;
-
-	if (gated) {
-		if (vce_v2_0_wait_for_idle(adev)) {
-			DRM_INFO("VCE is busy, Can't set clock gateing");
-			return;
-		}
-		WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-		mdelay(100);
-		WREG32(mmVCE_STATUS, 0);
-	} else {
-		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-		mdelay(100);
+	if (vce_v2_0_wait_for_idle(adev)) {
+		DRM_INFO("VCE is busy, Can't set clock gateing");
+		return;
 	}
 
-	tmp = RREG32(mmVCE_CLOCK_GATING_B);
-	tmp &= ~0x00060006;
-	if (gated) {
-		tmp |= 0xe10000;
-	} else {
-		tmp |= 0xe1;
-		tmp &= ~0xe10000;
+	WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
+
+	if (vce_v2_0_lmi_clean(adev)) {
+		DRM_INFO("LMI is busy, Can't set clock gateing");
+		return;
 	}
-	WREG32(mmVCE_CLOCK_GATING_B, tmp);
 
-	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-	tmp &= ~0x1fe000;
-	tmp &= ~0xff000000;
-	if (tmp != orig)
-		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-	tmp &= ~0x3fc;
-	if (tmp != orig)
-		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+	WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+	WREG32_P(mmVCE_SOFT_RESET,
+		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+	WREG32(mmVCE_STATUS, 0);
 
 	if (gated)
 		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-	WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+	/* LMI_MC/LMI_UMC always set to dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
+	if (gated) {
+		/* Force CLOCK OFF, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
+		WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
+	} else {
+		/* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
+		WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
+	}
+
+	/* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0} */
+	WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
+
+	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
+	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
+
+	WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
+	if (!gated) {
+		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+		mdelay(100);
+		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+		vce_v2_0_firmware_loaded(adev);
+		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+	}
 }
 
 static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
@@ -458,9 +472,7 @@
 	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
 
 	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
-	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
-		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
 
 	vce_v2_0_init_cg(adev);
 }
@@ -474,11 +486,11 @@
 
 static int vce_v2_0_wait_for_idle(void *handle)
 {
-	unsigned i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	unsigned i;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+		if (vce_v2_0_is_idle(handle))
 			return 0;
 	}
 	return -ETIMEDOUT;
@@ -488,8 +500,7 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
-			~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
 	mdelay(5);
 
 	return vce_v2_0_start(adev);
@@ -516,10 +527,8 @@
 	DRM_DEBUG("IH: VCE\n");
 	switch (entry->src_data) {
 	case 0:
-		amdgpu_fence_process(&adev->vce.ring[0]);
-		break;
 	case 1:
-		amdgpu_fence_process(&adev->vce.ring[1]);
+		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
 		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -530,11 +539,28 @@
 	return 0;
 }
 
+static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+	if (enable)
+		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+	else
+		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
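
RREG32_SMC()/WREG32_SMC() reach ixGCK_DFS_BYPASS_CNTL through the SMC's indirect index/data window rather than plain MMIO, which is why the register name carries an ix prefix instead of mm. A toy model of indirect register access follows; the offsets and bit are invented for the sketch, and the real accessors also serialize the index/data pair with a lock.

#include <stdint.h>
#include <stdio.h>

/* Toy indirect space: write the target offset to an INDEX register,
 * then read or write through the DATA register. */
static uint32_t smc_space[256];
static uint32_t smc_index;

static void     wreg_index(uint32_t ix) { smc_index = ix % 256; }
static uint32_t rreg_data(void)         { return smc_space[smc_index]; }
static void     wreg_data(uint32_t v)   { smc_space[smc_index] = v; }

static uint32_t rreg32_smc(uint32_t ix)             { wreg_index(ix); return rreg_data(); }
static void     wreg32_smc(uint32_t ix, uint32_t v) { wreg_index(ix); wreg_data(v); }

#define ixGCK_DFS_BYPASS_CNTL_SKETCH 0x42       /* illustrative offset */
#define BYPASSECLK_MASK_SKETCH       0x00000002 /* illustrative bit    */

int main(void)
{
	uint32_t tmp = rreg32_smc(ixGCK_DFS_BYPASS_CNTL_SKETCH);

	tmp |= BYPASSECLK_MASK_SKETCH; /* route ECLK around the DFS, as above */
	wreg32_smc(ixGCK_DFS_BYPASS_CNTL_SKETCH, tmp);
	printf("bypass cntl: 0x%08x\n", rreg32_smc(ixGCK_DFS_BYPASS_CNTL_SKETCH));
	return 0;
}
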
+
 static int vce_v2_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	bool gate = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE);
+
+	vce_v2_0_set_bypass_mode(adev, enable);
 
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
@@ -596,12 +622,16 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
+	.get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
+	.get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
-	adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
+	int i;
+
+	for (i = 0; i < adev->vce.num_rings; i++)
+		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index c271abf..a6b4e27 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,6 +37,9 @@
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
@@ -67,8 +70,10 @@
 
 	if (ring == &adev->vce.ring[0])
 		return RREG32(mmVCE_RB_RPTR);
-	else
+	else if (ring == &adev->vce.ring[1])
 		return RREG32(mmVCE_RB_RPTR2);
+	else
+		return RREG32(mmVCE_RB_RPTR3);
 }
 
 /**
@@ -84,8 +89,10 @@
 
 	if (ring == &adev->vce.ring[0])
 		return RREG32(mmVCE_RB_WPTR);
-	else
+	else if (ring == &adev->vce.ring[1])
 		return RREG32(mmVCE_RB_WPTR2);
+	else
+		return RREG32(mmVCE_RB_WPTR3);
 }
 
 /**
@@ -101,108 +108,80 @@
 
 	if (ring == &adev->vce.ring[0])
 		WREG32(mmVCE_RB_WPTR, ring->wptr);
-	else
+	else if (ring == &adev->vce.ring[1])
 		WREG32(mmVCE_RB_WPTR2, ring->wptr);
+	else
+		WREG32(mmVCE_RB_WPTR3, ring->wptr);
 }
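
With a third ring, each accessor grows another else-if arm. The explicit chain reads fine at three entries; for comparison, a purely illustrative table-driven form keyed by ring index (register offsets invented for the sketch):

#include <stdint.h>
#include <stdio.h>

#define mmVCE_RB_WPTR_SKETCH  0x8728 /* illustrative offsets, not the headers' */
#define mmVCE_RB_WPTR2_SKETCH 0x8730
#define mmVCE_RB_WPTR3_SKETCH 0x8738

static const uint32_t vce_wptr_reg[] = {
	mmVCE_RB_WPTR_SKETCH, mmVCE_RB_WPTR2_SKETCH, mmVCE_RB_WPTR3_SKETCH,
};

/* A ring-index field would replace the pointer comparisons against
 * &adev->vce.ring[0..2] used above. */
static uint32_t wptr_reg_for(unsigned ring_idx)
{
	return vce_wptr_reg[ring_idx];
}

int main(void)
{
	unsigned i;

	for (i = 0; i < 3; i++)
		printf("vce%u wptr register: 0x%04x\n", i, wptr_reg_for(i));
	return 0;
}
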
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
 {
-	u32 tmp, data;
-
-	tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
-	if (override)
-		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-	else
-		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-
-	if (tmp != data)
-		WREG32(mmVCE_RB_ARB_CTRL, data);
+	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
 }
 
 static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 					     bool gated)
 {
-	u32 tmp, data;
+	u32 data;
+
 	/* Set Override to disable Clock Gating */
 	vce_v3_0_override_vce_clock_gating(adev, true);
 
-	if (!gated) {
-		/* Force CLOCK ON for VCE_CLOCK_GATING_B,
-		 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
-		 * VREG can be FORCE ON or set to Dynamic, but can't be OFF
-		 */
-		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+	/* This function enables MGCG which is controlled by firmware.
+	 * With the clocks in the gated state the core is still
+	 * accessible but the firmware will throttle the clocks on the
+	 * fly as necessary.
+	 */
+	if (gated) {
+		data = RREG32(mmVCE_CLOCK_GATING_B);
 		data |= 0x1ff;
 		data &= ~0xef0000;
-		if (tmp != data)
-			WREG32(mmVCE_CLOCK_GATING_B, data);
+		WREG32(mmVCE_CLOCK_GATING_B, data);
 
-		/* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
-		 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
-		 */
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+		data = RREG32(mmVCE_UENC_CLOCK_GATING);
 		data |= 0x3ff000;
 		data &= ~0xffc00000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING, data);
+		WREG32(mmVCE_UENC_CLOCK_GATING, data);
 
-		/* set VCE_UENC_CLOCK_GATING_2 */
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
 		data |= 0x2;
-		data &= ~0x2;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+		data &= ~0x00010000;
+		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
-		/* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
-		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
 		data |= 0x37f;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
-		/* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
-		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
 		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
-				VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
-				VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
-				0x8;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
+			0x8;
+		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
 	} else {
-		/* Force CLOCK OFF for VCE_CLOCK_GATING_B,
-		 * {*, *_FORCE_OFF} = {*, 1}
-		 * set VREG to Dynamic, as it can't be OFF
-		 */
-		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+		data = RREG32(mmVCE_CLOCK_GATING_B);
 		data &= ~0x80010;
 		data |= 0xe70008;
-		if (tmp != data)
-			WREG32(mmVCE_CLOCK_GATING_B, data);
-		/* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
-		 * Force ClOCK OFF takes precedent over Force CLOCK ON setting.
-		 * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
-		 */
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+		WREG32(mmVCE_CLOCK_GATING_B, data);
+
+		data = RREG32(mmVCE_UENC_CLOCK_GATING);
 		data |= 0xffc00000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING, data);
-		/* Set VCE_UENC_CLOCK_GATING_2 */
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+		WREG32(mmVCE_UENC_CLOCK_GATING, data);
+
+		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
 		data |= 0x10000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
-		/* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
-		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+
+		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
 		data &= ~0xffc00000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
-		/* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
-		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+
+		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
 		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
-				VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
-				VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
-				0x8);
-		if (tmp != data)
-			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
+			  0x8);
+		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
 	}
 	vce_v3_0_override_vce_clock_gating(adev, false);
 }
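
The rewrite also drops the old 'write only if tmp != data' guards and writes the gating registers unconditionally: noticeably less code, at the cost of an occasional redundant MMIO write, which is cheap next to the register read already paid for. The retired guard pattern, as a sketch with stubbed accessors:

#include <stdint.h>

static uint32_t mmio[1];
static uint32_t rreg32(uint32_t reg)             { return mmio[reg]; }
static void     wreg32(uint32_t reg, uint32_t v) { mmio[reg] = v; }

/* Read-modify-write that skips the register write when the masked
 * update would not change anything, the shape the old code used. */
static void wreg32_if_changed(uint32_t reg, uint32_t set, uint32_t clr)
{
	uint32_t tmp = rreg32(reg);
	uint32_t data = (tmp & ~clr) | set;

	if (tmp != data)
		wreg32(reg, data);
}

int main(void)
{
	wreg32_if_changed(0, 0x3ff000, 0xffc00000); /* writes: value changes  */
	wreg32_if_changed(0, 0x3ff000, 0xffc00000); /* skipped: already equal */
	return 0;
}
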
@@ -221,12 +200,9 @@
 		}
 
 		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-		WREG32_P(mmVCE_SOFT_RESET,
-			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
 		mdelay(10);
-		WREG32_P(mmVCE_SOFT_RESET, 0,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
 		mdelay(10);
 	}
 
@@ -259,43 +235,34 @@
 	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
+	ring = &adev->vce.ring[2];
+	WREG32(mmVCE_RB_RPTR3, ring->wptr);
+	WREG32(mmVCE_RB_WPTR3, ring->wptr);
+	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (idx = 0; idx < 2; ++idx) {
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
-		if (idx == 0)
-			WREG32_P(mmGRBM_GFX_INDEX, 0,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-		else
-			WREG32_P(mmGRBM_GFX_INDEX,
-				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-
+		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
 		vce_v3_0_mc_resume(adev, idx);
-
-		WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
-		         ~VCE_STATUS__JOB_BUSY_MASK);
+		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
 		if (adev->asic_type >= CHIP_STONEY)
 			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
 		else
-			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
-				~VCE_VCPU_CNTL__CLK_EN_MASK);
+			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
 
-		WREG32_P(mmVCE_SOFT_RESET, 0,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
 		mdelay(100);
 
 		r = vce_v3_0_firmware_loaded(adev);
 
 		/* clear BUSY flag */
-		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
-
-		/* Set Clock-Gating off */
-		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
-			vce_v3_0_set_vce_sw_clock_gating(adev, false);
+		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
 
 		if (r) {
 			DRM_ERROR("VCE not responding, giving up!!!\n");
@@ -304,7 +271,7 @@
 		}
 	}
 
-	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -319,33 +286,25 @@
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
-		if (idx == 0)
-			WREG32_P(mmGRBM_GFX_INDEX, 0,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-		else
-			WREG32_P(mmGRBM_GFX_INDEX,
-				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
 
 		if (adev->asic_type >= CHIP_STONEY)
 			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
 		else
-			WREG32_P(mmVCE_VCPU_CNTL, 0,
-				~VCE_VCPU_CNTL__CLK_EN_MASK);
+			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
+
 		/* hold on ECPU */
-		WREG32_P(mmVCE_SOFT_RESET,
-			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
 
 		/* clear BUSY flag */
-		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
 
 		/* Set Clock-Gating off */
 		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
 			vce_v3_0_set_vce_sw_clock_gating(adev, false);
 	}
 
-	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -399,6 +358,8 @@
 	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
 		return -ENOENT;
 
+	adev->vce.num_rings = 3;
+
 	vce_v3_0_set_ring_funcs(adev);
 	vce_v3_0_set_irq_funcs(adev);
 
@@ -409,7 +370,7 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring;
-	int r;
+	int r, i;
 
 	/* VCE */
 	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
@@ -425,19 +386,14 @@
 	if (r)
 		return r;
 
-	ring = &adev->vce.ring[0];
-	sprintf(ring->name, "vce0");
-	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-	if (r)
-		return r;
-
-	ring = &adev->vce.ring[1];
-	sprintf(ring->name, "vce1");
-	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-	if (r)
-		return r;
+	for (i = 0; i < adev->vce.num_rings; i++) {
+		ring = &adev->vce.ring[i];
+		sprintf(ring->name, "vce%d", i);
+		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+		if (r)
+			return r;
+	}
 
 	return r;
 }
@@ -467,10 +423,10 @@
 	if (r)
 		return r;
 
-	adev->vce.ring[0].ready = false;
-	adev->vce.ring[1].ready = false;
+	for (i = 0; i < adev->vce.num_rings; i++)
+		adev->vce.ring[i].ready = false;
 
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < adev->vce.num_rings; i++) {
 		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
 		if (r)
 			return r;
@@ -534,7 +490,7 @@
 	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
 	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
 	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
-	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
 
 	WREG32(mmVCE_LMI_CTRL, 0x00398000);
 	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
@@ -573,9 +529,7 @@
 	}
 
 	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
-	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
-		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
 }
 
 static bool vce_v3_0_is_idle(void *handle)
@@ -601,20 +555,108 @@
 	return -ETIMEDOUT;
 }
 
+#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK  0x00000008L   /* AUTO_BUSY */
+#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK   0x00000010L   /* RB0_BUSY */
+#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK   0x00000020L   /* RB1_BUSY */
+#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
+				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
+
+static int vce_v3_0_check_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+
+	/* According to the VCE team, we should use VCE_STATUS instead of
+	 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
+	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
+	 * instance's registers are accessed
+	 * (0 for the 1st instance, 0x10 for the 2nd instance).
+	 *
+	 * VCE_STATUS
+	 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
+	 * |----+----+-----------+----+----+----+----------+---------+----|
+	 * |bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
+	 *
+	 * The VCE team suggests using bits 3-6 for the busy status check.
+	 */
+	mutex_lock(&adev->grbm_idx_mutex);
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+	}
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+	}
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+
+	if (srbm_soft_reset) {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
+		adev->vce.srbm_soft_reset = srbm_soft_reset;
+	} else {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
+		adev->vce.srbm_soft_reset = 0;
+	}
+	mutex_unlock(&adev->grbm_idx_mutex);
+	return 0;
+}
+
 static int vce_v3_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 mask = 0;
+	u32 srbm_soft_reset;
 
-	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
-	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+		return 0;
+	srbm_soft_reset = adev->vce.srbm_soft_reset;
 
-	WREG32_P(mmSRBM_SOFT_RESET, mask,
-		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
-		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
+	if (srbm_soft_reset) {
+		u32 tmp;
+
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		/* Wait a little for things to settle down */
+		udelay(50);
+	}
+
+	return 0;
+}
+
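
This assert/settle/deassert block is byte-for-byte the UVD sequence from earlier in the section. A shared pulse helper could factor it, sketched below; the helper name and stubbed accessors are inventions, and the in-tree code keeps per-IP copies.

#include <stdint.h>
#include <stdio.h>

static uint32_t srbm_soft_reset_reg;
static uint32_t rreg32(void)        { return srbm_soft_reset_reg; }
static void     wreg32(uint32_t v)  { srbm_soft_reset_reg = v; }
static void     udelay(unsigned us) { (void)us; /* stub for the sketch */ }

/* Pulse the given SRBM soft-reset bits: set, settle, clear, settle.
 * The read-back after each write models posting the MMIO write. */
static void srbm_pulse_reset(uint32_t bits)
{
	uint32_t tmp = rreg32();

	tmp |= bits;
	wreg32(tmp);
	tmp = rreg32();
	udelay(50);

	tmp &= ~bits;
	wreg32(tmp);
	tmp = rreg32();
	udelay(50);
}

int main(void)
{
	srbm_pulse_reset(1u << 20); /* an illustrative SOFT_RESET_VCEn-style bit */
	printf("final SRBM_SOFT_RESET: 0x%08x\n", srbm_soft_reset_reg);
	return 0;
}
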
+static int vce_v3_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+		return 0;
+
 	mdelay(5);
 
-	return vce_v3_0_start(adev);
+	return vce_v3_0_suspend(adev);
+}
+
+static int vce_v3_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+		return 0;
+
+	mdelay(5);
+
+	return vce_v3_0_resume(adev);
 }
 
 static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -637,13 +679,12 @@
 {
 	DRM_DEBUG("IH: VCE\n");
 
-	WREG32_P(mmVCE_SYS_INT_STATUS,
-		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
-		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
 
 	switch (entry->src_data) {
 	case 0:
 	case 1:
+	case 2:
 		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
 		break;
 	default:
@@ -655,7 +696,7 @@
 	return 0;
 }
 
-static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
 {
 	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
 
@@ -674,8 +715,9 @@
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 	int i;
 
-	if (adev->asic_type == CHIP_POLARIS10)
-		vce_v3_set_bypass_mode(adev, enable);
+	if ((adev->asic_type == CHIP_POLARIS10) ||
+	    (adev->asic_type == CHIP_TONGA))
+		vce_v3_0_set_bypass_mode(adev, enable);
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
 		return 0;
@@ -686,13 +728,7 @@
 		if (adev->vce.harvest_config & (1 << i))
 			continue;
 
-		if (i == 0)
-			WREG32_P(mmGRBM_GFX_INDEX, 0,
-					~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-		else
-			WREG32_P(mmGRBM_GFX_INDEX,
-					GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-					~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
 
 		if (enable) {
 			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -711,7 +747,7 @@
 		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
 	}
 
-	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -739,6 +775,60 @@
 		return vce_v3_0_start(adev);
 }
 
+static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
+	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, ib->length_dw);
+}
+
+static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
+			 unsigned int vm_id, uint64_t pd_addr)
+{
+	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
+	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, pd_addr >> 12);
+
+	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
+	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, VCE_CMD_END);
+}
+
+static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+	amdgpu_ring_write(ring, seq);
+}
+
+static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		5; /* vce_v3_0_ring_emit_ib */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+	return
+		6 + /* vce_v3_0_emit_vm_flush */
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
+}
+
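
These budgets can be cross-checked against the emit functions above: vce_v3_0_emit_vm_flush() writes 6 dwords, vce_v3_0_emit_pipeline_sync() writes 4, and each fence is budgeted at 6 per the comments. A tiny self-check under those counts:

#include <assert.h>
#include <stdio.h>

enum {
	VM_FLUSH_DW      = 6, /* UPDATE_PTB + vmid + pd, FLUSH_TLB + vmid + END */
	PIPELINE_SYNC_DW = 4, /* WAIT_GE + addr lo + addr hi + seq              */
	FENCE_DW         = 6, /* per the amdgpu_vce_ring_emit_fence comments    */
};

int main(void)
{
	unsigned frame    = PIPELINE_SYNC_DW + FENCE_DW;
	unsigned frame_vm = VM_FLUSH_DW + PIPELINE_SYNC_DW + 2 * FENCE_DW;

	assert(frame    == 4 + 6);         /* matches get_dma_frame_size()    */
	assert(frame_vm == 6 + 4 + 6 + 6); /* matches get_dma_frame_size_vm() */
	printf("frame=%u dwords, frame_vm=%u dwords\n", frame, frame_vm);
	return 0;
}
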
 const struct amd_ip_funcs vce_v3_0_ip_funcs = {
 	.name = "vce_v3_0",
 	.early_init = vce_v3_0_early_init,
@@ -751,12 +841,15 @@
 	.resume = vce_v3_0_resume,
 	.is_idle = vce_v3_0_is_idle,
 	.wait_for_idle = vce_v3_0_wait_for_idle,
+	.check_soft_reset = vce_v3_0_check_soft_reset,
+	.pre_soft_reset = vce_v3_0_pre_soft_reset,
 	.soft_reset = vce_v3_0_soft_reset,
+	.post_soft_reset = vce_v3_0_post_soft_reset,
 	.set_clockgating_state = vce_v3_0_set_clockgating_state,
 	.set_powergating_state = vce_v3_0_set_powergating_state,
 };
 
-static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
+static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
 	.get_rptr = vce_v3_0_ring_get_rptr,
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
@@ -769,12 +862,42 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
+	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
+};
+
+static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+	.get_rptr = vce_v3_0_ring_get_rptr,
+	.get_wptr = vce_v3_0_ring_get_wptr,
+	.set_wptr = vce_v3_0_ring_set_wptr,
+	.parse_cs = NULL,
+	.emit_ib = vce_v3_0_ring_emit_ib,
+	.emit_vm_flush = vce_v3_0_emit_vm_flush,
+	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
+	.emit_fence = amdgpu_vce_ring_emit_fence,
+	.test_ring = amdgpu_vce_ring_test_ring,
+	.test_ib = amdgpu_vce_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_vce_ring_begin_use,
+	.end_use = amdgpu_vce_ring_end_use,
+	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
-	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
+	int i;
+
+	if (adev->asic_type >= CHIP_STONEY) {
+		for (i = 0; i < adev->vce.num_rings; i++)
+			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+		DRM_INFO("VCE enabled in VM mode\n");
+	} else {
+		for (i = 0; i < adev->vce.num_rings; i++)
+			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+		DRM_INFO("VCE enabled in physical mode\n");
+	}
 }
 
 static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 03a31c5..b688e2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -77,6 +77,7 @@
 #if defined(CONFIG_DRM_AMD_ACP)
 #include "amdgpu_acp.h"
 #endif
+#include "dce_virtual.h"
 
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
@@ -822,6 +823,60 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &iceland_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &sdma_v2_4_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -890,6 +945,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 10,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 5,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &uvd_v5_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -958,6 +1081,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 5,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 10,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1026,6 +1217,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 11,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version cz_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1103,34 +1362,142 @@
 #endif
 };
 
+static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cz_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 11,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+#if defined(CONFIG_DRM_AMD_ACP)
+	{
+		.type = AMD_IP_BLOCK_TYPE_ACP,
+		.major = 2,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &acp_ip_funcs,
+	},
+#endif
+};
+
 int vi_set_ip_blocks(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_TOPAZ:
-		adev->ip_blocks = topaz_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
-		break;
-	case CHIP_FIJI:
-		adev->ip_blocks = fiji_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
-		break;
-	case CHIP_TONGA:
-		adev->ip_blocks = tonga_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
-		break;
-	case CHIP_POLARIS11:
-	case CHIP_POLARIS10:
-		adev->ip_blocks = polaris11_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
-		break;
-	case CHIP_CARRIZO:
-	case CHIP_STONEY:
-		adev->ip_blocks = cz_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
-		break;
-	default:
-		/* FIXME: not supported yet */
-		return -EINVAL;
+	if (adev->enable_virtual_display) {
+		switch (adev->asic_type) {
+		case CHIP_TOPAZ:
+			adev->ip_blocks = topaz_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
+			break;
+		case CHIP_FIJI:
+			adev->ip_blocks = fiji_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
+			break;
+		case CHIP_TONGA:
+			adev->ip_blocks = tonga_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
+			break;
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS10:
+			adev->ip_blocks = polaris11_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
+			break;
+		case CHIP_CARRIZO:
+		case CHIP_STONEY:
+			adev->ip_blocks = cz_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
+			break;
+		default:
+			/* FIXME: not supported yet */
+			return -EINVAL;
+		}
+	} else {
+		switch (adev->asic_type) {
+		case CHIP_TOPAZ:
+			adev->ip_blocks = topaz_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
+			break;
+		case CHIP_FIJI:
+			adev->ip_blocks = fiji_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+			break;
+		case CHIP_TONGA:
+			adev->ip_blocks = tonga_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
+			break;
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS10:
+			adev->ip_blocks = polaris11_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
+			break;
+		case CHIP_CARRIZO:
+		case CHIP_STONEY:
+			adev->ip_blocks = cz_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+			break;
+		default:
+			/* FIXME: not supported yet */
+			return -EINVAL;
+		}
 	}
 
 	return 0;
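
Both arms of the new conditional repeat the same asic_type switch and differ only in which array each case assigns. A table indexed by [asic][virtual_display] would collapse the duplication; the sketch below invents its own names and populates a single ASIC for illustration.

#include <stddef.h>
#include <stdio.h>

enum asic { ASIC_TOPAZ, ASIC_FIJI, ASIC_TONGA, NUM_ASICS };

struct ip_list {
	const char *const *blocks;
	size_t count;
};

static const char *const topaz_blocks[]    = { "common", "gmc", "ih", "smc", "dce", "gfx", "sdma" };
static const char *const topaz_blocks_vd[] = { "common", "gmc", "ih", "smc", "dce_virtual", "gfx", "sdma" };

/* One lookup keyed on [asic][use_virtual_display] replaces the two
 * parallel switch statements. */
static const struct ip_list ip_tables[NUM_ASICS][2] = {
	[ASIC_TOPAZ] = {
		{ topaz_blocks,    sizeof(topaz_blocks)    / sizeof(*topaz_blocks)    },
		{ topaz_blocks_vd, sizeof(topaz_blocks_vd) / sizeof(*topaz_blocks_vd) },
	},
};

int main(void)
{
	const struct ip_list *l = &ip_tables[ASIC_TOPAZ][1]; /* virtual display on */
	size_t i;

	for (i = 0; i < l->count; i++)
		printf("ip block %zu: %s\n", i, l->blocks[i]);
	return 0;
}
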
@@ -1248,8 +1615,17 @@
 			AMD_CG_SUPPORT_HDP_MGCG |
 			AMD_CG_SUPPORT_HDP_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
-			AMD_CG_SUPPORT_SDMA_LS;
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_VCE_MGCG;
+		/* rev0 hardware requires workarounds to support PG */
 		adev->pg_flags = 0;
+		if (adev->rev_id != 0x00) {
+			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+				AMD_PG_SUPPORT_GFX_SMG |
+				AMD_PG_SUPPORT_GFX_PIPELINE |
+				AMD_PG_SUPPORT_UVD |
+				AMD_PG_SUPPORT_VCE;
+		}
 		adev->external_rev_id = adev->rev_id + 0x1;
 		break;
 	case CHIP_STONEY:
@@ -1267,8 +1643,14 @@
 			AMD_CG_SUPPORT_HDP_MGCG |
 			AMD_CG_SUPPORT_HDP_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
-			AMD_CG_SUPPORT_SDMA_LS;
-		adev->external_rev_id = adev->rev_id + 0x1;
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_VCE_MGCG;
+		adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+			AMD_PG_SUPPORT_GFX_SMG |
+			AMD_PG_SUPPORT_GFX_PIPELINE |
+			AMD_PG_SUPPORT_UVD |
+			AMD_PG_SUPPORT_VCE;
+		adev->external_rev_id = adev->rev_id + 0x61;
 		break;
 	default:
 		/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 062ee16..f62b261 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -369,4 +369,8 @@
 #define VCE_CMD_IB_AUTO	0x00000005
 #define VCE_CMD_SEMAPHORE	0x00000006
 
+#define VCE_CMD_IB_VM           0x00000102
+#define VCE_CMD_WAIT_GE         0x00000106
+#define VCE_CMD_UPDATE_PTB      0x00000107
+#define VCE_CMD_FLUSH_TLB       0x00000108
 #endif
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index a74a0d2..c934b78c 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -29,7 +29,12 @@
  * Supported ASIC types
  */
 enum amd_asic_type {
-	CHIP_BONAIRE = 0,
+	CHIP_TAHITI = 0,
+	CHIP_PITCAIRN,
+	CHIP_VERDE,
+	CHIP_OLAND,
+	CHIP_HAINAN,
+	CHIP_BONAIRE,
 	CHIP_KAVERI,
 	CHIP_KABINI,
 	CHIP_HAWAII,
@@ -159,8 +164,14 @@
 	bool (*is_idle)(void *handle);
 	/* poll for idle */
 	int (*wait_for_idle)(void *handle);
+	/* check whether the IP block needs a soft reset */
+	int (*check_soft_reset)(void *handle);
+	/* prepare the IP block for a soft reset */
+	int (*pre_soft_reset)(void *handle);
 	/* soft reset the IP block */
 	int (*soft_reset)(void *handle);
+	/* clean up after a soft reset of the IP block */
+	int (*post_soft_reset)(void *handle);
 	/* enable/disable cg for the IP block */
 	int (*set_clockgating_state)(void *handle,
 				     enum amd_clockgating_state state);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h b/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
new file mode 100644
index 0000000..66e39cd
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
@@ -0,0 +1,941 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const u32 si_SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_DEPTH_BOUNDS_MIN
+    0x00000000, // DB_DEPTH_BOUNDS_MAX
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0x00000000, // DB_DEPTH_INFO
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // TA_BC_BASE_ADDR
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // COHER_DEST_BASE_2
+    0x00000000, // COHER_DEST_BASE_3
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const u32 si_SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0x00000000, // CP_RINGID
+    0x00000000, // CP_VMID
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0, // HOLE
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CONTROL
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0, // HOLE
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_ENA
+    0x00000000, // SPI_PS_INPUT_ADDR
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000002, // SPI_PS_IN_CONTROL
+    0, // HOLE
+    0x00000000, // SPI_BARYC_CNTL
+    0, // HOLE
+    0x00000000, // SPI_TMPRING_SIZE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_WAVE_MGMT_1
+    0x00000000, // SPI_WAVE_MGMT_2
+    0x00000000, // SPI_SHADER_POS_FORMAT
+    0x00000000, // SPI_SHADER_Z_FORMAT
+    0x00000000, // SPI_SHADER_COL_FORMAT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const u32 si_SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const u32 si_SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0x00000000, // DB_EQAA
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000000, // DB_SHADER_CONTROL
+    0x00090000, // PA_CL_CLIP_CNTL
+    0x00000004, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0, // HOLE
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000100, // VGT_GS_PER_ES
+    0x00000080, // VGT_ES_PER_GS
+    0x00000002, // VGT_GS_PER_VS
+    0x00000000, // VGT_GSVS_RING_OFFSET_1
+    0x00000000, // VGT_GSVS_RING_OFFSET_2
+    0x00000000, // VGT_GSVS_RING_OFFSET_3
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+    0x00000000, // IA_ENHANCE
+};
+static const u32 si_SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const u32 si_SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const u32 si_SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0x000000ff, // IA_MULTI_VGT_PARAM
+    0x00000000, // VGT_ESGS_RING_ITEMSIZE
+    0x00000000, // VGT_GSVS_RING_ITEMSIZE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0x00000000, // VGT_GS_VERT_ITEMSIZE
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+    0, // HOLE
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SC_CENTROID_PRIORITY_0
+    0x00000000, // PA_SC_CENTROID_PRIORITY_1
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+    0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+    0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000010, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
+{
+    {si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+    {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
+    {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+    {si_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+    {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
+    {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+    {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+    { NULL, 0, 0 }
+};
+static const struct cs_section_def si_cs_data[] = {
+    { si_SECT_CONTEXT_defs, SECT_CONTEXT },
+    { NULL, SECT_NONE }
+};
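For context, tables like si_cs_data above are walked generically to emit the clear state. A minimal sketch of such a walker follows, assuming the cs_section_def/cs_extent_def field layouts implied by the initializers ({ regs, reg_index, reg_count } and { extents, id }); the field names and the two-dword per-extent packet overhead are assumptions, not part of this patch:

    /* Illustrative sketch only: count the dwords needed to emit a
     * NULL-terminated cs_section_def table such as si_cs_data above.
     * Field names are assumed from the initializers. */
    static u32 si_count_cs_dwords(const struct cs_section_def *cs_data)
    {
    	const struct cs_section_def *sect;
    	const struct cs_extent_def *ext;
    	u32 count = 0;

    	for (sect = cs_data; sect->section != NULL; ++sect)
    		for (ext = sect->section; ext->extent != NULL; ++ext)
    			count += 2 + ext->reg_count; /* assumed header + registers */
    	return count;
    }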
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
new file mode 100644
index 0000000..895c8e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __SI_REG_H__
+#define __SI_REG_H__
+
+/* SI */
+#define SI_DC_GPIO_HPD_MASK                      0x196c
+#define SI_DC_GPIO_HPD_A                         0x196d
+#define SI_DC_GPIO_HPD_EN                        0x196e
+#define SI_DC_GPIO_HPD_Y                         0x196f
+
+#define SI_GRPH_CONTROL                          0x1a01
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)		 (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2			 0
+#       define SI_ADDR_SURF_P4_8x16		 4
+#       define SI_ADDR_SURF_P4_16x16		 5
+#       define SI_ADDR_SURF_P4_16x32		 6
+#       define SI_ADDR_SURF_P4_32x32		 7
+#       define SI_ADDR_SURF_P8_16x16_8x16	 8
+#       define SI_ADDR_SURF_P8_16x32_8x16	 9
+#       define SI_ADDR_SURF_P8_32x32_8x16	 10
+#       define SI_ADDR_SURF_P8_16x32_16x16	 11
+#       define SI_ADDR_SURF_P8_32x32_16x16	 12
+#       define SI_ADDR_SURF_P8_32x32_16x32	 13
+#       define SI_ADDR_SURF_P8_32x64_32x32	 14
+
+#endif
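As a usage illustration (not part of this patch), the SI_GRPH_CONTROL field macros above OR together into a single register word; for example, a 32 bpp ARGB8888 surface in a 2D-tiled layout might be described as follows, where the particular tiling parameters are invented for the example:

    /* Illustrative only: compose a GRPH_CONTROL value from the field
     * macros defined above. */
    u32 grph_control = SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |
    		       SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |
    		       SI_GRPH_NUM_BANKS(SI_ADDR_SURF_8_BANK) |
    		       SI_GRPH_TILE_SPLIT(SI_ADDR_SURF_TILE_SPLIT_1KB) |
    		       SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_2D_TILED_THIN1) |
    		       SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);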
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
new file mode 100644
index 0000000..8c5608a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
@@ -0,0 +1,2426 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef SI_H
+#define SI_H
+
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
+
+#define SI_MAX_SH_GPRS		 	256
+#define SI_MAX_TEMP_GPRS         	16
+#define SI_MAX_SH_THREADS        	256
+#define SI_MAX_SH_STACK_ENTRIES  	4096
+#define SI_MAX_FRC_EOV_CNT       	16384
+#define SI_MAX_BACKENDS          	8
+#define SI_MAX_BACKENDS_MASK     	0xFF
+#define SI_MAX_BACKENDS_PER_SE_MASK     0x0F
+#define SI_MAX_SIMDS             	12
+#define SI_MAX_SIMDS_MASK        	0x0FFF
+#define SI_MAX_SIMDS_PER_SE_MASK        0x00FF
+#define SI_MAX_PIPES            	8
+#define SI_MAX_PIPES_MASK        	0xFF
+#define SI_MAX_PIPES_PER_SIMD_MASK      0x3F
+#define SI_MAX_LDS_NUM           	0xFFFF
+#define SI_MAX_TCC               	16
+#define SI_MAX_TCC_MASK          	0xFFFF
+
+#define AMDGPU_NUM_OF_VMIDS 		8
+
+/* SMC IND accessor regs */
+#define SMC_IND_INDEX_0                              0x80
+#define SMC_IND_DATA_0                               0x81
+
+#define SMC_IND_ACCESS_CNTL                          0x8A
+#       define AUTO_INCREMENT_IND_0                  (1 << 0)
+#define SMC_MESSAGE_0                                0x8B
+#define SMC_RESP_0                                   0x8C
+
+/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
+#define SMC_CG_IND_START                    0xc0030000
+#define SMC_CG_IND_END                      0xc0040000
+
+#define	CG_CGTT_LOCAL_0				0x400
+#define	CG_CGTT_LOCAL_1				0x401
+
+/* SMC IND registers */
+#define	SMC_SYSCON_RESET_CNTL				0x80000000
+#       define RST_REG                                  (1 << 0)
+#define	SMC_SYSCON_CLOCK_CNTL_0				0x80000004
+#       define CK_DISABLE                               (1 << 0)
+#       define CKEN                                     (1 << 24)
+
+#define VGA_HDP_CONTROL  				0xCA
+#define		VGA_MEMORY_DISABLE				(1 << 4)
+
+#define DCCG_DISP_SLOW_SELECT_REG                       0x13F
+#define		DCCG_DISP1_SLOW_SELECT(x)		((x) << 0)
+#define		DCCG_DISP1_SLOW_SELECT_MASK		(7 << 0)
+#define		DCCG_DISP1_SLOW_SELECT_SHIFT		0
+#define		DCCG_DISP2_SLOW_SELECT(x)		((x) << 4)
+#define		DCCG_DISP2_SLOW_SELECT_MASK		(7 << 4)
+#define		DCCG_DISP2_SLOW_SELECT_SHIFT		4
+
+#define	CG_SPLL_FUNC_CNTL				0x180
+#define		SPLL_RESET				(1 << 0)
+#define		SPLL_SLEEP				(1 << 1)
+#define		SPLL_BYPASS_EN				(1 << 3)
+#define		SPLL_REF_DIV(x)				((x) << 4)
+#define		SPLL_REF_DIV_MASK			(0x3f << 4)
+#define		SPLL_PDIV_A(x)				((x) << 20)
+#define		SPLL_PDIV_A_MASK			(0x7f << 20)
+#define		SPLL_PDIV_A_SHIFT			20
+#define	CG_SPLL_FUNC_CNTL_2				0x181
+#define		SCLK_MUX_SEL(x)				((x) << 0)
+#define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define		SPLL_CTLREQ_CHG				(1 << 23)
+#define		SCLK_MUX_UPDATE				(1 << 26)
+#define	CG_SPLL_FUNC_CNTL_3				0x182
+#define		SPLL_FB_DIV(x)				((x) << 0)
+#define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
+#define		SPLL_FB_DIV_SHIFT			0
+#define		SPLL_DITHEN				(1 << 28)
+#define	CG_SPLL_FUNC_CNTL_4				0x183
+
+#define	SPLL_STATUS					0x185
+#define		SPLL_CHG_STATUS				(1 << 1)
+#define	SPLL_CNTL_MODE					0x186
+#define		SPLL_SW_DIR_CONTROL			(1 << 0)
+#	define SPLL_REFCLK_SEL(x)			((x) << 26)
+#	define SPLL_REFCLK_SEL_MASK			(3 << 26)
+
+#define	CG_SPLL_SPREAD_SPECTRUM				0x188
+#define		SSEN					(1 << 0)
+#define		CLK_S(x)				((x) << 4)
+#define		CLK_S_MASK				(0xfff << 4)
+#define		CLK_S_SHIFT				4
+#define	CG_SPLL_SPREAD_SPECTRUM_2			0x189
+#define		CLK_V(x)				((x) << 0)
+#define		CLK_V_MASK				(0x3ffffff << 0)
+#define		CLK_V_SHIFT				0
+
+#define	CG_SPLL_AUTOSCALE_CNTL				0x18b
+#       define AUTOSCALE_ON_SS_CLEAR                    (1 << 9)
+
+/* discrete uvd clocks */
+#define	CG_UPLL_FUNC_CNTL				0x18d
+#	define UPLL_RESET_MASK				0x00000001
+#	define UPLL_SLEEP_MASK				0x00000002
+#	define UPLL_BYPASS_EN_MASK			0x00000004
+#	define UPLL_CTLREQ_MASK				0x00000008
+#	define UPLL_VCO_MODE_MASK			0x00000600
+#	define UPLL_REF_DIV_MASK			0x003F0000
+#	define UPLL_CTLACK_MASK				0x40000000
+#	define UPLL_CTLACK2_MASK			0x80000000
+#define	CG_UPLL_FUNC_CNTL_2				0x18e
+#	define UPLL_PDIV_A(x)				((x) << 0)
+#	define UPLL_PDIV_A_MASK				0x0000007F
+#	define UPLL_PDIV_B(x)				((x) << 8)
+#	define UPLL_PDIV_B_MASK				0x00007F00
+#	define VCLK_SRC_SEL(x)				((x) << 20)
+#	define VCLK_SRC_SEL_MASK			0x01F00000
+#	define DCLK_SRC_SEL(x)				((x) << 25)
+#	define DCLK_SRC_SEL_MASK			0x3E000000
+#define	CG_UPLL_FUNC_CNTL_3				0x18f
+#	define UPLL_FB_DIV(x)				((x) << 0)
+#	define UPLL_FB_DIV_MASK				0x01FFFFFF
+#define	CG_UPLL_FUNC_CNTL_4                             0x191
+#	define UPLL_SPARE_ISPARE9			0x00020000
+#define	CG_UPLL_FUNC_CNTL_5				0x192
+#	define RESET_ANTI_MUX_MASK			0x00000200
+#define	CG_UPLL_SPREAD_SPECTRUM				0x194
+#	define SSEN_MASK				0x00000001
+
+#define	MPLL_BYPASSCLK_SEL				0x197
+#	define MPLL_CLKOUT_SEL(x)			((x) << 8)
+#	define MPLL_CLKOUT_SEL_MASK			0xFF00
+
+#define CG_CLKPIN_CNTL                                    0x198
+#       define XTALIN_DIVIDE                              (1 << 1)
+#       define BCLK_AS_XCLK                               (1 << 2)
+#define CG_CLKPIN_CNTL_2                                  0x199
+#       define FORCE_BIF_REFCLK_EN                        (1 << 3)
+#       define MUX_TCLK_TO_XCLK                           (1 << 8)
+
+#define	THM_CLK_CNTL					0x19b
+#	define CMON_CLK_SEL(x)				((x) << 0)
+#	define CMON_CLK_SEL_MASK			0xFF
+#	define TMON_CLK_SEL(x)				((x) << 8)
+#	define TMON_CLK_SEL_MASK			0xFF00
+#define	MISC_CLK_CNTL					0x19c
+#	define DEEP_SLEEP_CLK_SEL(x)			((x) << 0)
+#	define DEEP_SLEEP_CLK_SEL_MASK			0xFF
+#	define ZCLK_SEL(x)				((x) << 8)
+#	define ZCLK_SEL_MASK				0xFF00
+
+#define	CG_THERMAL_CTRL					0x1c0
+#define 	DPM_EVENT_SRC(x)			((x) << 0)
+#define 	DPM_EVENT_SRC_MASK			(7 << 0)
+#define		DIG_THERM_DPM(x)			((x) << 14)
+#define		DIG_THERM_DPM_MASK			0x003FC000
+#define		DIG_THERM_DPM_SHIFT			14
+#define	CG_THERMAL_STATUS				0x1c1
+#define		FDO_PWM_DUTY(x)				((x) << 9)
+#define		FDO_PWM_DUTY_MASK			(0xff << 9)
+#define		FDO_PWM_DUTY_SHIFT			9
+#define	CG_THERMAL_INT					0x1c2
+#define		DIG_THERM_INTH(x)			((x) << 8)
+#define		DIG_THERM_INTH_MASK			0x0000FF00
+#define		DIG_THERM_INTH_SHIFT			8
+#define		DIG_THERM_INTL(x)			((x) << 16)
+#define		DIG_THERM_INTL_MASK			0x00FF0000
+#define		DIG_THERM_INTL_SHIFT			16
+#define 	THERM_INT_MASK_HIGH			(1 << 24)
+#define 	THERM_INT_MASK_LOW			(1 << 25)
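+/* Illustrative only: a 70..90 degree interrupt window could be armed as
+ *   DIG_THERM_INTH(90) | DIG_THERM_INTL(70)
+ * applied under DIG_THERM_INTH_MASK/DIG_THERM_INTL_MASK, with
+ * THERM_INT_MASK_HIGH/LOW assumed cleared to leave both edges enabled.
+ */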
+
+#define	CG_MULT_THERMAL_CTRL					0x1c4
+#define		TEMP_SEL(x)					((x) << 20)
+#define		TEMP_SEL_MASK					(0xff << 20)
+#define		TEMP_SEL_SHIFT					20
+#define	CG_MULT_THERMAL_STATUS					0x1c5
+#define		ASIC_MAX_TEMP(x)				((x) << 0)
+#define		ASIC_MAX_TEMP_MASK				0x000001ff
+#define		ASIC_MAX_TEMP_SHIFT				0
+#define		CTF_TEMP(x)					((x) << 9)
+#define		CTF_TEMP_MASK					0x0003fe00
+#define		CTF_TEMP_SHIFT					9
+
+#define	CG_FDO_CTRL0					0x1d5
+#define		FDO_STATIC_DUTY(x)			((x) << 0)
+#define		FDO_STATIC_DUTY_MASK			0x000000FF
+#define		FDO_STATIC_DUTY_SHIFT			0
+#define	CG_FDO_CTRL1					0x1d6
+#define		FMAX_DUTY100(x)				((x) << 0)
+#define		FMAX_DUTY100_MASK			0x000000FF
+#define		FMAX_DUTY100_SHIFT			0
+#define	CG_FDO_CTRL2					0x1d7
+#define		TMIN(x)					((x) << 0)
+#define		TMIN_MASK				0x000000FF
+#define		TMIN_SHIFT				0
+#define		FDO_PWM_MODE(x)				((x) << 11)
+#define		FDO_PWM_MODE_MASK			(7 << 11)
+#define		FDO_PWM_MODE_SHIFT			11
+#define		TACH_PWM_RESP_RATE(x)			((x) << 25)
+#define		TACH_PWM_RESP_RATE_MASK			(0x7f << 25)
+#define		TACH_PWM_RESP_RATE_SHIFT		25
+
+#define CG_TACH_CTRL                                    0x1dc
+#       define EDGE_PER_REV(x)                          ((x) << 0)
+#       define EDGE_PER_REV_MASK                        (0x7 << 0)
+#       define EDGE_PER_REV_SHIFT                       0
+#       define TARGET_PERIOD(x)                         ((x) << 3)
+#       define TARGET_PERIOD_MASK                       0xfffffff8
+#       define TARGET_PERIOD_SHIFT                      3
+#define CG_TACH_STATUS                                  0x1dd
+#       define TACH_PERIOD(x)                           ((x) << 0)
+#       define TACH_PERIOD_MASK                         0xffffffff
+#       define TACH_PERIOD_SHIFT                        0
+
+#define GENERAL_PWRMGT                                  0x1e0
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+#       define STATIC_PM_EN                             (1 << 1)
+#       define THERMAL_PROTECTION_DIS                   (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                  (1 << 3)
+#       define SW_SMIO_INDEX(x)                         ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                       (1 << 6)
+#       define SW_SMIO_INDEX_SHIFT                      6
+#       define VOLT_PWRMGT_EN                           (1 << 10)
+#       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
+#define CG_TPC                                            0x1e1
+#define SCLK_PWRMGT_CNTL                                  0x1e2
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define SCLK_LOW_D1                                (1 << 1)
+#       define FIR_RESET                                  (1 << 4)
+#       define FIR_FORCE_TREND_SEL                        (1 << 5)
+#       define FIR_TREND_MODE                             (1 << 6)
+#       define DYN_GFX_CLK_OFF_EN                         (1 << 7)
+#       define GFX_CLK_FORCE_ON                           (1 << 8)
+#       define GFX_CLK_REQUEST_OFF                        (1 << 9)
+#       define GFX_CLK_FORCE_OFF                          (1 << 10)
+#       define GFX_CLK_OFF_ACPI_D1                        (1 << 11)
+#       define GFX_CLK_OFF_ACPI_D2                        (1 << 12)
+#       define GFX_CLK_OFF_ACPI_D3                        (1 << 13)
+#       define DYN_LIGHT_SLEEP_EN                         (1 << 14)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x1e6
+#       define CURRENT_STATE_INDEX_MASK                   (0xf << 4)
+#       define CURRENT_STATE_INDEX_SHIFT                  4
+
+#define CG_FTV                                            0x1ef
+
+#define CG_FFCT_0                                         0x1f0
+#       define UTC_0(x)                                   ((x) << 0)
+#       define UTC_0_MASK                                 (0x3ff << 0)
+#       define DTC_0(x)                                   ((x) << 10)
+#       define DTC_0_MASK                                 (0x3ff << 10)
+
+#define CG_BSP                                          0x1ff
+#       define BSP(x)					((x) << 0)
+#       define BSP_MASK					(0xffff << 0)
+#       define BSU(x)					((x) << 16)
+#       define BSU_MASK					(0xf << 16)
+#define CG_AT                                           0x200
+#       define CG_R(x)					((x) << 0)
+#       define CG_R_MASK				(0xffff << 0)
+#       define CG_L(x)					((x) << 16)
+#       define CG_L_MASK				(0xffff << 16)
+
+#define CG_GIT                                          0x201
+#       define CG_GICST(x)                              ((x) << 0)
+#       define CG_GICST_MASK                            (0xffff << 0)
+#       define CG_GIPOT(x)                              ((x) << 16)
+#       define CG_GIPOT_MASK                            (0xffff << 16)
+
+#define CG_SSP                                            0x203
+#       define SST(x)                                     ((x) << 0)
+#       define SST_MASK                                   (0xffff << 0)
+#       define SSTU(x)                                    ((x) << 16)
+#       define SSTU_MASK                                  (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL                               0x20a
+#       define DISP1_GAP(x)                               ((x) << 0)
+#       define DISP1_GAP_MASK                             (3 << 0)
+#       define DISP2_GAP(x)                               ((x) << 2)
+#       define DISP2_GAP_MASK                             (3 << 2)
+#       define VBI_TIMER_COUNT(x)                         ((x) << 4)
+#       define VBI_TIMER_COUNT_MASK                       (0x3fff << 4)
+#       define VBI_TIMER_UNIT(x)                          ((x) << 20)
+#       define VBI_TIMER_UNIT_MASK                        (7 << 20)
+#       define DISP1_GAP_MCHG(x)                          ((x) << 24)
+#       define DISP1_GAP_MCHG_MASK                        (3 << 24)
+#       define DISP2_GAP_MCHG(x)                          ((x) << 26)
+#       define DISP2_GAP_MCHG_MASK                        (3 << 26)
+
+#define	CG_ULV_CONTROL					0x21e
+#define	CG_ULV_PARAMETER				0x21f
+
+#define	SMC_SCRATCH0					0x221
+
+#define	CG_CAC_CTRL					0x22e
+#	define CAC_WINDOW(x)				((x) << 0)
+#	define CAC_WINDOW_MASK				0x00ffffff
+
+#define DMIF_ADDR_CONFIG  				0x2F5
+
+#define DMIF_ADDR_CALC  				0x300
+
+#define	PIPE0_DMIF_BUFFER_CONTROL			  0x0328
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
+#define	SRBM_STATUS				        0x394
+#define		GRBM_RQ_PENDING 			(1 << 5)
+#define		VMC_BUSY 				(1 << 8)
+#define		MCB_BUSY 				(1 << 9)
+#define		MCB_NON_DISPLAY_BUSY 			(1 << 10)
+#define		MCC_BUSY 				(1 << 11)
+#define		MCD_BUSY 				(1 << 12)
+#define		SEM_BUSY 				(1 << 14)
+#define		IH_BUSY 				(1 << 17)
+
+#define	SRBM_SOFT_RESET				        0x398
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_DMA1				(1 << 6)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3A0
+#define	GC_USER_SYS_RB_BACKEND_DISABLE			0x3A1
+
+#define SRBM_READ_ERROR					0x3A6
+#define SRBM_INT_CNTL					0x3A8
+#define SRBM_INT_ACK					0x3AA
+
+#define	SRBM_STATUS2				        0x3B1
+#define		DMA_BUSY 				(1 << 5)
+#define		DMA1_BUSY 				(1 << 6)
+
+#define VM_L2_CNTL					0x500
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		L2_CACHE_PTE_ENDIAN_SWAP_MODE(x)		((x) << 2)
+#define		L2_CACHE_PDE_ENDIAN_SWAP_MODE(x)		((x) << 4)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE	(1 << 10)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 15)
+#define		CONTEXT1_IDENTITY_ACCESS_MODE(x)		(((x) & 3) << 19)
+#define VM_L2_CNTL2					0x501
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define		INVALIDATE_CACHE_MODE(x)			((x) << 26)
+#define			INVALIDATE_PTE_AND_PDE_CACHES		0
+#define			INVALIDATE_ONLY_PTE_CACHES		1
+#define			INVALIDATE_ONLY_PDE_CACHES		2
+#define VM_L2_CNTL3					0x502
+#define		BANK_SELECT(x)					((x) << 0)
+#define		L2_CACHE_UPDATE_MODE(x)				((x) << 6)
+#define		L2_CACHE_BIGK_FRAGMENT_SIZE(x)			((x) << 15)
+#define		L2_CACHE_BIGK_ASSOCIATIVITY			(1 << 20)
+#define	VM_L2_STATUS					0x503
+#define		L2_BUSY						(1 << 0)
+#define VM_CONTEXT0_CNTL				0x504
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
+#define		PAGE_TABLE_BLOCK_SIZE(x)			(((x) & 0xF) << 24)
+#define VM_CONTEXT1_CNTL				0x505
+#define VM_CONTEXT0_CNTL2				0x50C
+#define VM_CONTEXT1_CNTL2				0x50D
+#define	VM_CONTEXT8_PAGE_TABLE_BASE_ADDR		0x50E
+#define	VM_CONTEXT9_PAGE_TABLE_BASE_ADDR		0x50F
+#define	VM_CONTEXT10_PAGE_TABLE_BASE_ADDR		0x510
+#define	VM_CONTEXT11_PAGE_TABLE_BASE_ADDR		0x511
+#define	VM_CONTEXT12_PAGE_TABLE_BASE_ADDR		0x512
+#define	VM_CONTEXT13_PAGE_TABLE_BASE_ADDR		0x513
+#define	VM_CONTEXT14_PAGE_TABLE_BASE_ADDR		0x514
+#define	VM_CONTEXT15_PAGE_TABLE_BASE_ADDR		0x515
+
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x53f
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x537
+#define		PROTECTIONS_MASK			(0xf << 0)
+#define		PROTECTIONS_SHIFT			0
+		/* bit 0: range
+		 * bit 1: pde0
+		 * bit 2: valid
+		 * bit 3: read
+		 * bit 4: write
+		 */
+#define		MEMORY_CLIENT_ID_MASK			(0xff << 12)
+#define		MEMORY_CLIENT_ID_SHIFT			12
+#define		MEMORY_CLIENT_RW_MASK			(1 << 24)
+#define		MEMORY_CLIENT_RW_SHIFT			24
+#define		FAULT_VMID_MASK				(0xf << 25)
+#define		FAULT_VMID_SHIFT			25
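+/* Illustrative decode of a VM_CONTEXT1_PROTECTION_FAULT_STATUS word
+ * using the masks above:
+ *   vmid   = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+ *   client = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+ *   write  = (status & MEMORY_CLIENT_RW_MASK) >> MEMORY_CLIENT_RW_SHIFT;
+ */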
+
+#define VM_INVALIDATE_REQUEST				0x51E
+#define VM_INVALIDATE_RESPONSE				0x51F
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x546
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR	0x547
+
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x54F
+#define	VM_CONTEXT1_PAGE_TABLE_BASE_ADDR		0x550
+#define	VM_CONTEXT2_PAGE_TABLE_BASE_ADDR		0x551
+#define	VM_CONTEXT3_PAGE_TABLE_BASE_ADDR		0x552
+#define	VM_CONTEXT4_PAGE_TABLE_BASE_ADDR		0x553
+#define	VM_CONTEXT5_PAGE_TABLE_BASE_ADDR		0x554
+#define	VM_CONTEXT6_PAGE_TABLE_BASE_ADDR		0x555
+#define	VM_CONTEXT7_PAGE_TABLE_BASE_ADDR		0x556
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x557
+#define	VM_CONTEXT1_PAGE_TABLE_START_ADDR		0x558
+
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x55F
+#define	VM_CONTEXT1_PAGE_TABLE_END_ADDR			0x560
+
+#define VM_L2_CG           				0x570
+#define		MC_CG_ENABLE				(1 << 18)
+#define		MC_LS_ENABLE				(1 << 19)
+
+#define MC_SHARED_CHMAP						0x801
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x0000f000
+#define MC_SHARED_CHREMAP					0x802
+
+#define	MC_VM_FB_LOCATION				0x809
+#define	MC_VM_AGP_TOP					0x80A
+#define	MC_VM_AGP_BOT					0x80B
+#define	MC_VM_AGP_BASE					0x80C
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x80D
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x80E
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x80F
+
+#define	MC_VM_MX_L1_TLB_CNTL				0x819
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x82B
+
+#define MC_HUB_MISC_HUB_CG           			0x82E
+#define MC_HUB_MISC_VM_CG           			0x82F
+
+#define MC_HUB_MISC_SIP_CG           			0x830
+
+#define MC_XPB_CLK_GAT           			0x91E
+
+#define MC_CITF_MISC_RD_CG           			0x992
+#define MC_CITF_MISC_WR_CG           			0x993
+#define MC_CITF_MISC_VM_CG           			0x994
+
+#define	MC_ARB_RAMCFG					0x9D8
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define		NOOFGROUPS_SHIFT				12
+#define		NOOFGROUPS_MASK					0x00001000
+
+#define	MC_ARB_DRAM_TIMING				0x9DD
+#define	MC_ARB_DRAM_TIMING2				0x9DE
+
+#define MC_ARB_BURST_TIME                               0xA02
+#define		STATE0(x)				((x) << 0)
+#define		STATE0_MASK				(0x1f << 0)
+#define		STATE0_SHIFT				0
+#define		STATE1(x)				((x) << 5)
+#define		STATE1_MASK				(0x1f << 5)
+#define		STATE1_SHIFT				5
+#define		STATE2(x)				((x) << 10)
+#define		STATE2_MASK				(0x1f << 10)
+#define		STATE2_SHIFT				10
+#define		STATE3(x)				((x) << 15)
+#define		STATE3_MASK				(0x1f << 15)
+#define		STATE3_SHIFT				15
+
+#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0xA3A
+#define		TRAIN_DONE_D0      			(1 << 30)
+#define		TRAIN_DONE_D1      			(1 << 31)
+
+#define MC_SEQ_SUP_CNTL           			0xA32
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0xA33
+#define MC_PMG_AUTO_CMD           			0xA34
+
+#define MC_IO_PAD_CNTL_D0           			0xA74
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+
+#define MC_SEQ_RAS_TIMING                               0xA28
+#define MC_SEQ_CAS_TIMING                               0xA29
+#define MC_SEQ_MISC_TIMING                              0xA2A
+#define MC_SEQ_MISC_TIMING2                             0xA2B
+#define MC_SEQ_PMG_TIMING                               0xA2C
+#define MC_SEQ_RD_CTL_D0                                0xA2D
+#define MC_SEQ_RD_CTL_D1                                0xA2E
+#define MC_SEQ_WR_CTL_D0                                0xA2F
+#define MC_SEQ_WR_CTL_D1                                0xA30
+
+#define MC_SEQ_MISC0           				0xA80
+#define 	MC_SEQ_MISC0_VEN_ID_SHIFT               8
+#define 	MC_SEQ_MISC0_VEN_ID_MASK                0x00000f00
+#define 	MC_SEQ_MISC0_VEN_ID_VALUE               3
+#define 	MC_SEQ_MISC0_REV_ID_SHIFT               12
+#define 	MC_SEQ_MISC0_REV_ID_MASK                0x0000f000
+#define 	MC_SEQ_MISC0_REV_ID_VALUE               1
+#define 	MC_SEQ_MISC0_GDDR5_SHIFT                28
+#define 	MC_SEQ_MISC0_GDDR5_MASK                 0xf0000000
+#define 	MC_SEQ_MISC0_GDDR5_VALUE                5
+#define MC_SEQ_MISC1                                    0xA81
+#define MC_SEQ_RESERVE_M                                0xA82
+#define MC_PMG_CMD_EMRS                                 0xA83
+
+#define MC_SEQ_IO_DEBUG_INDEX           		0xA91
+#define MC_SEQ_IO_DEBUG_DATA           			0xA92
+
+#define MC_SEQ_MISC5                                    0xA95
+#define MC_SEQ_MISC6                                    0xA96
+
+#define MC_SEQ_MISC7                                    0xA99
+
+#define MC_SEQ_RAS_TIMING_LP                            0xA9B
+#define MC_SEQ_CAS_TIMING_LP                            0xA9C
+#define MC_SEQ_MISC_TIMING_LP                           0xA9D
+#define MC_SEQ_MISC_TIMING2_LP                          0xA9E
+#define MC_SEQ_WR_CTL_D0_LP                             0xA9F
+#define MC_SEQ_WR_CTL_D1_LP                             0xAA0
+#define MC_SEQ_PMG_CMD_EMRS_LP                          0xAA1
+#define MC_SEQ_PMG_CMD_MRS_LP                           0xAA2
+
+#define MC_PMG_CMD_MRS                                  0xAAB
+
+#define MC_SEQ_RD_CTL_D0_LP                             0xAC7
+#define MC_SEQ_RD_CTL_D1_LP                             0xAC8
+
+#define MC_PMG_CMD_MRS1                                 0xAD1
+#define MC_SEQ_PMG_CMD_MRS1_LP                          0xAD2
+#define MC_SEQ_PMG_TIMING_LP                            0xAD3
+
+#define MC_SEQ_WR_CTL_2                                 0xAD5
+#define MC_SEQ_WR_CTL_2_LP                              0xAD6
+#define MC_PMG_CMD_MRS2                                 0xAD7
+#define MC_SEQ_PMG_CMD_MRS2_LP                          0xAD8
+
+#define	MCLK_PWRMGT_CNTL				0xAE8
+#       define DLL_SPEED(x)				((x) << 0)
+#       define DLL_SPEED_MASK				(0x1f << 0)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCK0_PDNB                              (1 << 8)
+#       define MRDCK1_PDNB                              (1 << 9)
+#       define MRDCK0_RESET                             (1 << 16)
+#       define MRDCK1_RESET                             (1 << 17)
+#       define DLL_READY_READ                           (1 << 24)
+#define	DLL_CNTL					0xAE9
+#       define MRDCK0_BYPASS                            (1 << 24)
+#       define MRDCK1_BYPASS                            (1 << 25)
+
+#define	MPLL_CNTL_MODE					0xAEC
+#       define MPLL_MCLK_SEL                            (1 << 11)
+#define	MPLL_FUNC_CNTL					0xAED
+#define		BWCTRL(x)				((x) << 20)
+#define		BWCTRL_MASK				(0xff << 20)
+#define	MPLL_FUNC_CNTL_1				0xAEE
+#define		VCO_MODE(x)				((x) << 0)
+#define		VCO_MODE_MASK				(3 << 0)
+#define		CLKFRAC(x)				((x) << 4)
+#define		CLKFRAC_MASK				(0xfff << 4)
+#define		CLKF(x)					((x) << 16)
+#define		CLKF_MASK				(0xfff << 16)
+#define	MPLL_FUNC_CNTL_2				0xAEF
+#define	MPLL_AD_FUNC_CNTL				0xAF0
+#define		YCLK_POST_DIV(x)			((x) << 0)
+#define		YCLK_POST_DIV_MASK			(7 << 0)
+#define	MPLL_DQ_FUNC_CNTL				0xAF1
+#define		YCLK_SEL(x)				((x) << 4)
+#define		YCLK_SEL_MASK				(1 << 4)
+
+#define	MPLL_SS1					0xAF3
+#define		CLKV(x)					((x) << 0)
+#define		CLKV_MASK				(0x3ffffff << 0)
+#define	MPLL_SS2					0xAF4
+#define		CLKS(x)					((x) << 0)
+#define		CLKS_MASK				(0xfff << 0)
+
+#define	HDP_HOST_PATH_CNTL				0xB00
+#define 	CLOCK_GATING_DIS			(1 << 23)
+#define	HDP_NONSURFACE_BASE				0xB01
+#define	HDP_NONSURFACE_INFO				0xB02
+#define	HDP_NONSURFACE_SIZE				0xB03
+
+#define HDP_DEBUG0  					0xBCC
+
+#define HDP_ADDR_CONFIG  				0xBD2
+#define HDP_MISC_CNTL					0xBD3
+#define 	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
+#define HDP_MEM_POWER_LS				0xBD4
+#define 	HDP_LS_ENABLE				(1 << 0)
+
+#define ATC_MISC_CG           				0xCD4
+
+#define IH_RB_CNTL                                        0xF80
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0xF81
+#define IH_RB_RPTR                                        0xF82
+#define IH_RB_WPTR                                        0xF83
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0xF84
+#define IH_RB_WPTR_ADDR_LO                                0xF85
+#define IH_CNTL                                           0xF86
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+#       define MC_VMID(x)                                 ((x) << 25)
+
+#define	CONFIG_MEMSIZE					0x150A
+
+#define INTERRUPT_CNTL                                    0x151A
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x151B
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x1520
+
+#define	BIF_FB_EN						0x1524
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x1528
+
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX                       0x1780
+#       define AZ_ENDPOINT_REG_INDEX(x)                  (((x) & 0xff) << 0)
+#       define AZ_ENDPOINT_REG_WRITE_EN                  (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA                        0x1781
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER          0x25
+#define		SPEAKER_ALLOCATION(x)			(((x) & 0x7f) << 0)
+#define		SPEAKER_ALLOCATION_MASK			(0x7f << 0)
+#define		SPEAKER_ALLOCATION_SHIFT		0
+#define		HDMI_CONNECTION				(1 << 16)
+#define		DP_CONNECTION				(1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0        0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1        0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2        0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3        0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4        0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5        0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6        0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7        0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8        0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9        0x31 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10       0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11       0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12       0x34 /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13       0x35 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
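+/* Illustrative only: an 8-channel LPCM descriptor advertising
+ * 32/44.1/48 kHz would be
+ *   MAX_CHANNELS(7) | SUPPORTED_FREQUENCIES(0x07)
+ * i.e. bits 0-2 of the frequency byte per the table above. */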
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC         0x37
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR             0x38
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0               0x3a
+#       define MANUFACTURER_ID(x)                        (((x) & 0xffff) << 0)
+#       define PRODUCT_ID(x)                             (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1               0x3b
+#       define SINK_DESCRIPTION_LEN(x)                   (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2               0x3c
+#       define PORT_ID0(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3               0x3d
+#       define PORT_ID1(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4               0x3e
+#       define DESCRIPTION0(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION1(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION2(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION3(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5               0x3f
+#       define DESCRIPTION4(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION5(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION6(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION7(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6               0x40
+#       define DESCRIPTION8(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION9(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION10(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION11(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7               0x41
+#       define DESCRIPTION12(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION13(x)                          (((x) & 0xff) << 8)
+#       define DESCRIPTION14(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION15(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8               0x42
+#       define DESCRIPTION16(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION17(x)                          (((x) & 0xff) << 8)
+
+#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL         0x54
+#       define AUDIO_ENABLED                             (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT  0x56
+#define		PORT_CONNECTIVITY_MASK				(3 << 30)
+#define		PORT_CONNECTIVITY_SHIFT				30
+
+#define	DC_LB_MEMORY_SPLIT					0x1AC3
+#define		DC_LB_MEMORY_CONFIG(x)				((x) << 20)
+
+#define	PRIORITY_A_CNT						0x1AC6
+#define		PRIORITY_MARK_MASK				0x7fff
+#define		PRIORITY_OFF					(1 << 16)
+#define		PRIORITY_ALWAYS_ON				(1 << 20)
+#define	PRIORITY_B_CNT						0x1AC7
+
+#define	DPG_PIPE_ARBITRATION_CONTROL3				0x1B32
+#       define LATENCY_WATERMARK_MASK(x)			((x) << 16)
+#define	DPG_PIPE_LATENCY_CONTROL				0x1B33
+#       define LATENCY_LOW_WATERMARK(x)				((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)			((x) << 16)
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x1AEE
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x1AEF
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x1AD0
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x183D
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x183E
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x183F
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x1840
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x1853
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x1854
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x1A16
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define	GRPH_INT_CONTROL			        0x1A17
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define	DAC_AUTODETECT_INT_CONTROL			0x19F2
+
+#define DC_HPD1_INT_STATUS                              0x1807
+#define DC_HPD2_INT_STATUS                              0x180A
+#define DC_HPD3_INT_STATUS                              0x180D
+#define DC_HPD4_INT_STATUS                              0x1810
+#define DC_HPD5_INT_STATUS                              0x1813
+#define DC_HPD6_INT_STATUS                              0x1816
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x1808
+#define DC_HPD2_INT_CONTROL                             0x180B
+#define DC_HPD3_INT_CONTROL                             0x180E
+#define DC_HPD4_INT_CONTROL                             0x1811
+#define DC_HPD5_INT_CONTROL                             0x1814
+#define DC_HPD6_INT_CONTROL                             0x1817
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x1809
+#define DC_HPD2_CONTROL                                   0x180C
+#define DC_HPD3_CONTROL                                   0x180F
+#define DC_HPD4_CONTROL                                   0x1812
+#define DC_HPD5_CONTROL                                   0x1815
+#define DC_HPD6_CONTROL                                   0x1818
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
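+/* Illustrative sketch, assuming the driver's WREG32() MMIO helper: enabling
+ * hot-plug detection on pad 1 with a connection timer value of 0x9c4 would
+ * look like
+ *   WREG32(DC_HPD1_CONTROL, DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_EN);
+ */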
+
+#define DPG_PIPE_STUTTER_CONTROL                          0x1B35
+#       define STUTTER_ENABLE                             (1 << 0)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x1BA6
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE                           0x05ac
+#       define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+#       define DCCG_AUDIO_DTO_SEL            (1 << 4)   /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE                           0x05b0
+#define DCCG_AUDIO_DTO0_MODULE                          0x05b4
+#define DCCG_AUDIO_DTO1_PHASE                           0x05c0
+#define DCCG_AUDIO_DTO1_MODULE                          0x05c4
+
+#define AFMT_AUDIO_SRC_CONTROL                          0x1c4f
+#define		AFMT_AUDIO_SRC_SELECT(x)		(((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
+
+#define	GRBM_CNTL					0x2000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+
+#define	GRBM_STATUS2					0x2002
+#define		RLC_RQ_PENDING 					(1 << 0)
+#define		RLC_BUSY 					(1 << 8)
+#define		TC_BUSY 					(1 << 9)
+
+#define	GRBM_STATUS					0x2004
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		RING2_RQ_PENDING				(1 << 4)
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		RING1_RQ_PENDING				(1 << 6)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GDS_DMA_RQ_PENDING				(1 << 9)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		GDS_BUSY 					(1 << 15)
+#define		VGT_BUSY					(1 << 17)
+#define		IA_BUSY_NO_DMA					(1 << 18)
+#define		IA_BUSY						(1 << 19)
+#define		SX_BUSY 					(1 << 20)
+#define		SPI_BUSY					(1 << 22)
+#define		BCI_BUSY					(1 << 23)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1 << 31)
+#define	GRBM_STATUS_SE0					0x2005
+#define	GRBM_STATUS_SE1					0x2006
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_BCI_BUSY					(1 << 22)
+#define		SE_VGT_BUSY					(1 << 23)
+#define		SE_PA_BUSY					(1 << 24)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1 << 31)
+
+#define	GRBM_SOFT_RESET					0x2008
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_RLC					(1 << 2)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_GDS					(1 << 4)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_BCI					(1 << 7)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VGT					(1 << 14)
+#define		SOFT_RESET_IA					(1 << 15)
+
+#define GRBM_GFX_INDEX          			0x200B
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SH_INDEX(x)     			((x) << 8)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		SH_BROADCAST_WRITES      		(1 << 29)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
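+/* Illustrative sketch, assuming WREG32(): steering subsequent GRBM accesses
+ * to one shader engine/array while still broadcasting across instances:
+ *   WREG32(GRBM_GFX_INDEX, SE_INDEX(se) | SH_INDEX(sh) |
+ *          INSTANCE_BROADCAST_WRITES);
+ * Setting all three *_BROADCAST_WRITES bits restores full broadcast.
+ */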
+
+#define GRBM_INT_CNTL                                   0x2018
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+#define	CP_STRMOUT_CNTL					0x213F
+#define	SCRATCH_REG0					0x2140
+#define	SCRATCH_REG1					0x2141
+#define	SCRATCH_REG2					0x2142
+#define	SCRATCH_REG3					0x2143
+#define	SCRATCH_REG4					0x2144
+#define	SCRATCH_REG5					0x2145
+#define	SCRATCH_REG6					0x2146
+#define	SCRATCH_REG7					0x2147
+
+#define	SCRATCH_UMSK					0x2150
+#define	SCRATCH_ADDR					0x2151
+
+#define	CP_SEM_WAIT_TIMER				0x216F
+
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x2172
+
+#define CP_ME_CNTL					0x21B6
+#define		CP_CE_HALT					(1 << 24)
+#define		CP_PFP_HALT					(1 << 26)
+#define		CP_ME_HALT					(1 << 28)
+
+#define	CP_COHER_CNTL2					0x217A
+
+#define	CP_RB2_RPTR					0x21BE
+#define	CP_RB1_RPTR					0x21BF
+#define	CP_RB0_RPTR					0x21C0
+#define	CP_RB_WPTR_DELAY				0x21C1
+
+#define	CP_QUEUE_THRESHOLDS				0x21D8
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define CP_MEQ_THRESHOLDS				0x21D9
+#define		MEQ1_START(x)				((x) << 0)
+#define		MEQ2_START(x)				((x) << 8)
+
+#define	CP_PERFMON_CNTL					0x21FF
+
+#define	VGT_VTX_VECT_EJECT_REG				0x222C
+
+#define	VGT_CACHE_INVALIDATION				0x2231
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_ESGS_RING_SIZE				0x2232
+#define	VGT_GSVS_RING_SIZE				0x2233
+
+#define	VGT_GS_VERTEX_REUSE				0x2235
+
+#define	VGT_PRIMITIVE_TYPE				0x2256
+#define	VGT_INDEX_TYPE					0x2257
+
+#define	VGT_NUM_INDICES					0x225C
+#define	VGT_NUM_INSTANCES				0x225D
+
+#define	VGT_TF_RING_SIZE				0x2262
+
+#define	VGT_HS_OFFCHIP_PARAM				0x226C
+
+#define	VGT_TF_MEMORY_BASE				0x226E
+
+#define CC_GC_SHADER_ARRAY_CONFIG			0x226F
+#define		INACTIVE_CUS_MASK			0xFFFF0000
+#define		INACTIVE_CUS_SHIFT			16
+#define GC_USER_SHADER_ARRAY_CONFIG			0x2270
+
+#define	PA_CL_ENHANCE					0x2285
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+
+#define	PA_SU_LINE_STIPPLE_VALUE			0x2298
+
+#define	PA_SC_LINE_STIPPLE_STATE			0x22C4
+
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x22C9
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+
+#define	PA_SC_FIFO_SIZE					0x22F3
+#define		SC_FRONTEND_PRIM_FIFO_SIZE(x)			((x) << 0)
+#define		SC_BACKEND_PRIM_FIFO_SIZE(x)			((x) << 6)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 15)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 23)
+
+#define	PA_SC_ENHANCE					0x22FC
+
+#define	SQ_CONFIG					0x2300
+
+#define	SQC_CACHES					0x2302
+
+#define SQ_POWER_THROTTLE                               0x2396
+#define		MIN_POWER(x)				((x) << 0)
+#define		MIN_POWER_MASK				(0x3fff << 0)
+#define		MIN_POWER_SHIFT				0
+#define		MAX_POWER(x)				((x) << 16)
+#define		MAX_POWER_MASK				(0x3fff << 16)
+#define		MAX_POWER_SHIFT				16
+#define SQ_POWER_THROTTLE2                              0x2397
+#define		MAX_POWER_DELTA(x)			((x) << 0)
+#define		MAX_POWER_DELTA_MASK			(0x3fff << 0)
+#define		MAX_POWER_DELTA_SHIFT			0
+#define		STI_SIZE(x)				((x) << 16)
+#define		STI_SIZE_MASK				(0x3ff << 16)
+#define		STI_SIZE_SHIFT				16
+#define		LTI_RATIO(x)				((x) << 27)
+#define		LTI_RATIO_MASK				(0xf << 27)
+#define		LTI_RATIO_SHIFT				27
+
+#define	SX_DEBUG_1					0x2418
+
+#define	SPI_STATIC_THREAD_MGMT_1			0x2438
+#define	SPI_STATIC_THREAD_MGMT_2			0x2439
+#define	SPI_STATIC_THREAD_MGMT_3			0x243A
+#define	SPI_PS_MAX_WAVE_ID				0x243B
+
+#define	SPI_CONFIG_CNTL					0x2440
+
+#define	SPI_CONFIG_CNTL_1				0x244F
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+
+#define	CGTS_TCC_DISABLE				0x2452
+#define	CGTS_USER_TCC_DISABLE				0x2453
+#define		TCC_DISABLE_MASK				0xFFFF0000
+#define		TCC_DISABLE_SHIFT				16
+#define	CGTS_SM_CTRL_REG				0x2454
+#define		OVERRIDE				(1 << 21)
+#define		LS_OVERRIDE				(1 << 22)
+
+#define	SPI_LB_CU_MASK					0x24D5
+
+#define	TA_CNTL_AUX					0x2542
+
+#define CC_RB_BACKEND_DISABLE				0x263D
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x263E
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x00000007
+#define		NUM_PIPES_SHIFT				0
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		PIPE_INTERLEAVE_SIZE_MASK		0x00000070
+#define		PIPE_INTERLEAVE_SIZE_SHIFT		4
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		NUM_SHADER_ENGINES_MASK			0x00003000
+#define		NUM_SHADER_ENGINES_SHIFT		12
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		SHADER_ENGINE_TILE_SIZE_MASK		0x00070000
+#define		SHADER_ENGINE_TILE_SIZE_SHIFT		16
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		NUM_GPUS_MASK				0x00700000
+#define		NUM_GPUS_SHIFT				20
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		MULTI_GPU_TILE_SIZE_MASK		0x03000000
+#define		MULTI_GPU_TILE_SIZE_SHIFT		24
+#define		ROW_SIZE(x)             		((x) << 28)
+#define		ROW_SIZE_MASK				0x30000000
+#define		ROW_SIZE_SHIFT				28
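+/* Illustrative sketch, assuming RREG32(): the *_MASK/*_SHIFT pairs above
+ * decode the packed configuration, e.g.
+ *   u32 cfg = RREG32(GB_ADDR_CONFIG);
+ *   u32 num_pipes = (cfg & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+ */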
+
+#define	GB_TILE_MODE0					0x2644
+#       define MICRO_TILE_MODE(x)				((x) << 0)
+#              define	ADDR_SURF_DISPLAY_MICRO_TILING		0
+#              define	ADDR_SURF_THIN_MICRO_TILING		1
+#              define	ADDR_SURF_DEPTH_MICRO_TILING		2
+#       define ARRAY_MODE(x)					((x) << 2)
+#              define	ARRAY_LINEAR_GENERAL			0
+#              define	ARRAY_LINEAR_ALIGNED			1
+#              define	ARRAY_1D_TILED_THIN1			2
+#              define	ARRAY_2D_TILED_THIN1			4
+#       define PIPE_CONFIG(x)					((x) << 6)
+#              define	ADDR_SURF_P2				0
+#              define	ADDR_SURF_P4_8x16			4
+#              define	ADDR_SURF_P4_16x16			5
+#              define	ADDR_SURF_P4_16x32			6
+#              define	ADDR_SURF_P4_32x32			7
+#              define	ADDR_SURF_P8_16x16_8x16			8
+#              define	ADDR_SURF_P8_16x32_8x16			9
+#              define	ADDR_SURF_P8_32x32_8x16			10
+#              define	ADDR_SURF_P8_16x32_16x16		11
+#              define	ADDR_SURF_P8_32x32_16x16		12
+#              define	ADDR_SURF_P8_32x32_16x32		13
+#              define	ADDR_SURF_P8_32x64_32x32		14
+#       define TILE_SPLIT(x)					((x) << 11)
+#              define	ADDR_SURF_TILE_SPLIT_64B		0
+#              define	ADDR_SURF_TILE_SPLIT_128B		1
+#              define	ADDR_SURF_TILE_SPLIT_256B		2
+#              define	ADDR_SURF_TILE_SPLIT_512B		3
+#              define	ADDR_SURF_TILE_SPLIT_1KB		4
+#              define	ADDR_SURF_TILE_SPLIT_2KB		5
+#              define	ADDR_SURF_TILE_SPLIT_4KB		6
+#       define BANK_WIDTH(x)					((x) << 14)
+#              define	ADDR_SURF_BANK_WIDTH_1			0
+#              define	ADDR_SURF_BANK_WIDTH_2			1
+#              define	ADDR_SURF_BANK_WIDTH_4			2
+#              define	ADDR_SURF_BANK_WIDTH_8			3
+#       define BANK_HEIGHT(x)					((x) << 16)
+#              define	ADDR_SURF_BANK_HEIGHT_1			0
+#              define	ADDR_SURF_BANK_HEIGHT_2			1
+#              define	ADDR_SURF_BANK_HEIGHT_4			2
+#              define	ADDR_SURF_BANK_HEIGHT_8			3
+#       define MACRO_TILE_ASPECT(x)				((x) << 18)
+#              define	ADDR_SURF_MACRO_ASPECT_1		0
+#              define	ADDR_SURF_MACRO_ASPECT_2		1
+#              define	ADDR_SURF_MACRO_ASPECT_4		2
+#              define	ADDR_SURF_MACRO_ASPECT_8		3
+#       define NUM_BANKS(x)					((x) << 20)
+#              define	ADDR_SURF_2_BANK			0
+#              define	ADDR_SURF_4_BANK			1
+#              define	ADDR_SURF_8_BANK			2
+#              define	ADDR_SURF_16_BANK			3
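+/* Illustrative sketch: a GB_TILE_MODE* entry is the OR of the fields above,
+ * e.g. a 2D-tiled display surface on a P4 pipe configuration might be
+ *   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ *   ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ *   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ *   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ *   NUM_BANKS(ADDR_SURF_16_BANK)
+ * (field values chosen for illustration only).
+ */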
+#define	GB_TILE_MODE1					0x2645
+#define	GB_TILE_MODE2					0x2646
+#define	GB_TILE_MODE3					0x2647
+#define	GB_TILE_MODE4					0x2648
+#define	GB_TILE_MODE5					0x2649
+#define	GB_TILE_MODE6					0x264a
+#define	GB_TILE_MODE7					0x264b
+#define	GB_TILE_MODE8					0x264c
+#define	GB_TILE_MODE9					0x264d
+#define	GB_TILE_MODE10					0x264e
+#define	GB_TILE_MODE11					0x264f
+#define	GB_TILE_MODE12					0x2650
+#define	GB_TILE_MODE13					0x2651
+#define	GB_TILE_MODE14					0x2652
+#define	GB_TILE_MODE15					0x2653
+#define	GB_TILE_MODE16					0x2654
+#define	GB_TILE_MODE17					0x2655
+#define	GB_TILE_MODE18					0x2656
+#define	GB_TILE_MODE19					0x2657
+#define	GB_TILE_MODE20					0x2658
+#define	GB_TILE_MODE21					0x2659
+#define	GB_TILE_MODE22					0x265a
+#define	GB_TILE_MODE23					0x265b
+#define	GB_TILE_MODE24					0x265c
+#define	GB_TILE_MODE25					0x265d
+#define	GB_TILE_MODE26					0x265e
+#define	GB_TILE_MODE27					0x265f
+#define	GB_TILE_MODE28					0x2660
+#define	GB_TILE_MODE29					0x2661
+#define	GB_TILE_MODE30					0x2662
+#define	GB_TILE_MODE31					0x2663
+
+#define	CB_PERFCOUNTER0_SELECT0				0x2688
+#define	CB_PERFCOUNTER0_SELECT1				0x2689
+#define	CB_PERFCOUNTER1_SELECT0				0x268A
+#define	CB_PERFCOUNTER1_SELECT1				0x268B
+#define	CB_PERFCOUNTER2_SELECT0				0x268C
+#define	CB_PERFCOUNTER2_SELECT1				0x268D
+#define	CB_PERFCOUNTER3_SELECT0				0x268E
+#define	CB_PERFCOUNTER3_SELECT1				0x268F
+
+#define	CB_CGTT_SCLK_CTRL				0x2698
+
+#define	GC_USER_RB_BACKEND_DISABLE			0x26DF
+#define		BACKEND_DISABLE_MASK			0x00FF0000
+#define		BACKEND_DISABLE_SHIFT			16
+
+#define	TCP_CHAN_STEER_LO				0x2B03
+#define	TCP_CHAN_STEER_HI				0x2B94
+
+#define	CP_RB0_BASE					0x3040
+#define	CP_RB0_CNTL					0x3041
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
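+/* Illustrative sketch, assuming WREG32() and log2-encoded size fields (an
+ * assumption here, mirroring the explicit log2 note on DMA_RB_SIZE further
+ * down): sizing ring 0 would look like
+ *   WREG32(CP_RB0_CNTL, RB_BUFSZ(rb_bufsz) | RB_BLKSZ(rb_blksz));
+ */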
+
+#define	CP_RB0_RPTR_ADDR				0x3043
+#define	CP_RB0_RPTR_ADDR_HI				0x3044
+#define	CP_RB0_WPTR					0x3045
+
+#define	CP_PFP_UCODE_ADDR				0x3054
+#define	CP_PFP_UCODE_DATA				0x3055
+#define	CP_ME_RAM_RADDR					0x3056
+#define	CP_ME_RAM_WADDR					0x3057
+#define	CP_ME_RAM_DATA					0x3058
+
+#define	CP_CE_UCODE_ADDR				0x305A
+#define	CP_CE_UCODE_DATA				0x305B
+
+#define	CP_RB1_BASE					0x3060
+#define	CP_RB1_CNTL					0x3061
+#define	CP_RB1_RPTR_ADDR				0x3062
+#define	CP_RB1_RPTR_ADDR_HI				0x3063
+#define	CP_RB1_WPTR					0x3064
+#define	CP_RB2_BASE					0x3065
+#define	CP_RB2_CNTL					0x3066
+#define	CP_RB2_RPTR_ADDR				0x3067
+#define	CP_RB2_RPTR_ADDR_HI				0x3068
+#define	CP_RB2_WPTR					0x3069
+#define CP_INT_CNTL_RING0                               0x306A
+#define CP_INT_CNTL_RING1                               0x306B
+#define CP_INT_CNTL_RING2                               0x306C
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define WAIT_MEM_SEM_INT_ENABLE                  (1 << 21)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define CP_RINGID2_INT_ENABLE                    (1 << 29)
+#       define CP_RINGID1_INT_ENABLE                    (1 << 30)
+#       define CP_RINGID0_INT_ENABLE                    (1 << 31)
+#define CP_INT_STATUS_RING0                             0x306D
+#define CP_INT_STATUS_RING1                             0x306E
+#define CP_INT_STATUS_RING2                             0x306F
+#       define WAIT_MEM_SEM_INT_STAT                    (1 << 21)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define CP_RINGID2_INT_STAT                      (1 << 29)
+#       define CP_RINGID1_INT_STAT                      (1 << 30)
+#       define CP_RINGID0_INT_STAT                      (1 << 31)
+
+#define	CP_MEM_SLP_CNTL					0x3079
+#       define CP_MEM_LS_EN                             (1 << 0)
+
+#define	CP_DEBUG					0x307F
+
+#define RLC_CNTL                                          0x30C0
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_RL_BASE                                       0x30C1
+#define RLC_RL_SIZE                                       0x30C2
+#define RLC_LB_CNTL                                       0x30C3
+#       define LOAD_BALANCE_ENABLE                        (1 << 0)
+#define RLC_SAVE_AND_RESTORE_BASE                         0x30C4
+#define RLC_LB_CNTR_MAX                                   0x30C5
+#define RLC_LB_CNTR_INIT                                  0x30C6
+
+#define RLC_CLEAR_STATE_RESTORE_BASE                      0x30C8
+
+#define RLC_UCODE_ADDR                                    0x30CB
+#define RLC_UCODE_DATA                                    0x30CC
+
+#define RLC_GPU_CLOCK_COUNT_LSB                           0x30CE
+#define RLC_GPU_CLOCK_COUNT_MSB                           0x30CF
+#define RLC_CAPTURE_GPU_CLOCK_COUNT                       0x30D0
+#define RLC_MC_CNTL                                       0x30D1
+#define RLC_UCODE_CNTL                                    0x30D2
+#define RLC_STAT                                          0x30D3
+#       define RLC_BUSY_STATUS                            (1 << 0)
+#       define GFX_POWER_STATUS                           (1 << 1)
+#       define GFX_CLOCK_STATUS                           (1 << 2)
+#       define GFX_LS_STATUS                              (1 << 3)
+
+#define	RLC_PG_CNTL					0x30D7
+#	define GFX_PG_ENABLE				(1 << 0)
+#	define GFX_PG_SRC				(1 << 1)
+
+#define	RLC_CGTT_MGCG_OVERRIDE				0x3100
+#define	RLC_CGCG_CGLS_CTRL				0x3101
+#	define CGCG_EN					(1 << 0)
+#	define CGLS_EN					(1 << 1)
+
+#define	RLC_TTOP_D					0x3105
+#	define RLC_PUD(x)				((x) << 0)
+#	define RLC_PUD_MASK				(0xff << 0)
+#	define RLC_PDD(x)				((x) << 8)
+#	define RLC_PDD_MASK				(0xff << 8)
+#	define RLC_TTPD(x)				((x) << 16)
+#	define RLC_TTPD_MASK				(0xff << 16)
+#	define RLC_MSD(x)				((x) << 24)
+#	define RLC_MSD_MASK				(0xff << 24)
+
+#define RLC_LB_INIT_CU_MASK                               0x3107
+
+#define	RLC_PG_AO_CU_MASK				0x310B
+#define	RLC_MAX_PG_CU					0x310C
+#	define MAX_PU_CU(x)				((x) << 0)
+#	define MAX_PU_CU_MASK				(0xff << 0)
+#define	RLC_AUTO_PG_CTRL				0x310D
+#	define AUTO_PG_EN				(1 << 0)
+#	define GRBM_REG_SGIT(x)				((x) << 3)
+#	define GRBM_REG_SGIT_MASK			(0xffff << 3)
+#	define PG_AFTER_GRBM_REG_ST(x)			((x) << 19)
+#	define PG_AFTER_GRBM_REG_ST_MASK		(0x1fff << 19)
+
+#define RLC_SERDES_WR_MASTER_MASK_0                       0x3115
+#define RLC_SERDES_WR_MASTER_MASK_1                       0x3116
+#define RLC_SERDES_WR_CTRL                                0x3117
+
+#define RLC_SERDES_MASTER_BUSY_0                          0x3119
+#define RLC_SERDES_MASTER_BUSY_1                          0x311A
+
+#define RLC_GCPM_GENERAL_3                                0x311E
+
+#define	DB_RENDER_CONTROL				0xA000
+
+#define DB_DEPTH_INFO                                   0xA00F
+
+#define PA_SC_RASTER_CONFIG                             0xA0D4
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+
+#define VGT_EVENT_INITIATOR                             0xA2A4
+#       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
+#       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
+#       define SAMPLE_STREAMOUTSTATS3                   (3 << 0)
+#       define CACHE_FLUSH_TS                           (4 << 0)
+#       define CACHE_FLUSH                              (6 << 0)
+#       define CS_PARTIAL_FLUSH                         (7 << 0)
+#       define VGT_STREAMOUT_RESET                      (10 << 0)
+#       define END_OF_PIPE_INCR_DE                      (11 << 0)
+#       define END_OF_PIPE_IB_END                       (12 << 0)
+#       define RST_PIX_CNT                              (13 << 0)
+#       define VS_PARTIAL_FLUSH                         (15 << 0)
+#       define PS_PARTIAL_FLUSH                         (16 << 0)
+#       define CACHE_FLUSH_AND_INV_TS_EVENT             (20 << 0)
+#       define ZPASS_DONE                               (21 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                (22 << 0)
+#       define PERFCOUNTER_START                        (23 << 0)
+#       define PERFCOUNTER_STOP                         (24 << 0)
+#       define PIPELINESTAT_START                       (25 << 0)
+#       define PIPELINESTAT_STOP                        (26 << 0)
+#       define PERFCOUNTER_SAMPLE                       (27 << 0)
+#       define SAMPLE_PIPELINESTAT                      (30 << 0)
+#       define SAMPLE_STREAMOUTSTATS                    (32 << 0)
+#       define RESET_VTX_CNT                            (33 << 0)
+#       define VGT_FLUSH                                (36 << 0)
+#       define BOTTOM_OF_PIPE_TS                        (40 << 0)
+#       define DB_CACHE_FLUSH_AND_INV                   (42 << 0)
+#       define FLUSH_AND_INV_DB_DATA_TS                 (43 << 0)
+#       define FLUSH_AND_INV_DB_META                    (44 << 0)
+#       define FLUSH_AND_INV_CB_DATA_TS                 (45 << 0)
+#       define FLUSH_AND_INV_CB_META                    (46 << 0)
+#       define CS_DONE                                  (47 << 0)
+#       define PS_DONE                                  (48 << 0)
+#       define FLUSH_AND_INV_CB_PIXEL_DATA              (49 << 0)
+#       define THREAD_TRACE_START                       (51 << 0)
+#       define THREAD_TRACE_STOP                        (52 << 0)
+#       define THREAD_TRACE_FLUSH                       (54 << 0)
+#       define THREAD_TRACE_FINISH                      (55 << 0)
+
+/* PIF PHY0 registers idx/data 0x8/0xc */
+#define PB0_PIF_CNTL                                      0x10
+#       define LS2_EXIT_TIME(x)                           ((x) << 17)
+#       define LS2_EXIT_TIME_MASK                         (0x7 << 17)
+#       define LS2_EXIT_TIME_SHIFT                        17
+#define PB0_PIF_PAIRING                                   0x11
+#       define MULTI_PIF                                  (1 << 25)
+#define PB0_PIF_PWRDOWN_0                                 0x12
+#       define PLL_POWER_STATE_IN_TXS2_0(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_0(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_SHIFT             10
+#       define PLL_RAMP_UP_TIME_0(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_0_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_0_SHIFT                   24
+#define PB0_PIF_PWRDOWN_1                                 0x13
+#       define PLL_POWER_STATE_IN_TXS2_1(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_1(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_SHIFT             10
+#       define PLL_RAMP_UP_TIME_1(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_1_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_1_SHIFT                   24
+
+#define PB0_PIF_PWRDOWN_2                                 0x17
+#       define PLL_POWER_STATE_IN_TXS2_2(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_2_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_2_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_2(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_2_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_2_SHIFT             10
+#       define PLL_RAMP_UP_TIME_2(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_2_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_2_SHIFT                   24
+#define PB0_PIF_PWRDOWN_3                                 0x18
+#       define PLL_POWER_STATE_IN_TXS2_3(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_3_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_3_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_3(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_3_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_3_SHIFT             10
+#       define PLL_RAMP_UP_TIME_3(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_3_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_3_SHIFT                   24
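+/* The PB0_PIF_* registers above are reached through the idx/data pair noted
+ * in the comment heading this block, not by direct MMIO. Illustrative
+ * sketch with hypothetical index/data helper names:
+ *   WREG32(PIF_PHY0_INDEX, PB0_PIF_PWRDOWN_0);
+ *   u32 v = RREG32(PIF_PHY0_DATA);
+ */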
+/* PIF PHY1 registers idx/data 0x10/0x14 */
+#define PB1_PIF_CNTL                                      0x10
+#define PB1_PIF_PAIRING                                   0x11
+#define PB1_PIF_PWRDOWN_0                                 0x12
+#define PB1_PIF_PWRDOWN_1                                 0x13
+
+#define PB1_PIF_PWRDOWN_2                                 0x17
+#define PB1_PIF_PWRDOWN_3                                 0x18
+/* PCIE registers idx/data 0x30/0x34 */
+#define PCIE_CNTL2                                        0x1c /* PCIE */
+#       define SLV_MEM_LS_EN                              (1 << 16)
+#       define SLV_MEM_AGGRESSIVE_LS_EN                   (1 << 17)
+#       define MST_MEM_LS_EN                              (1 << 18)
+#       define REPLAY_MEM_LS_EN                           (1 << 19)
+#define PCIE_LC_STATUS1                                   0x28 /* PCIE */
+#       define LC_REVERSE_RCVR                            (1 << 0)
+#       define LC_REVERSE_XMIT                            (1 << 1)
+#       define LC_OPERATING_LINK_WIDTH_MASK               (0x7 << 2)
+#       define LC_OPERATING_LINK_WIDTH_SHIFT              2
+#       define LC_DETECTED_LINK_WIDTH_MASK                (0x7 << 5)
+#       define LC_DETECTED_LINK_WIDTH_SHIFT               5
+
+#define PCIE_P_CNTL                                       0x40 /* PCIE */
+#       define P_IGNORE_EDB_ERR                           (1 << 6)
+
+/* PCIE PORT registers idx/data 0x38/0x3c */
+#define PCIE_LC_CNTL                                      0xa0
+#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
+#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
+#       define LC_L0S_INACTIVITY_SHIFT                    8
+#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
+#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
+#       define LC_L1_INACTIVITY_SHIFT                     12
+#       define LC_PMI_TO_L1_DIS                           (1 << 16)
+#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#       define LC_DYN_LANES_PWR_STATE(x)                  ((x) << 21)
+#       define LC_DYN_LANES_PWR_STATE_MASK                (0x3 << 21)
+#       define LC_DYN_LANES_PWR_STATE_SHIFT               21
+#define PCIE_LC_N_FTS_CNTL                                0xa3 /* PCIE_P */
+#       define LC_XMIT_N_FTS(x)                           ((x) << 0)
+#       define LC_XMIT_N_FTS_MASK                         (0xff << 0)
+#       define LC_XMIT_N_FTS_SHIFT                        0
+#       define LC_XMIT_N_FTS_OVERRIDE_EN                  (1 << 8)
+#       define LC_N_FTS_MASK                              (0xff << 24)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_GEN3_EN_STRAP                           (1 << 1)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 2)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_MASK         (0x3 << 3)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT        3
+#       define LC_FORCE_EN_SW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_SW_SPEED_CHANGE               (1 << 6)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 7)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 8)
+#       define LC_INITIATE_LINK_SPEED_CHANGE              (1 << 9)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 10)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     10
+#       define LC_CURRENT_DATA_RATE_MASK                  (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+#       define LC_CURRENT_DATA_RATE_SHIFT                 13
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 16)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 18)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 19)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN3               (1 << 20)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN3                (1 << 21)
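+/* Illustrative sketch, assuming helpers that go through the PCIE PORT
+ * idx/data pair noted above (RREG32_PCIE_PORT()/WREG32_PCIE_PORT() are
+ * assumed names): requesting a link speed renegotiation would look like
+ *   u32 v = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ *   v |= LC_INITIATE_LINK_SPEED_CHANGE;
+ *   WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, v);
+ */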
+
+#define PCIE_LC_CNTL2                                     0xb1
+#       define LC_ALLOW_PDWN_IN_L1                        (1 << 17)
+#       define LC_ALLOW_PDWN_IN_L23                       (1 << 18)
+
+#define PCIE_LC_CNTL3                                     0xb5 /* PCIE_P */
+#       define LC_GO_TO_RECOVERY                          (1 << 30)
+#define PCIE_LC_CNTL4                                     0xb6 /* PCIE_P */
+#       define LC_REDO_EQ                                 (1 << 5)
+#       define LC_SET_QUIESCE                             (1 << 13)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG				0x3bd3
+#define UVD_UDEC_DB_ADDR_CONFIG				0x3bd4
+#define UVD_UDEC_DBW_ADDR_CONFIG			0x3bd5
+#define UVD_RBC_RB_RPTR					0x3da4
+#define UVD_RBC_RB_WPTR					0x3da5
+#define UVD_STATUS					0x3daf
+
+#define	UVD_CGC_CTRL					0x3dc2
+#	define DCM					(1 << 0)
+#	define CG_DT(x)					((x) << 2)
+#	define CG_DT_MASK				(0xf << 2)
+#	define CLK_OD(x)				((x) << 6)
+#	define CLK_OD_MASK				(0x1f << 6)
+
+/* UVD CTX indirect */
+#define	UVD_CGC_MEM_CTRL				0xC0
+#define	UVD_CGC_CTRL2					0xC1
+#	define DYN_OR_EN				(1 << 0)
+#	define DYN_RR_EN				(1 << 1)
+#	define G_DIV_ID(x)				((x) << 2)
+#	define G_DIV_ID_MASK				(0x7 << 2)
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define RADEON_PACKET_TYPE3 3
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
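+/* Worked example: PACKET3(PACKET3_SET_CONTEXT_REG, 1) expands to
+ *   (3 << 30) | (0x69 << 8) | (1 << 16) = 0xC0016900,
+ * i.e. a type-3 header with opcode 0x69 and n = 1 in the count field.
+ */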
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define		PACKET3_BASE_INDEX(x)                  ((x) << 0)
+#define			GDS_PARTITION_BASE		2
+#define			CE_PARTITION_BASE		3
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_ALLOC_GDS				0x1B
+#define	PACKET3_WRITE_GDS_RAM				0x1C
+#define	PACKET3_ATOMIC_GDS				0x1D
+#define	PACKET3_ATOMIC					0x1E
+#define	PACKET3_OCCLUSION_QUERY				0x1F
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDIRECT_MULTI			0x2C
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER_CONST			0x31
+#define	PACKET3_INDIRECT_BUFFER				0x3F
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_WRITE_DATA				0x37
+#define		WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+                /* 0 - register
+		 * 1 - memory (sync - via GRBM)
+		 * 2 - tc/l2
+		 * 3 - gds
+		 * 4 - reserved
+		 * 5 - memory (async - direct)
+		 */
+#define		WR_ONE_ADDR                             (1 << 16)
+#define		WR_CONFIRM                              (1 << 20)
+#define		WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+                /* 0 - me
+		 * 1 - pfp
+		 * 2 - ce
+		 */
+#define	PACKET3_DRAW_INDEX_INDIRECT_MULTI		0x38
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define		WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+                /* 0 - always
+		 * 1 - <
+		 * 2 - <=
+		 * 3 - ==
+		 * 4 - !=
+		 * 5 - >=
+		 * 6 - >
+		 */
+#define		WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+                /* 0 - reg
+		 * 1 - mem
+		 */
+#define		WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+                /* 0 - me
+		 * 1 - pfp
+		 */
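+/* Illustrative sketch: a WAIT_REG_MEM control dword that has the ME poll a
+ * memory location until it equals the reference value would combine
+ *   WAIT_REG_MEM_FUNCTION(3) | WAIT_REG_MEM_MEM_SPACE(1) |
+ *   WAIT_REG_MEM_ENGINE(0)
+ * per the encodings listed above.
+ */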
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_COPY_DATA				0x40
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - DST_ADDR
+		 * 1 - GDS
+		 */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 * 2 - DATA
+		 */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#              define PACKET3_CP_DMA_CMD_RAW_WAIT  (1 << 30)
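+/* Illustrative sketch: dword 3 of the CP_DMA layout above, for an ME-driven
+ * memory-to-memory copy with CP_SYNC set, would combine
+ *   PACKET3_CP_DMA_CP_SYNC | PACKET3_CP_DMA_SRC_SEL(0) |
+ *   PACKET3_CP_DMA_ENGINE(0) | PACKET3_CP_DMA_DST_SEL(0)
+ * plus SRC_ADDR_HI in the low byte.
+ */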
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
+#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
+#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
+#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - EOP events
+		 * 6 - EOS events
+		 * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
+		 */
+#define		INV_L2                                  (1 << 20)
+                /* INV TC L2 cache when EVENT_INDEX = 7 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
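+/* Illustrative sketch: an end-of-pipe fence that writes a 64-bit value and
+ * interrupts once the write is confirmed could be emitted as
+ *   PACKET3(PACKET3_EVENT_WRITE_EOP, 4)
+ *   EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5)
+ *   lower 32 bits of the fence address
+ *   DATA_SEL(2) | INT_SEL(2) | upper address bits
+ *   fence value (two dwords)
+ */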
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_LOAD_CONFIG_REG				0x5F
+#define	PACKET3_LOAD_CONTEXT_REG			0x60
+#define	PACKET3_LOAD_SH_REG				0x61
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00002000
+#define		PACKET3_SET_CONFIG_REG_END			0x00002c00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x0000a000
+#define		PACKET3_SET_CONTEXT_REG_END			0x0000a400
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_SH_REG				0x76
+#define		PACKET3_SET_SH_REG_START			0x00002c00
+#define		PACKET3_SET_SH_REG_END				0x00003000
+#define	PACKET3_SET_SH_REG_OFFSET			0x77
+#define	PACKET3_ME_WRITE				0x7A
+#define	PACKET3_SCRATCH_RAM_WRITE			0x7D
+#define	PACKET3_SCRATCH_RAM_READ			0x7E
+#define	PACKET3_CE_WRITE				0x7F
+#define	PACKET3_LOAD_CONST_RAM				0x80
+#define	PACKET3_WRITE_CONST_RAM				0x81
+#define	PACKET3_WRITE_CONST_RAM_OFFSET			0x82
+#define	PACKET3_DUMP_CONST_RAM				0x83
+#define	PACKET3_INCREMENT_CE_COUNTER			0x84
+#define	PACKET3_INCREMENT_DE_COUNTER			0x85
+#define	PACKET3_WAIT_ON_CE_COUNTER			0x86
+#define	PACKET3_WAIT_ON_DE_COUNTER			0x87
+#define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
+#define	PACKET3_SET_CE_DE_COUNTERS			0x89
+#define	PACKET3_WAIT_ON_AVAIL_BUFFER			0x8A
+#define	PACKET3_SWITCH_BUFFER				0x8B
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x200 /* not a register */
+
+#define DMA_RB_CNTL                                       0x3400
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0x3401
+#define DMA_RB_RPTR                                       0x3402
+#define DMA_RB_WPTR                                       0x3403
+
+#define DMA_RB_RPTR_ADDR_HI                               0x3407
+#define DMA_RB_RPTR_ADDR_LO                               0x3408
+
+#define DMA_IB_CNTL                                       0x3409
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#       define CMD_VMID_FORCE                             (1 << 31)
+#define DMA_IB_RPTR                                       0x340a
+#define DMA_CNTL                                          0x340b
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0x340d
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_TILING_CONFIG  				  0x342e
+
+#define	DMA_POWER_CNTL					0x342f
+#       define MEM_POWER_OVERRIDE                       (1 << 8)
+#define	DMA_CLK_CTRL					0x3430
+
+#define	DMA_PG						0x3435
+#	define PG_CNTL_ENABLE				(1 << 0)
+#define	DMA_PGFSM_CONFIG				0x3436
+#define	DMA_PGFSM_WRITE					0x3437
+
+#define DMA_PACKET(cmd, b, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((b) & 0x1) << 26) |		\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)	((((cmd) & 0xF) << 28) |	\
+					 (((vmid) & 0xF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)		((2 << 28) |			\
+					 (1 << 26) |			\
+					 (1 << 21) |			\
+					 (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_POLL_REG_MEM				  0xe
+#define	DMA_PACKET_NOP					  0xf
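+/* Illustrative sketch: an async-DMA fence packet built with the macros
+ * above starts with the header
+ *   DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0)
+ * followed by the fence address and value dwords expected by the engine.
+ */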
+
+#define VCE_STATUS					0x20004
+#define VCE_VCPU_CNTL					0x20014
+#define		VCE_CLK_EN				(1 << 0)
+#define VCE_VCPU_CACHE_OFFSET0				0x20024
+#define VCE_VCPU_CACHE_SIZE0				0x20028
+#define VCE_VCPU_CACHE_OFFSET1				0x2002c
+#define VCE_VCPU_CACHE_SIZE1				0x20030
+#define VCE_VCPU_CACHE_OFFSET2				0x20034
+#define VCE_VCPU_CACHE_SIZE2				0x20038
+#define VCE_SOFT_RESET					0x20120
+#define 	VCE_ECPU_SOFT_RESET			(1 << 0)
+#define 	VCE_FME_SOFT_RESET			(1 << 2)
+#define VCE_RB_BASE_LO2					0x2016c
+#define VCE_RB_BASE_HI2					0x20170
+#define VCE_RB_SIZE2					0x20174
+#define VCE_RB_RPTR2					0x20178
+#define VCE_RB_WPTR2					0x2017c
+#define VCE_RB_BASE_LO					0x20180
+#define VCE_RB_BASE_HI					0x20184
+#define VCE_RB_SIZE					0x20188
+#define VCE_RB_RPTR					0x2018c
+#define VCE_RB_WPTR					0x20190
+#define VCE_CLOCK_GATING_A				0x202f8
+#define VCE_CLOCK_GATING_B				0x202fc
+#define VCE_UENC_CLOCK_GATING				0x205bc
+#define VCE_UENC_REG_CLOCK_GATING			0x205c0
+#define VCE_FW_REG_STATUS				0x20e10
+#	define VCE_FW_REG_STATUS_BUSY			(1 << 0)
+#	define VCE_FW_REG_STATUS_PASS			(1 << 3)
+#	define VCE_FW_REG_STATUS_DONE			(1 << 11)
+#define VCE_LMI_FW_START_KEYSEL				0x20e18
+#define VCE_LMI_FW_PERIODIC_CTRL			0x20e20
+#define VCE_LMI_CTRL2					0x20e74
+#define VCE_LMI_CTRL					0x20e98
+#define VCE_LMI_VM_CTRL					0x20ea0
+#define VCE_LMI_SWAP_CNTL				0x20eb4
+#define VCE_LMI_SWAP_CNTL1				0x20eb8
+#define VCE_LMI_CACHE_CTRL				0x20ef4
+
+#define VCE_CMD_NO_OP					0x00000000
+#define VCE_CMD_END					0x00000001
+#define VCE_CMD_IB					0x00000002
+#define VCE_CMD_FENCE					0x00000003
+#define VCE_CMD_TRAP					0x00000004
+#define VCE_CMD_IB_AUTO					0x00000005
+#define VCE_CMD_SEMAPHORE				0x00000006
+
+
+//#dce stupp
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define SI_CRTC0_REGISTER_OFFSET                0 //(0x6df0 - 0x6df0)/4
+#define SI_CRTC1_REGISTER_OFFSET                0x300 //(0x79f0 - 0x6df0)/4
+#define SI_CRTC2_REGISTER_OFFSET                0x2600 //(0x105f0 - 0x6df0)/4
+#define SI_CRTC3_REGISTER_OFFSET                0x2900 //(0x111f0 - 0x6df0)/4
+#define SI_CRTC4_REGISTER_OFFSET                0x2c00 //(0x11df0 - 0x6df0)/4
+#define SI_CRTC5_REGISTER_OFFSET                0x2f00 //(0x129f0 - 0x6df0)/4
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+#define AMDGPU_MM_INDEX		        0x0000
+#define AMDGPU_MM_DATA		        0x0001
+
+#define VERDE_NUM_CRTC 6
+#define	BLACKOUT_MODE_MASK			0x00000007
+#define	VGA_RENDER_CONTROL			0xC0
+#define R_000300_VGA_RENDER_CONTROL             0xC0
+#define C_000300_VGA_VSTATUS_CNTL               0xFFFCFFFF
+#define EVERGREEN_CRTC_STATUS                   0x1BA3
+#define EVERGREEN_CRTC_V_BLANK                  (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION          0x1BA4
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x1b8d
+#define EVERGREEN_CRTC_CONTROL                          0x1b9c
+#define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x1b9d
+#define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
+#define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x1ba8
+#define EVERGREEN_CRTC_UPDATE_LOCK                      0x1bb5
+#define EVERGREEN_MASTER_UPDATE_LOCK                    0x1bbd
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x1bbe
+#define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x1a08
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS          0x1a04
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS        0x1a05
+#define EVERGREEN_GRPH_UPDATE                           0x1a11
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0xc4
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0xc9
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+
+#define EVERGREEN_DATA_FORMAT                           0x1ac0
+#       define EVERGREEN_INTERLEAVE_EN                  (1 << 0)
+
+#define MC_SHARED_CHMAP__NOOFCHAN_MASK 0xf000
+#define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0xc
+
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL            (0 << 20)
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED            (1 << 20)
+#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1            (2 << 20)
+#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1            (4 << 20)
+
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x1a45
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x1845
+
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x1847
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x1a47
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x8
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x4
+
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x20000
+
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
+
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK 0x1
+
+#define R600_D1GRPH_SWAP_CONTROL                               0x1843
+#define R600_D1GRPH_SWAP_ENDIAN_NONE                    (0 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_16BIT                   (1 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_32BIT                   (2 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_64BIT                   (3 << 0)
+
+#define AVIVO_D1VGA_CONTROL					0x00cc
+#       define AVIVO_DVGA_CONTROL_MODE_ENABLE            (1 << 0)
+#       define AVIVO_DVGA_CONTROL_TIMING_SELECT          (1 << 8)
+#       define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT   (1 << 9)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN      (1 << 16)
+#       define AVIVO_DVGA_CONTROL_ROTATE                 (1 << 24)
+#define AVIVO_D2VGA_CONTROL					0x00ce
+
+#define R600_BUS_CNTL                                           0x1508
+#       define R600_BIOS_ROM_DIS                                (1 << 1)
+
+#define R600_ROM_CNTL                              0x580
+#       define R600_SCK_OVERWRITE                  (1 << 1)
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK  (0xf << 28)
+
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
+
+#define FMT_BIT_DEPTH_CONTROL                0x1bf2
+#define FMT_TRUNCATE_EN               (1 << 0)
+#define FMT_TRUNCATE_DEPTH            (1 << 4)
+#define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#define FMT_TEMPORAL_LEVEL            (1 << 24)
+#define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#define FMT_25FRC_SEL(x)              ((x) << 26)
+#define FMT_50FRC_SEL(x)              ((x) << 28)
+#define FMT_75FRC_SEL(x)              ((x) << 30)
+
+#define EVERGREEN_DC_LUT_CONTROL                        0x1a80
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE              0x1a81
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN             0x1a82
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED               0x1a83
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE              0x1a84
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN             0x1a85
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED               0x1a86
+#define EVERGREEN_DC_LUT_30_COLOR                       0x1a7c
+#define EVERGREEN_DC_LUT_RW_INDEX                       0x1a79
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK                  0x1a7e
+#define EVERGREEN_DC_LUT_RW_MODE                        0x1a78
+
+#define EVERGREEN_GRPH_ENABLE                           0x1a00
+#define EVERGREEN_GRPH_CONTROL                          0x1a01
+#define EVERGREEN_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#define EVERGREEN_GRPH_DEPTH_8BPP                0
+#define EVERGREEN_GRPH_DEPTH_16BPP               1
+#define EVERGREEN_GRPH_DEPTH_32BPP               2
+#define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#define EVERGREEN_ADDR_SURF_2_BANK               0
+#define EVERGREEN_ADDR_SURF_4_BANK               1
+#define EVERGREEN_ADDR_SURF_8_BANK               2
+#define EVERGREEN_ADDR_SURF_16_BANK              3
+#define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
+#define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+
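+/* 8 BPP */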
+#define EVERGREEN_GRPH_FORMAT_INDEXED            0
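+/* 16 BPP */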
+#define EVERGREEN_GRPH_FORMAT_ARGB1555           0
+#define EVERGREEN_GRPH_FORMAT_ARGB565            1
+#define EVERGREEN_GRPH_FORMAT_ARGB4444           2
+#define EVERGREEN_GRPH_FORMAT_AI88               3
+#define EVERGREEN_GRPH_FORMAT_MONO16             4
+#define EVERGREEN_GRPH_FORMAT_BGRA5551           5
+
+/* 32 BPP */
+#define EVERGREEN_GRPH_FORMAT_ARGB8888           0
+#define EVERGREEN_GRPH_FORMAT_ARGB2101010        1
+#define EVERGREEN_GRPH_FORMAT_32BPP_DIG          2
+#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010     3
+#define EVERGREEN_GRPH_FORMAT_BGRA1010102        4
+#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
+#define EVERGREEN_GRPH_FORMAT_RGB111110          6
+#define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
+#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
+#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1      2
+#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1      4
+
+#define EVERGREEN_GRPH_SWAP_CONTROL                     0x1a03
+#define EVERGREEN_GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
+#       define EVERGREEN_GRPH_ENDIAN_NONE               0
+#       define EVERGREEN_GRPH_ENDIAN_8IN16              1
+#       define EVERGREEN_GRPH_ENDIAN_8IN32              2
+#       define EVERGREEN_GRPH_ENDIAN_8IN64              3
+
+#define EVERGREEN_D3VGA_CONTROL                         0xf8
+#define EVERGREEN_D4VGA_CONTROL                         0xf9
+#define EVERGREEN_D5VGA_CONTROL                         0xfa
+#define EVERGREEN_D6VGA_CONTROL                         0xfb
+
+#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK      0xffffff00
+
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL         0x1a02
+#define EVERGREEN_LUT_10BIT_BYPASS_EN            (1 << 8)
+
+#define EVERGREEN_GRPH_PITCH                            0x1a06
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x1a08
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X                 0x1a09
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y                 0x1a0a
+#define EVERGREEN_GRPH_X_START                          0x1a0b
+#define EVERGREEN_GRPH_Y_START                          0x1a0c
+#define EVERGREEN_GRPH_X_END                            0x1a0d
+#define EVERGREEN_GRPH_Y_END                            0x1a0e
+#define EVERGREEN_GRPH_UPDATE                           0x1a11
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+#define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL                     0x1a12
+#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+#define EVERGREEN_VIEWPORT_START                        0x1b5c
+#define EVERGREEN_VIEWPORT_SIZE                         0x1b5d
+#define EVERGREEN_DESKTOP_HEIGHT                        0x1ac1
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL                           0x1a66
+#       define EVERGREEN_CURSOR_EN                      (1 << 0)
+#       define EVERGREEN_CURSOR_MODE(x)                 (((x) & 0x3) << 8)
+#       define EVERGREEN_CURSOR_MONO                    0
+#       define EVERGREEN_CURSOR_24_1                    1
+#       define EVERGREEN_CURSOR_24_8_PRE_MULT           2
+#       define EVERGREEN_CURSOR_24_8_UNPRE_MULT         3
+#       define EVERGREEN_CURSOR_2X_MAGNIFY              (1 << 16)
+#       define EVERGREEN_CURSOR_FORCE_MC_ON             (1 << 20)
+#       define EVERGREEN_CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
+#       define EVERGREEN_CURSOR_URGENT_ALWAYS           0
+#       define EVERGREEN_CURSOR_URGENT_1_8              1
+#       define EVERGREEN_CURSOR_URGENT_1_4              2
+#       define EVERGREEN_CURSOR_URGENT_3_8              3
+#       define EVERGREEN_CURSOR_URGENT_1_2              4
+#define EVERGREEN_CUR_SURFACE_ADDRESS                   0x1a67
+#       define EVERGREEN_CUR_SURFACE_ADDRESS_MASK       0xfffff000
+#define EVERGREEN_CUR_SIZE                              0x1a68
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH              0x1a69
+#define EVERGREEN_CUR_POSITION                          0x1a6a
+#define EVERGREEN_CUR_HOT_SPOT                          0x1a6b
+#define EVERGREEN_CUR_COLOR1                            0x1a6c
+#define EVERGREEN_CUR_COLOR2                            0x1a6d
+#define EVERGREEN_CUR_UPDATE                            0x1a6e
+#       define EVERGREEN_CURSOR_UPDATE_PENDING          (1 << 0)
+#       define EVERGREEN_CURSOR_UPDATE_TAKEN            (1 << 1)
+#       define EVERGREEN_CURSOR_UPDATE_LOCK             (1 << 16)
+#       define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+#define NI_INPUT_CSC_CONTROL                           0x1a35
+#       define NI_INPUT_CSC_GRPH_MODE(x)               (((x) & 0x3) << 0)
+#       define NI_INPUT_CSC_BYPASS                     0
+#       define NI_INPUT_CSC_PROG_COEFF                 1
+#       define NI_INPUT_CSC_PROG_SHARED_MATRIXA        2
+#       define NI_INPUT_CSC_OVL_MODE(x)                (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL                          0x1a3c
+#       define NI_OUTPUT_CSC_GRPH_MODE(x)              (((x) & 0x7) << 0)
+#       define NI_OUTPUT_CSC_BYPASS                    0
+#       define NI_OUTPUT_CSC_TV_RGB                    1
+#       define NI_OUTPUT_CSC_YCBCR_601                 2
+#       define NI_OUTPUT_CSC_YCBCR_709                 3
+#       define NI_OUTPUT_CSC_PROG_COEFF                4
+#       define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB       5
+#       define NI_OUTPUT_CSC_OVL_MODE(x)               (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL                             0x1a58
+#       define NI_GRPH_DEGAMMA_MODE(x)                 (((x) & 0x3) << 0)
+#       define NI_DEGAMMA_BYPASS                       0
+#       define NI_DEGAMMA_SRGB_24                      1
+#       define NI_DEGAMMA_XVYCC_222                    2
+#       define NI_OVL_DEGAMMA_MODE(x)                  (((x) & 0x3) << 4)
+#       define NI_ICON_DEGAMMA_MODE(x)                 (((x) & 0x3) << 8)
+#       define NI_CURSOR_DEGAMMA_MODE(x)               (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL                         0x1a59
+#       define NI_GRPH_GAMUT_REMAP_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_GAMUT_REMAP_BYPASS                   0
+#       define NI_GAMUT_REMAP_PROG_COEFF               1
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA      2
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB      3
+#       define NI_OVL_GAMUT_REMAP_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL                             0x1aa0
+#       define NI_GRPH_REGAMMA_MODE(x)                 (((x) & 0x7) << 0)
+#       define NI_REGAMMA_BYPASS                       0
+#       define NI_REGAMMA_SRGB_24                      1
+#       define NI_REGAMMA_XVYCC_222                    2
+#       define NI_REGAMMA_PROG_A                       3
+#       define NI_REGAMMA_PROG_B                       4
+#       define NI_OVL_REGAMMA_MODE(x)                  (((x) & 0x7) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL                       0x1a2d
+#       define NI_GRPH_PRESCALE_BYPASS                 (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL                        0x1a31
+#       define NI_OVL_PRESCALE_BYPASS                  (1 << 4)
+
+#define NI_INPUT_GAMMA_CONTROL                         0x1a10
+#       define NI_GRPH_INPUT_GAMMA_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_INPUT_GAMMA_USE_LUT                  0
+#       define NI_INPUT_GAMMA_BYPASS                   1
+#       define NI_INPUT_GAMMA_SRGB_24                  2
+#       define NI_INPUT_GAMMA_XVYCC_222                3
+#       define NI_OVL_INPUT_GAMMA_MODE(x)              (((x) & 0x3) << 4)
+
+#define IH_RB_WPTR__RB_OVERFLOW_MASK	0x1
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000
+#define SRBM_STATUS__IH_BUSY_MASK	0x20000
+#define SRBM_SOFT_RESET__SOFT_RESET_IH_MASK	0x400
+
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
+
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
+
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
+
+#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
+#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
+#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
+#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
+
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
+
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x3
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x6
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x200
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x1000
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xc
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8000
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40000
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x12
+
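+/* Memory type is encoded in bits 31:28 of MC_SEQ_MISC0. */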
+#define MC_SEQ_MISC0__MT__MASK	0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
+#define MC_SEQ_MISC0__MT__DDR2   0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
+#define MC_SEQ_MISC0__MT__HBM    0x60000000
+#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
+
+#define SRBM_STATUS__MCB_BUSY_MASK 0x200
+#define SRBM_STATUS__MCB_BUSY__SHIFT 0x9
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK 0x400
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY__SHIFT 0xa
+#define SRBM_STATUS__MCC_BUSY_MASK 0x800
+#define SRBM_STATUS__MCC_BUSY__SHIFT 0xb
+#define SRBM_STATUS__MCD_BUSY_MASK 0x1000
+#define SRBM_STATUS__MCD_BUSY__SHIFT 0xc
+#define SRBM_STATUS__VMC_BUSY_MASK 0x100
+#define SRBM_STATUS__VMC_BUSY__SHIFT 0x8
+
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000
+#define CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK 0x4000000
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x800000
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x400000
+#define PACKET3_SEM_WAIT_ON_SIGNAL    (0x1 << 12)
+#define PACKET3_SEM_SEL_SIGNAL	    (0x6 << 29)
+#define PACKET3_SEM_SEL_WAIT	    (0x7 << 29)
+
+#define CONFIG_CNTL	0x1509
+#define CC_DRM_ID_STRAPS	0x1559
+#define AMDGPU_PCIE_INDEX	0xc
+#define AMDGPU_PCIE_DATA	0xd
+
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0x3411
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0x3412
+#define DMA_MODE                                          0x342f
+#define DMA_RB_RPTR_ADDR_HI                               0x3407
+#define DMA_RB_RPTR_ADDR_LO                               0x3408
+#define DMA_BUSY_MASK 0x20
+#define DMA1_BUSY_MASK 0x40
+#define SDMA_MAX_INSTANCE 2
+
+#define PCIE_BUS_CLK    10000
+#define TCLK            (PCIE_BUS_CLK / 10)
+#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK		0xf0000000
+#define CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define	PCIE_PORT_INDEX					0xe
+#define	PCIE_PORT_DATA					0xf
+#define EVERGREEN_PIF_PHY0_INDEX                        0x8
+#define EVERGREEN_PIF_PHY0_DATA                         0xc
+#define EVERGREEN_PIF_PHY1_INDEX                        0x10
+#define EVERGREEN_PIF_PHY1_DATA				0x14
+
+#define	MC_VM_FB_OFFSET					0x81a
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
index f3e53b1..19802e9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
@@ -34,6 +34,7 @@
 #define mmUVD_UDEC_ADDR_CONFIG                                                  0x3bd3
 #define mmUVD_UDEC_DB_ADDR_CONFIG                                               0x3bd4
 #define mmUVD_UDEC_DBW_ADDR_CONFIG                                              0x3bd5
+#define mmUVD_NO_OP                                                             0x3bff
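+/* mmUVD_NO_OP: scratch register used as the target of ring NOP packets */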
 #define mmUVD_SEMA_CNTL                                                         0x3d00
 #define mmUVD_LMI_EXT40_ADDR                                                    0x3d26
 #define mmUVD_CTX_INDEX                                                         0x3d28
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
index eb4cf53..cc972d23 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
@@ -34,6 +34,7 @@
 #define mmUVD_UDEC_ADDR_CONFIG                                                  0x3bd3
 #define mmUVD_UDEC_DB_ADDR_CONFIG                                               0x3bd4
 #define mmUVD_UDEC_DBW_ADDR_CONFIG                                              0x3bd5
+#define mmUVD_NO_OP                                                             0x3bff
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW                                          0x3c69
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH                                         0x3c68
 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW                                          0x3c67
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index ec69869..378f4b6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -35,6 +35,7 @@
 #define mmUVD_UDEC_DB_ADDR_CONFIG                                               0x3bd4
 #define mmUVD_UDEC_DBW_ADDR_CONFIG                                              0x3bd5
 #define mmUVD_POWER_STATUS_U                                                    0x3bfd
+#define mmUVD_NO_OP                                                             0x3bff
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW                                          0x3c69
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH                                         0x3c68
 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW                                          0x3c67
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 3493da5..4a4d379 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -494,6 +494,7 @@
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for big-endian hosts
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter
@@ -526,6 +527,7 @@
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for big-endian hosts
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index b86aba9..6aa8938 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -119,6 +119,8 @@
 	CGS_SYSTEM_INFO_PG_FLAGS,
 	CGS_SYSTEM_INFO_GFX_CU_INFO,
 	CGS_SYSTEM_INFO_GFX_SE_INFO,
+	CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
+	CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
 	CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
 
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index abbb658b..b1d1940 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -31,6 +31,7 @@
 #include "eventmanager.h"
 #include "pp_debug.h"
 
+
 #define PP_CHECK(handle)						\
 	do {								\
 		if ((handle) == NULL || (handle)->pp_valid != PP_VALID)	\
@@ -162,12 +163,12 @@
 	pp_handle = (struct pp_instance *)handle;
 	eventmgr = pp_handle->eventmgr;
 
-	if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL)
+	if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL)
 		eventmgr->pp_eventmgr_fini(eventmgr);
 
 	smumgr = pp_handle->smu_mgr;
 
-	if (smumgr != NULL || smumgr->smumgr_funcs != NULL ||
+	if (smumgr != NULL && smumgr->smumgr_funcs != NULL &&
 		smumgr->smumgr_funcs->smu_fini != NULL)
 		smumgr->smumgr_funcs->smu_fini(smumgr);
 
@@ -537,7 +538,6 @@
 		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
 		break;
 	case AMD_PP_EVENT_READJUST_POWER_STATE:
-		pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
 		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
 		break;
 	default:
@@ -764,15 +764,12 @@
 	PP_CHECK_HW(hwmgr);
 
 	if (!hwmgr->hardcode_pp_table) {
-		hwmgr->hardcode_pp_table =
-				kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
+						   hwmgr->soft_pp_table_size,
+						   GFP_KERNEL);
 
 		if (!hwmgr->hardcode_pp_table)
 			return -ENOMEM;
-
-		/* to avoid powerplay crash when hardcode pptable is empty */
-		memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
-				hwmgr->soft_pp_table_size);
 	}
 
 	memcpy(hwmgr->hardcode_pp_table, buf, size);
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
index a46225c..1d1875a 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
@@ -70,11 +70,12 @@
 	int i;
 
 	table_entries = hwmgr->num_ps;
+
 	state = hwmgr->ps;
 
 	for (i = 0; i < table_entries; i++) {
 		if (state->id == *state_id) {
-			hwmgr->request_ps = state;
+			memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
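+			/* request_ps is now a separately allocated buffer, so copy by value */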
 			return 0;
 		}
 		state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
@@ -106,7 +107,7 @@
 	if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
 		phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
 		phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
-		hwmgr->current_ps = requested;
+		memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index f7ce4cb..6e359c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -4,13 +4,15 @@
 
 HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
 	       hardwaremanager.o pp_acpi.o cz_hwmgr.o \
-               cz_clockpowergating.o \
-	       tonga_processpptables.o ppatomctrl.o \
+               cz_clockpowergating.o tonga_powertune.o\
+	       process_pptables_v1_0.o ppatomctrl.o \
                tonga_hwmgr.o pppcielanes.o  tonga_thermal.o\
                fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
                fiji_clockpowergating.o fiji_thermal.o \
 	       polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
-	       polaris10_clockpowergating.o
+	       polaris10_clockpowergating.o iceland_hwmgr.o \
+	       iceland_clockpowergating.o iceland_thermal.o \
+	       iceland_powertune.o
 
 AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 8cc0df9..5ecef17 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -178,7 +178,6 @@
 	int result;
 
 	cz_hwmgr->gfx_ramp_step = 256*25/100;
-
 	cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */
 
 	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
@@ -186,33 +185,19 @@
 
 	cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
 	cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;
-
 	cz_hwmgr->clock_slow_down_freq = 25000;
-
 	cz_hwmgr->skip_clock_slow_down = 1;
-
 	cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
-
 	cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
-
 	cz_hwmgr->voting_rights_clients = 0x00C00033;
-
 	cz_hwmgr->static_screen_threshold = 8;
-
 	cz_hwmgr->ddi_power_gating_disabled = 0;
-
 	cz_hwmgr->bapm_enabled = 1;
-
 	cz_hwmgr->voltage_drop_threshold = 0;
-
 	cz_hwmgr->gfx_power_gating_threshold = 500;
-
 	cz_hwmgr->vce_slow_sclk_threshold = 20000;
-
 	cz_hwmgr->dce_slow_sclk_threshold = 30000;
-
 	cz_hwmgr->disable_driver_thermal_policy = 1;
-
 	cz_hwmgr->disable_nb_ps3_in_battery = 0;
 
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -221,9 +206,6 @@
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 				    PHM_PlatformCaps_NonABMSupportInPPLib);
 
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-					   PHM_PlatformCaps_SclkDeepSleep);
-
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 					PHM_PlatformCaps_DynamicM3Arbiter);
 
@@ -233,9 +215,7 @@
 				  PHM_PlatformCaps_DynamicPatchPowerState);
 
 	cz_hwmgr->thermal_auto_throttling_treshold = 0;
-
 	cz_hwmgr->tdr_clock = 0;
-
 	cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;
 
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -450,19 +430,12 @@
 			(uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
 
 	cz_hwmgr->boot_power_level.dsDividerIndex = 0;
-
 	cz_hwmgr->boot_power_level.ssDividerIndex = 0;
-
 	cz_hwmgr->boot_power_level.allowGnbSlow = 1;
-
 	cz_hwmgr->boot_power_level.forceNBPstate = 0;
-
 	cz_hwmgr->boot_power_level.hysteresis_up = 0;
-
 	cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
-
 	cz_hwmgr->boot_power_level.display_wm = 0;
-
 	cz_hwmgr->boot_power_level.vce_wm = 0;
 
 	return 0;
@@ -749,7 +722,6 @@
 		cz_hwmgr->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;
 
 	clock = hwmgr->display_config.min_core_set_clock;
-;
 	if (clock == 0)
 		printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");
 
@@ -832,7 +804,7 @@
 
 	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 					PPSMC_MSG_SetWatermarkFrequency,
-				      cz_hwmgr->sclk_dpm.soft_max_clk);
+					cz_hwmgr->sclk_dpm.soft_max_clk);
 
 	return 0;
 }
@@ -858,9 +830,9 @@
 		PP_DBG_LOG("enabling ALL SMU features.\n");
 		dpm_features |= NB_DPM_MASK;
 		ret = smum_send_msg_to_smc_with_parameter(
-							     hwmgr->smumgr,
-					 PPSMC_MSG_EnableAllSmuFeatures,
-							     dpm_features);
+							  hwmgr->smumgr,
+							  PPSMC_MSG_EnableAllSmuFeatures,
+							  dpm_features);
 		if (ret == 0)
 			cz_hwmgr->is_nb_dpm_enabled = true;
 	}
@@ -1246,7 +1218,7 @@
 
 static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-	if (hwmgr != NULL || hwmgr->backend != NULL) {
+	if (hwmgr != NULL && hwmgr->backend != NULL) {
 		kfree(hwmgr->backend);
 		kfree(hwmgr);
 	}
@@ -1402,10 +1374,12 @@
 						   PPSMC_MSG_SetUvdHardMin));
 
 			cz_enable_disable_uvd_dpm(hwmgr, true);
-		} else
+		} else {
 			cz_enable_disable_uvd_dpm(hwmgr, true);
-	} else
+		}
+	} else {
 		cz_enable_disable_uvd_dpm(hwmgr, false);
+	}
 
 	return 0;
 }
@@ -1690,13 +1664,10 @@
 	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
 
 	if (separation_time !=
-		hw_data->cc6_settings.cpu_pstate_separation_time
-		|| cc6_disable !=
-		hw_data->cc6_settings.cpu_cc6_disable
-		|| pstate_disable !=
-		hw_data->cc6_settings.cpu_pstate_disable
-		|| pstate_switch_disable !=
-		hw_data->cc6_settings.nb_pstate_switch_disable) {
+	    hw_data->cc6_settings.cpu_pstate_separation_time ||
+	    cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
+	    pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
+	    pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
 
 		hw_data->cc6_settings.cc6_setting_changed = true;
 
@@ -1799,8 +1770,7 @@
 	ps = cast_const_PhwCzPowerState(state);
 
 	level_index = index > ps->level - 1 ? ps->level - 1 : index;
-
-	level->coreClock  = ps->levels[level_index].engineClock;
+	level->coreClock = ps->levels[level_index].engineClock;
 
 	if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
 		for (i = 1; i < ps->level; i++) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 6483d68..74300d6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -44,8 +44,8 @@
 #include "dce/dce_10_0_sh_mask.h"
 #include "pppcielanes.h"
 #include "fiji_hwmgr.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
+#include "process_pptables_v1_0.h"
+#include "pptable_v1_0.h"
 #include "pp_debug.h"
 #include "pp_acpi.h"
 #include "amd_pcie_helpers.h"
@@ -112,7 +112,7 @@
 
 static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
 
-struct fiji_power_state *cast_phw_fiji_power_state(
+static struct fiji_power_state *cast_phw_fiji_power_state(
 				  struct pp_hw_power_state *hw_ps)
 {
 	PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
@@ -122,7 +122,8 @@
 	return (struct fiji_power_state *)hw_ps;
 }
 
-const struct fiji_power_state *cast_const_phw_fiji_power_state(
+static const struct
+fiji_power_state *cast_const_phw_fiji_power_state(
 				 const struct pp_hw_power_state *hw_ps)
 {
 	PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
@@ -618,9 +619,6 @@
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_TablelessHardwareInterface);
 
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkDeepSleep);
-
 	data->gpio_debug = 0;
 
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -1629,7 +1627,7 @@
  * @param voltage - voltage to look for
  * @return 0 on success
  */
-uint8_t fiji_get_voltage_index(
+static uint8_t fiji_get_voltage_index(
 		struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
 {
 	uint8_t count = (uint8_t) (lookup_table->count);
@@ -1693,7 +1691,7 @@
 * @return   always  0
 */
 
-int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
 		struct SMU73_Discrete_DpmTable *table)
 {
 	int result;
@@ -2304,7 +2302,7 @@
 * @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
 * @param    voltage     the SMC VOLTAGE structure to be populated
 */
-int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
 		uint32_t mclk, SMIO_Pattern *smio_pat)
 {
 	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -4008,7 +4006,7 @@
 
 	ps = (struct fiji_power_state *)(&state->hardware);
 
-	result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
+	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
 			fiji_get_pp_table_entry_callback_func);
 
 	/* This is the earliest time we have all the dependency table and the VBIOS boot state
@@ -4625,7 +4623,7 @@
 	return 0;
 }
 
-int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+static int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
 	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
 				  (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
@@ -4639,14 +4637,14 @@
 			PPSMC_MSG_VCEDPM_Disable);
 }
 
-int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+static int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
 	return smum_send_msg_to_smc(hwmgr->smumgr, enable?
 			PPSMC_MSG_SAMUDPM_Enable :
 			PPSMC_MSG_SAMUDPM_Disable);
 }
 
-int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
+static int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
 	return smum_send_msg_to_smc(hwmgr->smumgr, enable?
 			PPSMC_MSG_ACPDPM_Enable :
@@ -4883,7 +4881,7 @@
 	return;
 }
 
-int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
+static int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
 {
 	int result;
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -5159,7 +5157,7 @@
 	return 0;
 }
 
-int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
 	return fiji_program_display_gap(hwmgr);
 }
@@ -5190,7 +5188,7 @@
 			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
 }
 
-int fiji_dpm_set_interrupt_state(void *private_data,
+static int fiji_dpm_set_interrupt_state(void *private_data,
 					 unsigned src_id, unsigned type,
 					 int enabled)
 {
@@ -5238,7 +5236,7 @@
 	return 0;
 }
 
-int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
 					const void *thermal_interrupt_info)
 {
 	int result;
@@ -5408,7 +5406,10 @@
 		  (pl1->pcie_lane == pl2->pcie_lane));
 }
 
-int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int
+fiji_check_states_equal(struct pp_hwmgr *hwmgr,
+		const struct pp_hw_power_state *pstate1,
+		const struct pp_hw_power_state *pstate2, bool *equal)
 {
 	const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
 	const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
@@ -5440,7 +5441,8 @@
 	return 0;
 }
 
-bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool
+fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
 {
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
 	bool is_update_required = false;
@@ -5550,7 +5552,7 @@
 	.dynamic_state_management_enable = &fiji_enable_dpm_tasks,
 	.dynamic_state_management_disable = &fiji_disable_dpm_tasks,
 	.force_dpm_level = &fiji_dpm_force_dpm_level,
-	.get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
+	.get_num_of_pp_table_entries = &get_number_of_powerplay_table_entries_v1_0,
 	.get_power_state_size = &fiji_get_power_state_size,
 	.get_pp_table_entry = &fiji_get_pp_table_entry,
 	.patch_boot_state = &fiji_patch_boot_state,
@@ -5592,7 +5594,7 @@
 int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
 	hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
-	hwmgr->pptable_func = &tonga_pptable_funcs;
+	hwmgr->pptable_func = &pptable_v1_0_funcs;
 	pp_fiji_thermal_initialize(hwmgr);
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
index 4465845..f5992ea 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
@@ -57,8 +57,6 @@
 
 	/* Assume disabled */
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PowerContainment);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_CAC);
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SQRamping);
@@ -77,9 +75,8 @@
 
 		fiji_hwmgr->fast_watermark_threshold = 100;
 
-		if (hwmgr->powercontainment_enabled) {
-			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				    PHM_PlatformCaps_PowerContainment);
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_PowerContainment)) {
 			tmp = 1;
 			fiji_hwmgr->enable_dte_feature = tmp ? false : true;
 			fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
index 92976b6..7f431e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
@@ -152,7 +152,7 @@
 	return 0;
 }
 
-int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+static int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
 {
 	int result;
 
@@ -421,7 +421,7 @@
 * @param    Result the last failure code
 * @return   result from set temperature range routine
 */
-int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+static int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
 		void *input, void *output, void *storage, int result)
 {
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -533,7 +533,7 @@
 * @param    Result the last failure code
 * @return   result from set temperature range routine
 */
-int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
+static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
 		void *input, void *output, void *storage, int result)
 {
 /* If the fantable setup has failed we could have disabled
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 789f98a..14f8c1f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -24,8 +24,6 @@
 #include "hwmgr.h"
 #include "hardwaremanager.h"
 #include "power_state.h"
-#include "pp_acpi.h"
-#include "amd_acpi.h"
 #include "pp_debug.h"
 
 #define PHM_FUNC_CHECK(hw) \
@@ -34,38 +32,6 @@
 			return -EINVAL;				\
 	} while (0)
 
-void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
-{
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
-
-	if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
-		acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-}
-
 bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
 {
 	return hwmgr->block_hw_access;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 27e0762..524d0dd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -32,13 +32,22 @@
 #include "pp_debug.h"
 #include "ppatomctrl.h"
 #include "ppsmc.h"
-
-#define VOLTAGE_SCALE               4
+#include "pp_acpi.h"
+#include "amd_acpi.h"
 
 extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr);
+
+static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
+static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
+
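+/* Convert a VDDC value to an SVI2 VID code: with VOLTAGE_SCALE = 4 this is
+ * 6.25 mV steps down from 1.55 V (assuming vddc is expressed in mV). */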
+uint8_t convert_to_vid(uint16_t vddc)
+{
+	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
 
 int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 {
@@ -56,10 +65,12 @@
 	hwmgr->device = pp_init->device;
 	hwmgr->chip_family = pp_init->chip_family;
 	hwmgr->chip_id = pp_init->chip_id;
-	hwmgr->hw_revision = pp_init->rev_id;
 	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
 	hwmgr->power_source = PP_PowerSource_AC;
-	hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
+	hwmgr->pp_table_version = PP_TABLE_V1;
+
+	hwmgr_init_default_caps(hwmgr);
+	hwmgr_set_user_specify_caps(hwmgr);
 
 	switch (hwmgr->chip_family) {
 	case AMDGPU_FAMILY_CZ:
@@ -67,6 +78,9 @@
 		break;
 	case AMDGPU_FAMILY_VI:
 		switch (hwmgr->chip_id) {
+		case CHIP_TOPAZ:
+			iceland_hwmgr_init(hwmgr);
+			break;
 		case CHIP_TONGA:
 			tonga_hwmgr_init(hwmgr);
 			break;
@@ -85,8 +99,6 @@
 		return -EINVAL;
 	}
 
-	phm_init_dynamic_caps(hwmgr);
-
 	return 0;
 }
 
@@ -105,6 +117,8 @@
 	kfree(hwmgr->set_temperature_range.function_list);
 
 	kfree(hwmgr->ps);
+	kfree(hwmgr->current_ps);
+	kfree(hwmgr->request_ps);
 	kfree(hwmgr);
 	return 0;
 }
@@ -129,10 +143,17 @@
 					  sizeof(struct pp_power_state);
 
 	hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
-
 	if (hwmgr->ps == NULL)
 		return -ENOMEM;
 
+	hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
+	if (hwmgr->request_ps == NULL)
+		return -ENOMEM;
+
+	hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
+	if (hwmgr->current_ps == NULL)
+		return -ENOMEM;
+
 	state = hwmgr->ps;
 
 	for (i = 0; i < table_entries; i++) {
@@ -140,7 +161,8 @@
 
 		if (state->classification.flags & PP_StateClassificationFlag_Boot) {
 			hwmgr->boot_ps = state;
-			hwmgr->current_ps = hwmgr->request_ps = state;
+			memcpy(hwmgr->current_ps, state, size);
+			memcpy(hwmgr->request_ps, state, size);
 		}
 
 		state->id = i + 1; /* assigned unique num for every power state id */
@@ -150,6 +172,7 @@
 		state = (struct pp_power_state *)((unsigned long)state + size);
 	}
 
+
 	return 0;
 }
 
@@ -182,30 +205,6 @@
 	return 0;
 }
 
-int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
-				uint32_t index, uint32_t value, uint32_t mask)
-{
-	uint32_t i;
-	uint32_t cur_value;
-
-	if (hwmgr == NULL || hwmgr->device == NULL) {
-		printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < hwmgr->usec_timeout; i++) {
-		cur_value = cgs_read_register(hwmgr->device, index);
-		if ((cur_value & mask) != (value & mask))
-			break;
-		udelay(1);
-	}
-
-	/* timeout means wrong logic*/
-	if (i == hwmgr->usec_timeout)
-		return -1;
-	return 0;
-}
-
 
 /**
  * Returns once the part of the register indicated by the mask has
@@ -227,21 +226,7 @@
 	phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
 }
 
-void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
-					uint32_t indirect_port,
-					uint32_t index,
-					uint32_t value,
-					uint32_t mask)
-{
-	if (hwmgr == NULL || hwmgr->device == NULL) {
-		printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
-		return;
-	}
 
-	cgs_write_register(hwmgr->device, indirect_port, index);
-	phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
-				      value, mask);
-}
 
 bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
 {
@@ -462,6 +447,27 @@
 	return i - 1;
 }
 
+uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
+		uint32_t voltage)
+{
+	uint8_t count = (uint8_t) (voltage_table->count);
+	uint8_t i = 0;
+
+	PP_ASSERT_WITH_CODE((NULL != voltage_table),
+		"Voltage Table empty.", return 0;);
+	PP_ASSERT_WITH_CODE((0 != count),
+		"Voltage Table empty.", return 0;);
+
+	for (i = 0; i < count; i++) {
+		/* find first voltage bigger than requested */
+		if (voltage_table->entries[i].value >= voltage)
+			return i;
+	}
+
+	/* voltage is bigger than max voltage in the table */
+	return i - 1;
+}
+
 uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
 {
 	uint32_t  i;
@@ -549,7 +555,8 @@
 		table_clk_vlt->entries[2].v = 810;
 		table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
 		table_clk_vlt->entries[3].v = 900;
-		pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
+		if (pptable_info != NULL)
+			pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
 		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
 	}
 
@@ -615,3 +622,94 @@
 	printk(KERN_ERR "DAL requested level can not"
 			" found a available voltage in VDDC DPM Table \n");
 }
+
+void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
+{
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
+	if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
+		acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		PHM_PlatformCaps_DynamicPatchPowerState);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		PHM_PlatformCaps_EnableSMU7ThermalManagement);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DynamicPowerManagement);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_SMC);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_DynamicUVDState);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+	return;
+}
+
+int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
+{
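+	/* Mirror the user-specified amdgpu module options into platform caps. */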
+	if (amdgpu_sclk_deep_sleep_en)
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep);
+	else
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep);
+
+	if (amdgpu_powercontainment)
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			    PHM_PlatformCaps_PowerContainment);
+	else
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			    PHM_PlatformCaps_PowerContainment);
+
+	hwmgr->feature_mask = amdgpu_pp_feature_mask;
+
+	return 0;
+}
+
+int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+				uint32_t sclk, uint16_t id, uint16_t *voltage)
+{
+	uint32_t vol;
+	int ret = 0;
+
+	if (hwmgr->chip_id < CHIP_POLARIS10) {
+		atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
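+		/* guard against out-of-range EVV readings by falling back to a 1150 mV default */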
+		if (*voltage >= 2000 || *voltage == 0)
+			*voltage = 1150;
+	} else {
+		ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
+		*voltage = (uint16_t)(vol / 100);
+	}
+	return ret;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c
new file mode 100644
index 0000000..47949f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+
+#include "hwmgr.h"
+#include "iceland_clockpowergating.h"
+#include "ppsmc.h"
+#include "iceland_hwmgr.h"
+
+int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+{
+	/* iceland does not have MM hardware block */
+	return 0;
+}
+
+static int iceland_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
+{
+	/* iceland does not have MM hardware block */
+	return 0;
+}
+
+static int iceland_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
+{
+	/* iceland does not have MM hardware block */
+	return 0;
+}
+
+static int iceland_phm_powerup_vce(struct pp_hwmgr *hwmgr)
+{
+	/* iceland does not have MM hardware block */
+	return 0;
+}
+
+int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum
+		PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
+{
+	int ret = 0;
+
+	switch (block) {
+	case PHM_AsicBlock_UVD_MVC:
+	case PHM_AsicBlock_UVD:
+	case PHM_AsicBlock_UVD_HD:
+	case PHM_AsicBlock_UVD_SD:
+		if (gating == PHM_ClockGateSetting_StaticOff)
+			ret = iceland_phm_powerdown_uvd(hwmgr);
+		else
+			ret = iceland_phm_powerup_uvd(hwmgr);
+		break;
+	case PHM_AsicBlock_GFX:
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	data->uvd_power_gated = false;
+	data->vce_power_gated = false;
+
+	iceland_phm_powerup_uvd(hwmgr);
+	iceland_phm_powerup_vce(hwmgr);
+
+	return 0;
+}
+
+int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	if (bgate) {
+		iceland_update_uvd_dpm(hwmgr, true);
+		iceland_phm_powerdown_uvd(hwmgr);
+	} else {
+		iceland_phm_powerup_uvd(hwmgr);
+		iceland_update_uvd_dpm(hwmgr, false);
+	}
+
+	return 0;
+}
+
+int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	if (bgate)
+		return iceland_phm_powerdown_vce(hwmgr);
+	else
+		return iceland_phm_powerup_vce(hwmgr);
+}
+
+int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
+					const uint32_t *msg_id)
+{
+	/* iceland does not have MM hardware block */
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h
new file mode 100644
index 0000000..ff5ef00
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+
+#ifndef _ICELAND_CLOCK_POWER_GATING_H_
+#define _ICELAND_CLOCK_POWER_GATING_H_
+
+#include "iceland_hwmgr.h"
+#include "pp_asicblocks.h"
+
+extern int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
+extern int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+extern int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+extern int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
+extern int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
+extern int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
+#endif /* _ICELAND_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
new file mode 100644
index 0000000..a7b4bc6
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
@@ -0,0 +1,41 @@
+#ifndef ICELAND_DYN_DEFAULTS_H
+#define ICELAND_DYN_DEFAULTS_H
+
+enum ICELANDdpm_TrendDetection
+{
+	ICELANDdpm_TrendDetection_AUTO,
+	ICELANDdpm_TrendDetection_UP,
+	ICELANDdpm_TrendDetection_DOWN
+};
+typedef enum ICELANDdpm_TrendDetection ICELANDdpm_TrendDetection;
+
+
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0         0x3FFFC102
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1         0x000400
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2         0xC00080
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3         0xC00200
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4         0xC01680
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5         0xC00033
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6         0xC00033
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7         0x3FFFC000
+
+
+#define PPICELAND_THERMALPROTECTCOUNTER_DFLT        0x200
+
+#define PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT    0
+
+#define PPICELAND_STATICSCREENTHRESHOLD_DFLT        0x00C8
+
+#define PPICELAND_GFXIDLECLOCKSTOPTHRESHOLD_DFLT    0x200
+
+#define PPICELAND_REFERENCEDIVIDER_DFLT             4
+
+#define PPICELAND_ULVVOLTAGECHANGEDELAY_DFLT        1687
+
+#define PPICELAND_CGULVPARAMETER_DFLT               0x00040035
+#define PPICELAND_CGULVCONTROL_DFLT                 0x00007450
+#define PPICELAND_TARGETACTIVITY_DFLT               30
+#define PPICELAND_MCLK_TARGETACTIVITY_DFLT          10
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
new file mode 100644
index 0000000..5abe433
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
@@ -0,0 +1,5684 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include "linux/delay.h"
+#include "pp_acpi.h"
+#include "hwmgr.h"
+#include <atombios.h>
+#include "iceland_hwmgr.h"
+#include "pptable.h"
+#include "processpptables.h"
+#include "pp_debug.h"
+#include "ppsmc.h"
+#include "cgs_common.h"
+#include "pppcielanes.h"
+#include "iceland_dyn_defaults.h"
+#include "smumgr.h"
+#include "iceland_smumgr.h"
+#include "iceland_clockpowergating.h"
+#include "iceland_thermal.h"
+#include "iceland_powertune.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+
+#include "cgs_linux.h"
+#include "eventmgr.h"
+#include "amd_pcie_helpers.h"
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0       0x05
+#define MC_CG_SEQ_DRAMCONF_S1       0x06
+#define MC_CG_SEQ_YCLK_SUSPEND      0x04
+#define MC_CG_SEQ_YCLK_RESUME       0x0a
+
+#define PCIE_BUS_CLK                10000
+#define TCLK                        (PCIE_BUS_CLK / 10)
+
+#define SMC_RAM_END		    0x40000
+#define SMC_CG_IND_START            0xc0030000
+#define SMC_CG_IND_END              0xc0040000  /* First byte after SMC_CG_IND */
+
+#define VOLTAGE_SCALE               4
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+
+const uint32_t iceland_magic = (uint32_t)(PHM_VIslands_Magic);
+
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+
+/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
+enum DPM_EVENT_SRC {
+    DPM_EVENT_SRC_ANALOG = 0,               /* Internal analog trip point */
+    DPM_EVENT_SRC_EXTERNAL = 1,             /* External (GPIO 17) signal */
+    DPM_EVENT_SRC_DIGITAL = 2,              /* Internal digital trip point (DIG_THERM_DPM) */
+    DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,   /* Internal analog or external */
+    DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4   /* Internal digital or external */
+};
+
+static int iceland_read_clock_registers(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	data->clock_registers.vCG_SPLL_FUNC_CNTL         =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
+	data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
+	data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
+	data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
+	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
+	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
+	data->clock_registers.vDLL_CNTL                  =
+		cgs_read_register(hwmgr->device, mmDLL_CNTL);
+	data->clock_registers.vMCLK_PWRMGT_CNTL          =
+		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
+	data->clock_registers.vMPLL_AD_FUNC_CNTL         =
+		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
+	data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
+		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
+	data->clock_registers.vMPLL_FUNC_CNTL            =
+		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
+	data->clock_registers.vMPLL_FUNC_CNTL_1          =
+		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
+	data->clock_registers.vMPLL_FUNC_CNTL_2          =
+		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
+	data->clock_registers.vMPLL_SS1                  =
+		cgs_read_register(hwmgr->device, mmMPLL_SS1);
+	data->clock_registers.vMPLL_SS2                  =
+		cgs_read_register(hwmgr->device, mmMPLL_SS2);
+
+	return 0;
+}
+
+/**
+ * Find out if memory is GDDR5.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_get_memory_type(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	uint32_t temp;
+
+	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
+
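+	/* The memory type is encoded in MC_SEQ_MISC0 bits [31:28]; 5 means GDDR5. */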
+	data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
+			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
+			 MC_SEQ_MISC0_GDDR5_SHIFT));
+
+	return 0;
+}
+
+int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	/* iceland does not have MM hardware blocks */
+	return 0;
+}
+
+/**
+ * Enables Dynamic Power Management by SMC
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
+{
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);
+
+	return 0;
+}
+
+/**
+ * Find the MC microcode version and store it in the HwMgr struct
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
+{
+	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
+
+	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
+	return 0;
+}
+
+static int iceland_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	data->low_sclk_interrupt_threshold = 0;
+
+	return 0;
+}
+
+
+static int iceland_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+	int tmp_result, result = 0;
+
+	tmp_result = iceland_read_clock_registers(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to read clock registers!", result = tmp_result);
+
+	tmp_result = iceland_get_memory_type(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to get memory type!", result = tmp_result);
+
+	tmp_result = iceland_enable_acpi_power_management(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to enable ACPI power management!", result = tmp_result);
+
+	tmp_result = iceland_get_mc_microcode_version(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to get MC microcode version!", result = tmp_result);
+
+	tmp_result = iceland_init_sclk_threshold(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to init sclk threshold!", result = tmp_result);
+
+	return result;
+}
+
+static bool cf_iceland_voltage_control(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	return ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control;
+}
+
+/*
+ * -------------- Voltage Tables ----------------------
+ * If the voltage table would be bigger than what will fit into the
+ * state table on the SMC keep only the higher entries.
+ */
+
+static void iceland_trim_voltage_table_to_fit_state_table(
+		struct pp_hwmgr *hwmgr,
+		uint32_t max_voltage_steps,
+		pp_atomctrl_voltage_table *voltage_table)
+{
+	unsigned int i, diff;
+
+	if (voltage_table->count <= max_voltage_steps) {
+		return;
+	}
+
+	diff = voltage_table->count - max_voltage_steps;
+
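+	/* Shift the highest entries down, dropping the 'diff' lowest voltages. */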
+	for (i = 0; i < max_voltage_steps; i++) {
+		voltage_table->entries[i] = voltage_table->entries[i + diff];
+	}
+
+	voltage_table->count = max_voltage_steps;
+
+	return;
+}
+
+/**
+ * Enable voltage control
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_enable_voltage_control(struct pp_hwmgr *hwmgr)
+{
+	/* enable voltage control */
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
+
+	return 0;
+}
+
+static int iceland_get_svi2_voltage_table(struct pp_hwmgr *hwmgr,
+		struct phm_clock_voltage_dependency_table *voltage_dependency_table,
+		pp_atomctrl_voltage_table *voltage_table)
+{
+	uint32_t i;
+
+	PP_ASSERT_WITH_CODE((NULL != voltage_table),
+			"Voltage Dependency Table empty.", return -EINVAL;);
+
+	voltage_table->mask_low = 0;
+	voltage_table->phase_delay = 0;
+	voltage_table->count = voltage_dependency_table->count;
+
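+	/* SVI2 needs no SMIO pattern, so only the voltage values are copied. */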
+	for (i = 0; i < voltage_dependency_table->count; i++) {
+		voltage_table->entries[i].value =
+			voltage_dependency_table->entries[i].v;
+		voltage_table->entries[i].smio_low = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * Create Voltage Tables.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_construct_voltage_tables(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	int result;
+
+	/* GPIO voltage */
+	if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
+		result = atomctrl_get_voltage_table_v3(hwmgr,
+					VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
+					&data->vddc_voltage_table);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to retrieve VDDC table.", return result;);
+	} else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+		/* SVI2 VDDC voltage */
+		result = iceland_get_svi2_voltage_table(hwmgr,
+					hwmgr->dyn_state.vddc_dependency_on_mclk,
+					&data->vddc_voltage_table);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
+	}
+
+	PP_ASSERT_WITH_CODE(
+			(data->vddc_voltage_table.count <= (SMU71_MAX_LEVELS_VDDC)),
+			"Too many voltage values for VDDC. Trimming to fit state table.",
+			iceland_trim_voltage_table_to_fit_state_table(hwmgr,
+			SMU71_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
+			);
+
+	/* GPIO */
+	if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
+		result = atomctrl_get_voltage_table_v3(hwmgr,
+					VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
+		PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to retrieve VDDCI table.", return result;);
+	}
+
+	/* SVI2 VDDCI voltage */
+	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
+		result = iceland_get_svi2_voltage_table(hwmgr,
+					hwmgr->dyn_state.vddci_dependency_on_mclk,
+					&data->vddci_voltage_table);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to retrieve SVI2 VDDCI table from dependency table.", return result;);
+	}
+
+	PP_ASSERT_WITH_CODE(
+			(data->vddci_voltage_table.count <= (SMU71_MAX_LEVELS_VDDCI)),
+			"Too many voltage values for VDDCI. Trimming to fit state table.",
+			iceland_trim_voltage_table_to_fit_state_table(hwmgr,
+			SMU71_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
+			);
+
+
+	/* GPIO */
+	if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		result = atomctrl_get_voltage_table_v3(hwmgr,
+					VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
+		PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to retrieve table.", return result;);
+	}
+
+	/* SVI2 voltage control */
+	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+		result = iceland_get_svi2_voltage_table(hwmgr,
+					hwmgr->dyn_state.mvdd_dependency_on_mclk,
+					&data->mvdd_voltage_table);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to retrieve SVI2 MVDD table from dependency table.", return result;);
+	}
+
+	PP_ASSERT_WITH_CODE(
+			(data->mvdd_voltage_table.count <= (SMU71_MAX_LEVELS_MVDD)),
+			"Too many voltage values for MVDD. Trimming to fit state table.",
+			iceland_trim_voltage_table_to_fit_state_table(hwmgr,
+			SMU71_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
+			);
+
+	return 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr)
+{
+	return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+bool iceland_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
+{
+	bool result = true;
+
+	switch (inReg) {
+	case  mmMC_SEQ_RAS_TIMING:
+		*outReg = mmMC_SEQ_RAS_TIMING_LP;
+		break;
+
+	case  mmMC_SEQ_DLL_STBY:
+		*outReg = mmMC_SEQ_DLL_STBY_LP;
+		break;
+
+	case  mmMC_SEQ_G5PDX_CMD0:
+		*outReg = mmMC_SEQ_G5PDX_CMD0_LP;
+		break;
+
+	case  mmMC_SEQ_G5PDX_CMD1:
+		*outReg = mmMC_SEQ_G5PDX_CMD1_LP;
+		break;
+
+	case  mmMC_SEQ_G5PDX_CTRL:
+		*outReg = mmMC_SEQ_G5PDX_CTRL_LP;
+		break;
+
+	case mmMC_SEQ_CAS_TIMING:
+		*outReg = mmMC_SEQ_CAS_TIMING_LP;
+		break;
+
+	case mmMC_SEQ_MISC_TIMING:
+		*outReg = mmMC_SEQ_MISC_TIMING_LP;
+		break;
+
+	case mmMC_SEQ_MISC_TIMING2:
+		*outReg = mmMC_SEQ_MISC_TIMING2_LP;
+		break;
+
+	case mmMC_SEQ_PMG_DVS_CMD:
+		*outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
+		break;
+
+	case mmMC_SEQ_PMG_DVS_CTL:
+		*outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
+		break;
+
+	case mmMC_SEQ_RD_CTL_D0:
+		*outReg = mmMC_SEQ_RD_CTL_D0_LP;
+		break;
+
+	case mmMC_SEQ_RD_CTL_D1:
+		*outReg = mmMC_SEQ_RD_CTL_D1_LP;
+		break;
+
+	case mmMC_SEQ_WR_CTL_D0:
+		*outReg = mmMC_SEQ_WR_CTL_D0_LP;
+		break;
+
+	case mmMC_SEQ_WR_CTL_D1:
+		*outReg = mmMC_SEQ_WR_CTL_D1_LP;
+		break;
+
+	case mmMC_PMG_CMD_EMRS:
+		*outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+		break;
+
+	case mmMC_PMG_CMD_MRS:
+		*outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
+		break;
+
+	case mmMC_PMG_CMD_MRS1:
+		*outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+		break;
+
+	case mmMC_SEQ_PMG_TIMING:
+		*outReg = mmMC_SEQ_PMG_TIMING_LP;
+		break;
+
+	case mmMC_PMG_CMD_MRS2:
+		*outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+		break;
+
+	case mmMC_SEQ_WR_CTL_2:
+		*outReg = mmMC_SEQ_WR_CTL_2_LP;
+		break;
+
+	default:
+		result = false;
+		break;
+	}
+
+	return result;
+}
+
+int iceland_set_s0_mc_reg_index(phw_iceland_mc_reg_table *table)
+{
+	uint32_t i;
+	uint16_t address;
+
+	for (i = 0; i < table->last; i++) {
+		table->mc_reg_address[i].s0 =
+			iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+			? address : table->mc_reg_address[i].s1;
+	}
+	return 0;
+}
+
+int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_iceland_mc_reg_table *ni_table)
+{
+	uint8_t i, j;
+
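+	/* Bounds-check the VBIOS table against the SMC's fixed-size arrays. */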
+	PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+		"Invalid VramInfo table.", return -1);
+	PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+		"Invalid VramInfo table.", return -1);
+
+	for (i = 0; i < table->last; i++) {
+		ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+	}
+	ni_table->last = table->last;
+
+	for (i = 0; i < table->num_entries; i++) {
+		ni_table->mc_reg_table_entry[i].mclk_max =
+			table->mc_reg_table_entry[i].mclk_max;
+		for (j = 0; j < table->last; j++) {
+			ni_table->mc_reg_table_entry[i].mc_data[j] =
+				table->mc_reg_table_entry[i].mc_data[j];
+		}
+	}
+
+	ni_table->num_entries = table->num_entries;
+
+	return 0;
+}
+
+/**
+ * VBIOS omits some information to reduce size; recover it here.
+ * 1.   When mmMC_SEQ_MISC1 is seen, bits [31:16] (EMRS1) must be written to
+ *      mmMC_PMG_CMD_EMRS/_LP[15:0], and bits [15:0] (MRS) must be written to
+ *      mmMC_PMG_CMD_MRS/_LP[15:0].
+ * 2.   When mmMC_SEQ_RESERVE_M is seen, bits [15:0] (EMRS2) must be written to
+ *      mmMC_PMG_CMD_MRS1/_LP[15:0].
+ * 3.   These values must be set up for each clock range.
+ *
+ * @param    hwmgr the address of the powerplay hardware manager.
+ * @param    table the address of MCRegTable
+ * @return   0 on success, -1 on an invalid VRAM info table
+ */
+static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_iceland_mc_reg_table *table)
+{
+	uint8_t i, j, k;
+	uint32_t temp_reg;
+	const iceland_hwmgr *data = (const iceland_hwmgr *)(hwmgr->backend);
+
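+	/* Derived entries are appended at index j, just past the last VBIOS entry. */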
+	for (i = 0, j = table->last; i < table->last; i++) {
+		PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+			"Invalid VramInfo table.", return -1);
+		switch (table->mc_reg_address[i].s1) {
+		/*
+		 * mmMC_SEQ_MISC1: bits [31:16] (EMRS1) must be written to
+		 * mmMC_PMG_CMD_EMRS/_LP[15:0], and bits [15:0] (MRS) must
+		 * be written to mmMC_PMG_CMD_MRS/_LP[15:0].
+		 */
+		case mmMC_SEQ_MISC1:
+			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					((temp_reg & 0xffff0000)) |
+					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+			}
+			j++;
+			PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+				"Invalid VramInfo table.", return -1);
+
+			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+				if (!data->is_memory_GDDR5) {
+					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+				}
+			}
+			j++;
+			PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+				"Invalid VramInfo table.", return -1);
+
+			if (!data->is_memory_GDDR5) {
+				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+				for (k = 0; k < table->num_entries; k++) {
+					table->mc_reg_table_entry[k].mc_data[j] =
+						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+				}
+				j++;
+				PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+					"Invalid VramInfo table.", return -1);
+			}
+
+			break;
+
+		case mmMC_SEQ_RESERVE_M:
+			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+			}
+			j++;
+			PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+				"Invalid VramInfo table.", return -1);
+			break;
+
+		default:
+			break;
+		}
+
+	}
+
+	table->last = j;
+
+	return 0;
+}
+
+
+static int iceland_set_valid_flag(phw_iceland_mc_reg_table *table)
+{
+	uint8_t i, j;
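+
+	/*
+	 * A set bit i in validflag marks register i as taking different
+	 * values across the AC timing entries; registers that stay constant
+	 * do not need to be re-programmed per memory level.
+	 */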
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+				table->mc_reg_table_entry[j].mc_data[i]) {
+				table->validflag |= (1<<i);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	pp_atomctrl_mc_reg_table *table;
+	phw_iceland_mc_reg_table *ni_table = &data->iceland_mc_reg_table;
+	uint8_t module_index = iceland_get_memory_module_index(hwmgr);
+
+	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+	if (NULL == table)
+		return -ENOMEM;
+
+	/* Program additional LP registers that are no longer programmed by VBIOS */
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+	memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+	if (0 == result)
+		result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+	if (0 == result) {
+		iceland_set_s0_mc_reg_index(ni_table);
+		result = iceland_set_mc_special_registers(hwmgr, ni_table);
+	}
+
+	if (0 == result)
+		iceland_set_valid_flag(ni_table);
+
+	kfree(table);
+	return result;
+}
+
+/**
+ * Programs static screen threshold parameters.
+ *
+ * @param   hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	/* Set static screen threshold unit*/
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
+		CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
+		data->static_screen_threshold_unit);
+	/* Set static screen threshold*/
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
+		CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
+		data->static_screen_threshold);
+
+	return 0;
+}
+
+/**
+ * Setup display gap for glitch free memory clock switching.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_enable_display_gap(struct pp_hwmgr *hwmgr)
+{
+	uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
+							CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
+
+	display_gap = PHM_SET_FIELD(display_gap,
+					CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
+
+	display_gap = PHM_SET_FIELD(display_gap,
+					CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+		ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+	return 0;
+}
+
+/**
+ * Programs activity state transition voting clients
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_program_voting_clients(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	/* Clear reset for voting clients before enabling DPM */
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+		SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+		SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+		ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+		ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+		ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+		ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+		ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+		ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+		ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+		ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
+
+	return 0;
+}
+
+static int iceland_upload_firmware(struct pp_hwmgr *hwmgr)
+{
+	int ret = 0;
+
+	if (!iceland_is_smc_ram_running(hwmgr->smumgr))
+		ret = iceland_smu_upload_firmware_image(hwmgr->smumgr);
+
+	return ret;
+}
+
+/**
+ * Get the location of various tables inside the FW image.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	uint32_t tmp;
+	int result;
+	bool error = false;
+
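+	/* Each header field is read independently; 'error' accumulates any failures. */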
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, DpmTable),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->dpm_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, SoftRegisters),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->soft_regs_start = tmp;
+	}
+
+	error |= (0 != result);
+
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, mcRegisterTable),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->mc_reg_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, FanTable),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->fan_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->arb_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, Version),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		hwmgr->microcode_version_info.SMC = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, UlvSettings),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->ulv_settings_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	return error ? 1 : 0;
+}
+
+/*
+ * Copy one arb setting to another and then switch the active set.
+ * arbFreqSrc and arbFreqDest are each one of the MC_CG_ARB_FREQ_Fx constants.
+*/
+int iceland_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
+		uint32_t arbFreqSrc, uint32_t arbFreqDest)
+{
+	uint32_t mc_arb_dram_timing;
+	uint32_t mc_arb_dram_timing2;
+	uint32_t burst_time;
+	uint32_t mc_cg_config;
+
+	switch (arbFreqSrc) {
+	case MC_CG_ARB_FREQ_F0:
+		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+		break;
+
+	case MC_CG_ARB_FREQ_F1:
+		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
+		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
+		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
+		break;
+
+	default:
+		return -1;
+	}
+
+	switch (arbFreqDest) {
+	case MC_CG_ARB_FREQ_F0:
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
+		break;
+
+	case MC_CG_ARB_FREQ_F1:
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
+		break;
+
+	default:
+		return -1;
+	}
+
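+	/* Request that the MC arbiter switch over to the destination set. */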
+	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
+	mc_cg_config |= 0x0000000F;
+	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
+	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
+
+	return 0;
+}
+
+/**
+ * Initial switch from ARB F0->F1
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ * This function is to be called from the SetPowerState table.
+ */
+int iceland_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
+{
+	return iceland_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
+
+
+static int iceland_reset_single_dpm_table(
+	struct pp_hwmgr *hwmgr,
+	struct iceland_single_dpm_table *dpm_table,
+	uint32_t count)
+{
+	uint32_t i;
+	if (!(count <= MAX_REGULAR_DPM_NUMBER))
+		printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM table entries to exceed max number!\n");
+
+	dpm_table->count = count;
+	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
+		dpm_table->dpm_levels[i].enabled = 0;
+	}
+
+	return 0;
+}
+
+static void iceland_setup_pcie_table_entry(
+	struct iceland_single_dpm_table *dpm_table,
+	uint32_t index, uint32_t pcie_gen,
+	uint32_t pcie_lanes)
+{
+	dpm_table->dpm_levels[index].value = pcie_gen;
+	dpm_table->dpm_levels[index].param1 = pcie_lanes;
+	dpm_table->dpm_levels[index].enabled = 1;
+}
+
+/*
+ * Set up the PCIe DPM table as follows:
+ *
+ * A  = Performance State, Max, Gen Speed
+ * C  = Performance State, Min, Gen Speed
+ * 1  = Performance State, Max, Lane #
+ * 3  = Performance State, Min, Lane #
+ *
+ * B  = Power Saving State, Max, Gen Speed
+ * D  = Power Saving State, Min, Gen Speed
+ * 2  = Power Saving State, Max, Lane #
+ * 4  = Power Saving State, Min, Lane #
+ *
+ *
+ * DPM Index   Gen Speed   Lane #
+ * 5           A           1
+ * 4           B           2
+ * 3           C           1
+ * 2           D           2
+ * 1           C           3
+ * 0           D           4
+ *
+ */
+static int iceland_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
+				data->use_pcie_power_saving_levels),
+			"No pcie performance levels!", return -EINVAL);
+
+	if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
+		data->pcie_gen_power_saving = data->pcie_gen_performance;
+		data->pcie_lane_power_saving = data->pcie_lane_performance;
+	} else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
+		data->pcie_gen_performance = data->pcie_gen_power_saving;
+		data->pcie_lane_performance = data->pcie_lane_power_saving;
+	}
+
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU71_MAX_LEVELS_LINK);
+
+	/* Hardcode Pcie Table */
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	data->dpm_table.pcie_speed_table.count = 6;
+
+	return 0;
+
+}
+
+
+/*
+ * This function initializes all DPM state tables for SMU7 based on the dependency table.
+ * Dynamic state patching function will then trim these state tables to the allowed range based
+ * on the power policy or external client requests, such as UVD request, etc.
+ */
+static int iceland_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	uint32_t i;
+
+	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
+		hwmgr->dyn_state.vddc_dependency_on_sclk;
+	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
+		hwmgr->dyn_state.vddc_dependency_on_mclk;
+	struct phm_cac_leakage_table *std_voltage_table =
+		hwmgr->dyn_state.cac_leakage_table;
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
+		"SCLK dependency table is missing. This table is mandatory", return -1);
+	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
+		"SCLK dependency table has to have at least one entry. This table is mandatory", return -1);
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+		"MCLK dependency table is missing. This table is mandatory", return -1);
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
+		"MCLK dependency table has to have at least one entry. This table is mandatory", return -1);
+
+	/* clear the state table to reset everything to default */
+	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU71_MAX_LEVELS_GRAPHICS);
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU71_MAX_LEVELS_MEMORY);
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vddc_table, SMU71_MAX_LEVELS_VDDC);
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vdd_ci_table, SMU71_MAX_LEVELS_VDDCI);
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mvdd_table, SMU71_MAX_LEVELS_MVDD);
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
+		"SCLK dependency table is missing. This table is mandatory", return -1);
+	/* Initialize Sclk DPM table based on allow Sclk values*/
+	data->dpm_table.sclk_table.count = 0;
+
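+	/* Collapse runs of identical clock values so each DPM level is unique. */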
+	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
+				allowed_vdd_sclk_table->entries[i].clk) {
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+				allowed_vdd_sclk_table->entries[i].clk;
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /* TODO: (i == 0) ? 1 : 0 */
+			data->dpm_table.sclk_table.count++;
+		}
+	}
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+		"MCLK dependency table is missing. This table is mandatory", return -1);
+	/* Initialize Mclk DPM table based on allow Mclk values */
+	data->dpm_table.mclk_table.count = 0;
+	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
+			allowed_vdd_mclk_table->entries[i].clk) {
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+				allowed_vdd_mclk_table->entries[i].clk;
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /* TODO: (i == 0) ? 1 : 0 */
+			data->dpm_table.mclk_table.count++;
+		}
+	}
+
+	/* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
+	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
+		/* param1 is for corresponding std voltage */
+		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
+	}
+
+	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
+	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+	if (NULL != allowed_vdd_mclk_table) {
+		/* Initialize Vddci DPM table based on allow Mclk values */
+		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+			data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+			data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
+		}
+		data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
+	}
+
+	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
+
+	if (NULL != allowed_vdd_mclk_table) {
+		/*
+		 * Initialize MVDD DPM table based on allow Mclk
+		 * values
+		 */
+		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
+		}
+		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
+	}
+
+	/* setup PCIE gen speed levels*/
+	iceland_setup_default_pcie_tables(hwmgr);
+
+	/* save a copy of the default DPM table*/
+	memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct iceland_dpm_table));
+
+	return 0;
+}
+
+/**
+ * @brief iceland_get_voltage_index
+ *  Returns the index of the requested voltage record in the lookup table
+ * @param look_up_table - lookup list to search in
+ * @param voltage - voltage to look for
+ * @return index of the first entry with a voltage equal to or greater than
+ *         the requested value; the last index if no entry qualifies
+ */
+uint8_t iceland_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
+		uint16_t voltage)
+{
+	uint8_t count;
+	uint8_t i;
+
+	PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
+	PP_ASSERT_WITH_CODE((0 != look_up_table->count), "Lookup Table empty.", return 0;);
+
+	count = (uint8_t) (look_up_table->count);
+
+	for (i = 0; i < count; i++) {
+		/* find first voltage equal or bigger than requested */
+		if (look_up_table->entries[i].us_vdd >= voltage)
+			return i;
+	}
+
+	/* voltage is bigger than max voltage in the table */
+	return i-1;
+}
+
+
+static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+		pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+		uint16_t *lo)
+{
+	uint16_t v_index;
+	bool vol_found = false;
+	*hi = tab->value * VOLTAGE_SCALE;
+	*lo = tab->value * VOLTAGE_SCALE;
+
+	/* SCLK/VDDC Dependency Table has to exist. */
+	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+	                    "The SCLK/VDDC Dependency Table does not exist.\n",
+	                    return -EINVAL);
+
+	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+		pr_warning("CAC Leakage Table does not exist, using vddc.\n");
+		return 0;
+	}
+
+	/*
+	 * Since voltage in the sclk/vddc dependency table is not
+	 * necessarily in ascending order because of ELB voltage
+	 * patching, loop through entire list to find exact voltage.
+	 */
+	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+			vol_found = true;
+			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+			} else {
+				pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+			}
+			break;
+		}
+	}
+
+	/*
+	 * If voltage is not found in the first pass, loop again to
+	 * find the best match, equal or higher value.
+	 */
+	if (!vol_found) {
+		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+				vol_found = true;
+				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+				} else {
+					pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+				}
+				break;
+			}
+		}
+
+		if (!vol_found)
+			pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+	}
+
+	return 0;
+}
+
+static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+		pp_atomctrl_voltage_table_entry *tab,
+		SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+{
+	int result;
+
+	result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+			&smc_voltage_tab->StdVoltageHiSidd,
+			&smc_voltage_tab->StdVoltageLoSidd);
+	if (0 != result) {
+		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+	}
+
+	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
+
+	return 0;
+}
+
+/**
+ * Vddc table preparation for SMC.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+			SMU71_Discrete_DpmTable *table)
+{
+	unsigned int count;
+	int result;
+
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	table->VddcLevelCount = data->vddc_voltage_table.count;
+	for (count = 0; count < table->VddcLevelCount; count++) {
+		result = iceland_populate_smc_voltage_table(hwmgr,
+				&data->vddc_voltage_table.entries[count],
+				&table->VddcLevel[count]);
+		PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC VDDC voltage table", return -EINVAL);
+
+		/* GPIO voltage control */
+		if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+			table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+		else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+			table->VddcLevel[count].Smio = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+	return 0;
+}
+
+/**
+ * Vddci table preparation for SMC.
+ *
+ * @param    *hwmgr The address of the hardware manager.
+ * @param    *table The SMC DPM table structure to be populated.
+ * @return   0
+ */
+static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+			SMU71_Discrete_DpmTable *table)
+{
+	int result;
+	uint32_t count;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	table->VddciLevelCount = data->vddci_voltage_table.count;
+	for (count = 0; count < table->VddciLevelCount; count++) {
+		result = iceland_populate_smc_voltage_table(hwmgr,
+				&data->vddci_voltage_table.entries[count],
+				&table->VddciLevel[count]);
+		PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC VDDCI voltage table", return -EINVAL);
+
+		/* GPIO voltage control */
+		if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control)
+			table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+		else
+			table->VddciLevel[count].Smio = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+	return 0;
+}
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param    *hwmgr The address of the hardware manager.
+ * @param    *table The SMC DPM table structure to be populated.
+ * @return   0
+ */
+static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+			SMU71_Discrete_DpmTable *table)
+{
+	int result;
+	uint32_t count;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	table->MvddLevelCount = data->mvdd_voltage_table.count;
+	for (count = 0; count < table->MvddLevelCount; count++) {
+		result = iceland_populate_smc_voltage_table(hwmgr,
+				&data->mvdd_voltage_table.entries[count],
+				&table->MvddLevel[count]);
+		PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC MVDD voltage table", return -EINVAL);
+
+		/* GPIO voltage control */
+		if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+			table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+		else
+			table->MvddLevel[count].Smio = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+	return 0;
+}
+
+int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint8_t *hi_vid = data->power_tune_table.BapmVddCVidHiSidd;
+	uint8_t *lo_vid = data->power_tune_table.BapmVddCVidLoSidd;
+
+	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+			    "The CAC Leakage table does not exist!", return -EINVAL);
+	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+		for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+		}
+	} else {
+		PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
+	}
+
+	return 0;
+}
+
+int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint8_t *vid = data->power_tune_table.VddCVid;
+
+	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+		"There should never be more than 8 entries for VddcVid!!!",
+		return -EINVAL);
+
+	for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+		vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+	}
+
+	return 0;
+}
+
+/**
+ * Preparation of voltage tables for SMC.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+
+int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+	SMU71_Discrete_DpmTable *table)
+{
+	int result;
+
+	result = iceland_populate_smc_vddc_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"can not populate VDDC voltage table to SMC", return -1);
+
+	result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"can not populate VDDCI voltage table to SMC", return -1);
+
+	result = iceland_populate_smc_mvdd_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"can not populate MVDD voltage table to SMC", return -1);
+
+	return 0;
+}
+
+
+/**
+ * Re-generate the DPM level mask value
+ * @param    hwmgr      the address of the hardware manager
+ */
+static uint32_t iceland_get_dpm_level_enable_mask_value(
+			struct iceland_single_dpm_table * dpm_table)
+{
+	uint32_t i;
+	uint32_t mask_value = 0;
+
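+	/* Walk levels from highest to lowest so bit 0 ends up mapping to level 0. */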
+	for (i = dpm_table->count; i > 0; i--) {
+		mask_value = mask_value << 1;
+
+		if (dpm_table->dpm_levels[i-1].enabled)
+			mask_value |= 0x1;
+		else
+			mask_value &= 0xFFFFFFFE;
+	}
+	return mask_value;
+}
+
+int iceland_populate_memory_timing_parameters(
+		struct pp_hwmgr *hwmgr,
+		uint32_t engine_clock,
+		uint32_t memory_clock,
+		struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+		)
+{
+	uint32_t dramTiming;
+	uint32_t dramTiming2;
+	uint32_t burstTime;
+	int result;
+
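+	/*
+	 * Have the VBIOS derive DRAM timings for this sclk/mclk pair, then
+	 * capture the registers it programmed for the SMC ARB table.
+	 */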
+	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+				engine_clock, memory_clock);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error calling VBIOS to set DRAM_TIMING.", return result);
+
+	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
+	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+	arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+	return 0;
+}
+
+/**
+ * Setup parameters for the MC ARB.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ * This function is to be called from the SetPowerState table.
+ */
+int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	int result = 0;
+	SMU71_Discrete_MCArbDramTimingTable  arb_regs;
+	uint32_t i, j;
+
+	memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+
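+	/* Build one ARB entry for every (sclk level, mclk level) pair. */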
+	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+			result = iceland_populate_memory_timing_parameters
+				(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+				 data->dpm_table.mclk_table.dpm_levels[j].value,
+				 &arb_regs.entries[i][j]);
+
+			if (0 != result) {
+				break;
+			}
+		}
+	}
+
+	if (0 == result) {
+		result = iceland_copy_bytes_to_smc(
+				hwmgr->smumgr,
+				data->arb_table_start,
+				(uint8_t *)&arb_regs,
+				sizeof(SMU71_Discrete_MCArbDramTimingTable),
+				data->sram_end
+				);
+	}
+
+	return result;
+}
+
+static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_dpm_table *dpm_table = &data->dpm_table;
+	uint32_t i;
+
+	/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed  =
+			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount =
+			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity =
+			1;
+		table->LinkLevel[i].SPC =
+			(uint8_t)(data->pcie_spc_cap & 0xff);
+		table->LinkLevel[i].DownThreshold =
+			PP_HOST_TO_SMC_UL(5);
+		table->LinkLevel[i].UpThreshold =
+			PP_HOST_TO_SMC_UL(30);
+	}
+
+	data->smc_state_table.LinkLevelCount =
+		(uint8_t)dpm_table->pcie_speed_table.count;
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+		iceland_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+	return 0;
+}
+
+static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+					SMU71_Discrete_DpmTable *table)
+{
+	return 0;
+}
+
+uint8_t iceland_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
+		uint32_t voltage)
+{
+	uint8_t count;
+	uint8_t i = 0;
+
+	PP_ASSERT_WITH_CODE((NULL != voltage_table),
+		"Voltage Table empty.", return 0;);
+
+	count = (uint8_t)(voltage_table->count);
+	PP_ASSERT_WITH_CODE((0 != count),
+		"Voltage Table empty.", return 0;);
+
+	for (i = 0; i < count; i++) {
+		/* find first voltage bigger than requested */
+		if (voltage_table->entries[i].value >= voltage)
+			return i;
+	}
+
+	/* voltage is bigger than max voltage in the table */
+	return i - 1;
+}
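+
+/*
+ * Worked example (hypothetical table contents): for entries
+ * {800, 900, 1000} and a request of 950, the loop above returns index 2
+ * (the first entry >= 950); for a request of 1100 it falls through and
+ * returns i - 1 = 2, i.e. the highest entry in the table.
+ */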
+
+static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+					  SMU71_Discrete_DpmTable *table)
+{
+	return 0;
+}
+
+static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+					  SMU71_Discrete_DpmTable *table)
+{
+	return 0;
+}
+
+static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+					   SMU71_Discrete_DpmTable *table)
+{
+	return 0;
+}
+
+static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+					    SMU71_Discrete_DpmTable *tab)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+		tab->SVI2Enable |= VDDC_ON_SVI2;
+
+	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control)
+		tab->SVI2Enable |= VDDCI_ON_SVI2;
+	else
+		tab->MergedVddci = 1;
+
+	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
+		tab->SVI2Enable |= MVDD_ON_SVI2;
+
+	PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
+		(tab->SVI2Enable & VDDC_ON_SVI2),
+		"SVI2 domain configuration is incorrect!", return -EINVAL);
+
+	return 0;
+}
+
+static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+	struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+	uint32_t clock, uint32_t *vol)
+{
+	uint32_t i = 0;
+
+	/* clock - voltage dependency table is empty table */
+	if (allowed_clock_voltage_table->count == 0)
+		return -EINVAL;
+
+	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+		/* find the first entry with clk greater than or equal to the request */
+		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+			*vol = allowed_clock_voltage_table->entries[i].v;
+			return 0;
+		}
+	}
+
+	/* the requested clock is bigger than the max clock in the dependency table */
+	*vol = allowed_clock_voltage_table->entries[i - 1].v;
+
+	return 0;
+}
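+
+/*
+ * Illustrative lookup (hypothetical entries): with clk/v pairs
+ * {30000/900, 60000/1000, 80000/1100} and clock == 50000, the loop above
+ * selects the 60000 entry, so *vol = 1000; with clock == 90000 the loop
+ * runs off the end and *vol falls back to the highest entry, 1100.
+ */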
+
+static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+		bool strobe_mode)
+{
+	uint8_t mc_para_index;
+
+	if (strobe_mode) {
+		if (memory_clock < 12500) {
+			mc_para_index = 0x00;
+		} else if (memory_clock > 47500) {
+			mc_para_index = 0x0f;
+		} else {
+			mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+		}
+	} else {
+		if (memory_clock < 65000) {
+			mc_para_index = 0x00;
+		} else if (memory_clock > 135000) {
+			mc_para_index = 0x0f;
+		} else {
+			mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+		}
+	}
+
+	return mc_para_index;
+}
+
+static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+	uint8_t mc_para_index;
+
+	if (memory_clock < 10000) {
+		mc_para_index = 0;
+	} else if (memory_clock >= 80000) {
+		mc_para_index = 0x0f;
+	} else {
+		mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+	}
+
+	return mc_para_index;
+}
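+
+/*
+ * Worked examples for the two ratio helpers above (clocks in 10 kHz
+ * units, values illustrative only): in strobe mode, memory_clock ==
+ * 30000 gives (30000 - 10000) / 2500 = 8; in non-strobe mode,
+ * memory_clock == 100000 gives (100000 - 60000) / 5000 = 8; for DDR3,
+ * memory_clock == 40000 gives (40000 - 10000) / 5000 + 1 = 7.
+ */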
+
+static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+					uint32_t sclk, uint32_t *p_shed)
+{
+	unsigned int i;
+
+	/* use the minimum phase shedding */
+	*p_shed = 1;
+
+	/*
+	 * PPGen ensures the phase shedding limits table is sorted
+	 * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk.
+	 * VBIOS ensures the phase shedding masks table is sorted from
+	 * least phases enabled (phase shedding on) to most phases
+	 * enabled (phase shedding off).
+	 */
+	for (i = 0; i < pl->count; i++) {
+		if (sclk < pl->entries[i].Sclk) {
+			/* enable phase shedding */
+			*p_shed = i;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+					uint32_t memory_clock, uint32_t *p_shed)
+{
+	unsigned int i;
+
+	/* use the minimum phase shedding */
+	*p_shed = 1;
+
+	/*
+	 * PPGen ensures the phase shedding limits table is sorted
+	 * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk.
+	 * VBIOS ensures the phase shedding masks table is sorted from
+	 * least phases enabled (phase shedding on) to most phases
+	 * enabled (phase shedding off).
+	 */
+	for (i = 0; i < pl->count; i++) {
+		if (memory_clock < pl->entries[i].Mclk) {
+			/* enable phase shedding */
+			*p_shed = i;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Populates the SMC MCLK structure using the provided memory clock
+ *
+ * @param    hwmgr         the address of the hardware manager
+ * @param    memory_clock  the memory clock to use to populate the structure
+ * @param    mclk          the SMC memory level structure to be populated
+ * @param    strobe_mode   whether strobe mode is in use
+ * @param    dllStateOn    whether the memory DLL is powered on
+ */
+static int iceland_calculate_mclk_params(
+		struct pp_hwmgr *hwmgr,
+		uint32_t memory_clock,
+		SMU71_Discrete_MemoryLevel *mclk,
+		bool strobe_mode,
+		bool dllStateOn
+		)
+{
+	const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
+	uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+	uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+	uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+	uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+	uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+	uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+	uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
+	uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+	pp_atomctrl_memory_clock_param mpll_param;
+	int result;
+
+	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+				memory_clock, &mpll_param, strobe_mode);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+	/* MPLL_FUNC_CNTL setup*/
+	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+	/* MPLL_FUNC_CNTL_1 setup*/
+	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+							MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+							MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+							MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+	/* MPLL_AD_FUNC_CNTL setup*/
+	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+							MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+	if (data->is_memory_GDDR5) {
+		/* MPLL_DQ_FUNC_CNTL setup*/
+		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+								MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+								MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+		/*
+		 ************************************
+		 Fref = Reference Frequency
+		 NF = Feedback divider ratio
+		 NR = Reference divider ratio
+		 Fnom = Nominal VCO output frequency = Fref * NF / NR
+		 Fs = Spreading Rate
+		 D = Percentage down-spread / 2
+		 Fint = Reference input frequency to PFD = Fref / NR
+		 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+		 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+		 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+		 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+		 *************************************
+		 */
+		pp_atomctrl_internal_ss_info ss_info;
+		uint32_t freq_nom;
+		uint32_t tmp;
+		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+		/* for GDDR5 for all modes and DDR3 */
+		if (1 == mpll_param.qdr)
+			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+		else
+			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
+		tmp = (freq_nom / reference_clock);
+		tmp = tmp * tmp;
+
+		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+			/* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+			/* ss.Info.speed_spectrum_rate -- in unit of khz */
+			/* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+			/*     = reference_clock * 5 / speed_spectrum_rate */
+			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+			/* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+			/*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+			uint32_t clkv =
+				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+		}
+	}
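+
+	/*
+	 * Illustrative numbers only: with qdr == 1, mpll_post_divider == 1
+	 * and memory_clock == 40000, freq_nom = 40000 * 4 * (1 << 1) =
+	 * 320000, and tmp = (freq_nom / reference_clock)^2 then feeds the
+	 * CLKV computation in the branch above.
+	 */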
+
+	/* MCLK_PWRMGT_CNTL setup */
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+	/* Save the result data to the output memory level structure */
+	mclk->MclkFrequency   = memory_clock;
+	mclk->MpllFuncCntl    = mpll_func_cntl;
+	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
+	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
+	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
+	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
+	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
+	mclk->DllCntl         = dll_cntl;
+	mclk->MpllSs1         = mpll_ss1;
+	mclk->MpllSs2         = mpll_ss2;
+
+	return 0;
+}
+
+static int iceland_populate_single_memory_level(
+		struct pp_hwmgr *hwmgr,
+		uint32_t memory_clock,
+		SMU71_Discrete_MemoryLevel *memory_level
+		)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	int result = 0;
+	bool dllStateOn;
+	struct cgs_display_info info = {0};
+
+	if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) {
+		result = iceland_get_dependecy_volt_by_clk(hwmgr,
+			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+	}
+
+	if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) {
+		memory_level->MinVddci = memory_level->MinVddc;
+	} else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+		result = iceland_get_dependecy_volt_by_clk(hwmgr,
+				hwmgr->dyn_state.vddci_dependency_on_mclk,
+				memory_clock,
+				&memory_level->MinVddci);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+	}
+
+	if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
+		result = iceland_get_dependecy_volt_by_clk(hwmgr,
+			hwmgr->dyn_state.mvdd_dependency_on_mclk, memory_clock, &memory_level->MinMvdd);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"can not find MinMVDD voltage value from memory MVDD voltage dependency table", return result);
+	}
+
+	memory_level->MinVddcPhases = 1;
+
+	if (data->vddc_phase_shed_control) {
+		iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+				memory_clock, &memory_level->MinVddcPhases);
+	}
+
+	memory_level->EnabledForThrottle = 1;
+	memory_level->EnabledForActivity = 1;
+	memory_level->UpHyst = 0;
+	memory_level->DownHyst = 100;
+	memory_level->VoltageDownHyst = 0;
+
+	/* Indicates maximum activity level for this performance level.*/
+	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+	memory_level->StutterEnable = 0;
+	memory_level->StrobeEnable = 0;
+	memory_level->EdcReadEnable = 0;
+	memory_level->EdcWriteEnable = 0;
+	memory_level->RttEnable = 0;
+
+	/* default set to low watermark. Highest level will be set to high later.*/
+	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	data->display_timing.num_existing_displays = info.display_count;
+
+	//if ((data->mclk_stutter_mode_threshold != 0) &&
+	//    (memory_clock <= data->mclk_stutter_mode_threshold) &&
+	//    (data->is_uvd_enabled == 0)
+	//    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
+	//    && (data->display_timing.num_existing_displays <= 2)
+	//    && (data->display_timing.num_existing_displays != 0))
+	//	memory_level->StutterEnable = 1;
+
+	/* decide strobe mode*/
+	memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
+		(memory_clock <= data->mclk_strobe_mode_threshold);
+
+	/* decide EDC mode and memory clock ratio*/
+	if (data->is_memory_GDDR5) {
+		memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+					memory_level->StrobeEnable);
+
+		if ((data->mclk_edc_enable_threshold != 0) &&
+				(memory_clock > data->mclk_edc_enable_threshold)) {
+			memory_level->EdcReadEnable = 1;
+		}
+
+		if ((data->mclk_edc_wr_enable_threshold != 0) &&
+				(memory_clock > data->mclk_edc_wr_enable_threshold)) {
+			memory_level->EdcWriteEnable = 1;
+		}
+
+		if (memory_level->StrobeEnable) {
+			if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
+				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+			} else {
+				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+			}
+
+		} else {
+			dllStateOn = data->dll_defaule_on;
+		}
+	} else {
+		memory_level->StrobeRatio =
+			iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+		dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+	}
+
+	result = iceland_calculate_mclk_params(hwmgr,
+		memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);
+
+	if (0 == result) {
+		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+		/* MCLK frequency in units of 10KHz*/
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+		/* Indicates maximum activity level for this performance level.*/
+		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+	}
+
+	return result;
+}
+
+/**
+ * Populates the SMC MVDD structure using the provided memory clock.
+ *
+ * @param    hwmgr    the address of the hardware manager
+ * @param    mclk     the MCLK value used to decide whether MVDD should be high or low
+ * @param    voltage  the SMC VOLTAGE structure to be populated
+ */
+int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMU71_Discrete_VoltageLevel *voltage)
+{
+	const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	uint32_t i = 0;
+
+	if (ICELAND_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find the first MVDD entry whose clock covers the request */
+		for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+			if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+				/* Always round to higher voltage. */
+				voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+				break;
+			}
+		}
+
+		PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+			"MVDD Voltage is outside the supported range.", return -1);
+
+	} else {
+		return -1;
+	}
+
+	return 0;
+}
+
+static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+	SMU71_Discrete_DpmTable *table)
+{
+	int result = 0;
+	const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	pp_atomctrl_clock_dividers_vi dividers;
+	SMU71_Discrete_VoltageLevel voltage_level;
+	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
+	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+	/* The ACPI state should not do DPM on DC (or ever).*/
+	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (data->acpi_vddc)
+		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
+	else
+		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pp_table * VOLTAGE_SCALE);
+
+	table->ACPILevel.MinVddcPhases = (data->vddc_phase_shed_control) ? 0 : 1;
+
+	/* Use the reference clock as the ACPI state's SCLK frequency. */
+	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+	/* get the engine clock dividers for this clock value*/
+	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+		table->ACPILevel.SclkFrequency,  &dividers);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+	/* divider ID for required SCLK*/
+	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+	table->ACPILevel.DeepSleepDivId = 0;
+
+	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
+							CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
+	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
+							CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
+	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
+							CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);
+
+	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+	table->ACPILevel.CcPwrDynRm = 0;
+	table->ACPILevel.CcPwrDynRm1 = 0;
+
+	/* For various features to be enabled/disabled while this level is active.*/
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+	/* SCLK frequency in units of 10KHz*/
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+	/*  CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
+
+	if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+		table->MemoryACPILevel.MinMvdd =
+			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+	else
+		table->MemoryACPILevel.MinMvdd = 0;
+
+	/* Force reset on DLL*/
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+	/* Disable DLL in ACPIState*/
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+	/* Enable DLL bypass signal*/
+	dll_cntl            = PHM_SET_FIELD(dll_cntl,
+		DLL_CNTL, MRDCK0_BYPASS, 0);
+	dll_cntl            = PHM_SET_FIELD(dll_cntl,
+		DLL_CNTL, MRDCK1_BYPASS, 0);
+
+	table->MemoryACPILevel.DllCntl            =
+		PP_HOST_TO_SMC_UL(dll_cntl);
+	table->MemoryACPILevel.MclkPwrmgtCntl     =
+		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+	table->MemoryACPILevel.MpllAdFuncCntl     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+	table->MemoryACPILevel.MpllDqFuncCntl     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+	table->MemoryACPILevel.MpllFuncCntl       =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+	table->MemoryACPILevel.MpllFuncCntl_1     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+	table->MemoryACPILevel.MpllFuncCntl_2     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+	table->MemoryACPILevel.MpllSs1            =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+	table->MemoryACPILevel.MpllSs2            =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+	table->MemoryACPILevel.EnabledForThrottle = 0;
+	table->MemoryACPILevel.EnabledForActivity = 0;
+	table->MemoryACPILevel.UpHyst = 0;
+	table->MemoryACPILevel.DownHyst = 100;
+	table->MemoryACPILevel.VoltageDownHyst = 0;
+	/* Indicates maximum activity level for this performance level.*/
+	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+	table->MemoryACPILevel.StutterEnable = 0;
+	table->MemoryACPILevel.StrobeEnable = 0;
+	table->MemoryACPILevel.EdcReadEnable = 0;
+	table->MemoryACPILevel.EdcWriteEnable = 0;
+	table->MemoryACPILevel.RttEnable = 0;
+
+	return result;
+}
+
+static int iceland_find_boot_level(struct iceland_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
+{
+	int result = -EINVAL;
+	uint32_t i;
+
+	for (i = 0; i < table->count; i++) {
+		if (value == table->dpm_levels[i].value) {
+			*boot_level = i;
+			result = 0;
+		}
+	}
+	return result;
+}
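+
+/*
+ * Example behaviour (hypothetical values): for a table holding
+ * {30000, 60000, 80000}, a lookup of 60000 sets *boot_level = 1 and
+ * returns 0, while a lookup of 70000 matches nothing, leaves *boot_level
+ * untouched and returns -EINVAL, which the callers below report with a
+ * warning.
+ */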
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    engine_clock the engine clock to use to populate the structure
+ * @param    sclk        the SMC SCLK structure to be populated
+ */
+int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+		uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
+{
+	const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	pp_atomctrl_clock_dividers_vi dividers;
+	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+	uint32_t    reference_clock;
+	uint32_t reference_divider;
+	uint32_t fbdiv;
+	int result;
+
+	/* get the engine clock dividers for this clock value*/
+	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+	reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+	reference_divider = 1 + dividers.uc_pll_ref_div;
+
+	/* low 14 bits is fraction and high 12 bits is divider*/
+	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+	/* SPLL_FUNC_CNTL setup*/
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+	/* SPLL_FUNC_CNTL_3 setup*/
+	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+	/* set to use fractional accumulation*/
+	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+		pp_atomctrl_internal_ss_info ss_info;
+
+		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+			/*
+			* ss_info.speed_spectrum_percentage -- in unit of 0.01%
+			* ss_info.speed_spectrum_rate -- in unit of khz
+			*/
+			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+			/* clkv = 2 * D * fbdiv / NS */
+			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+			cg_spll_spread_spectrum =
+				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+			cg_spll_spread_spectrum =
+				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+			cg_spll_spread_spectrum_2 =
+				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+		}
+	}
+
+	sclk->SclkFrequency        = engine_clock;
+	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+	return 0;
+}
+
+static uint8_t iceland_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr,
+		uint32_t engine_clock, uint32_t min_engine_clock_in_sr)
+{
+	uint32_t i, temp;
+	uint32_t min = (min_engine_clock_in_sr > ICELAND_MINIMUM_ENGINE_CLOCK) ?
+			min_engine_clock_in_sr : ICELAND_MINIMUM_ENGINE_CLOCK;
+
+	PP_ASSERT_WITH_CODE((engine_clock >= min),
+			"Engine clock can't satisfy stutter requirement!", return 0);
+
+	for (i = ICELAND_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
+		temp = engine_clock / (1 << i);
+
+		if (temp >= min || i == 0)
+			break;
+	}
+	return (uint8_t)i;
+}
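+
+/*
+ * Worked example (assuming ICELAND_MAX_DEEPSLEEP_DIVIDER_ID == 5): for
+ * engine_clock == 60000 and min == 5000, the loop above tries
+ * 60000 >> 5 = 1875 and 60000 >> 4 = 3750 before 60000 >> 3 = 7500
+ * satisfies the minimum, so the function returns divider ID 3.
+ */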
+
+/**
+ * Populates a single SMC graphics level structure using the provided engine clock
+ *
+ * @param    hwmgr          the address of the hardware manager
+ * @param    engine_clock   the engine clock to use to populate the structure
+ * @param    sclk_activity_level_threshold   the activity threshold for this level
+ * @param    graphic_level  the SMC graphics level structure to be populated
+ */
+static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+		uint32_t engine_clock, uint16_t	sclk_activity_level_threshold,
+		SMU71_Discrete_GraphicsLevel *graphic_level)
+{
+	int result;
+	uint32_t threshold;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+	/* populate graphics levels*/
+	result = iceland_get_dependecy_volt_by_clk(hwmgr,
+			hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc);
+	PP_ASSERT_WITH_CODE((0 == result),
+		"can not find VDDC voltage value for VDDC engine clock dependency table", return result);
+
+	/* SCLK frequency in units of 10KHz*/
+	graphic_level->SclkFrequency = engine_clock;
+
+	/*
+	 * Minimum VDDC phases required to support this level; this
+	 * should come from the dependency table.
+	 */
+	graphic_level->MinVddcPhases = 1;
+
+	if (data->vddc_phase_shed_control) {
+		iceland_populate_phase_value_based_on_sclk(hwmgr,
+				hwmgr->dyn_state.vddc_phase_shed_limits_table,
+				engine_clock,
+				&graphic_level->MinVddcPhases);
+	}
+
+	/* Indicates maximum activity level for this performance level. 50% for now*/
+	graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+	graphic_level->CcPwrDynRm = 0;
+	graphic_level->CcPwrDynRm1 = 0;
+	/* this level can be used if activity is high enough.*/
+	graphic_level->EnabledForActivity = 1;
+	/* this level can be used for throttling.*/
+	graphic_level->EnabledForThrottle = 1;
+	graphic_level->UpHyst = 0;
+	graphic_level->DownHyst = 100;
+	graphic_level->VoltageDownHyst = 0;
+	graphic_level->PowerThrottle = 0;
+
+	threshold = engine_clock * data->fast_watermark_threshold / 100;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep)) {
+		graphic_level->DeepSleepDivId =
+				iceland_get_sleep_divider_id_from_clock(hwmgr, engine_clock,
+						data->display_timing.min_clock_insr);
+	}
+
+	/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	if (0 == result) {
+		graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
+		/* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+	}
+
+	return result;
+}
+
+/**
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @param    hwmgr      the address of the hardware manager
+ */
+static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_dpm_table *dpm_table = &data->dpm_table;
+	int result = 0;
+	uint32_t level_array_address = data->dpm_table_start +
+		offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
+
+	uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) * SMU71_MAX_LEVELS_GRAPHICS;
+	SMU71_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
+	uint32_t i;
+	uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
+	memset(levels, 0x00, level_array_size);
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		result = iceland_populate_single_graphic_level(hwmgr,
+					dpm_table->sclk_table.dpm_levels[i].value,
+					(uint16_t)data->activity_target[i],
+					&(data->smc_state_table.GraphicsLevel[i]));
+		if (0 != result)
+			return result;
+
+		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+		if (i > 1)
+			data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+	}
+
+	/* set highest level watermark to high */
+	if (dpm_table->sclk_table.count > 1)
+		data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+			PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	data->smc_state_table.GraphicsDpmLevelCount =
+		(uint8_t)dpm_table->sclk_table.count;
+	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+		iceland_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+				(1 << (highest_pcie_level_enabled + 1))) != 0) {
+		highest_pcie_level_enabled++;
+	}
+
+	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+	       (1 << lowest_pcie_level_enabled)) == 0) {
+		lowest_pcie_level_enabled++;
+	}
+
+	while ((count < highest_pcie_level_enabled) &&
+			((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+				(1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
+		count++;
+	}
+
+	mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+		(lowest_pcie_level_enabled + 1 + count) : highest_pcie_level_enabled;
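+
+	/*
+	 * Walk-through with a hypothetical pcie_dpm_enable_mask of 0b0110:
+	 * the first loop stops with highest_pcie_level_enabled == 2, the
+	 * second skips the cleared bit 0 so lowest_pcie_level_enabled == 1,
+	 * the third finds bit 2 already set so count stays 0, and
+	 * mid_pcie_level_enabled resolves to 2.
+	 */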
+
+	/* set pcieDpmLevel to highest_pcie_level_enabled*/
+	for (i = 2; i < dpm_table->sclk_table.count; i++)
+		data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+	/* set pcieDpmLevel to lowest_pcie_level_enabled*/
+	data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+	/* set pcieDpmLevel to mid_pcie_level_enabled*/
+	data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+
+	/* level count will send to smc once at init smc table and never change*/
+	result = iceland_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
+			(uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
+
+	if (0 != result)
+		return result;
+
+	return 0;
+}
+
+/**
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+ *
+ * @param    hwmgr      the address of the hardware manager
+ */
+static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_dpm_table *dpm_table = &data->dpm_table;
+	int result;
+	/* populate MCLK dpm table to SMU7 */
+	uint32_t level_array_address = data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
+	uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
+	SMU71_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
+	uint32_t i;
+
+	memset(levels, 0x00, level_array_size);
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+			"can not populate memory level as memory clock is zero", return -1);
+		result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+			&(data->smc_state_table.MemoryLevel[i]));
+		if (0 != result) {
+			return result;
+		}
+	}
+
+	/* Only enable level 0 for now.*/
+	data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+	/*
+	 * To prevent MC activity in stutter mode from pushing DPM up,
+	 * the UVD change complements this by putting the MCLK in a
+	 * higher state by default, so that we are not affected by the
+	 * up threshold or MCLK DPM latency.
+	 */
+	data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+	data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+	data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+	/* set highest level watermark to high*/
+	data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	/* level count will send to smc once at init smc table and never change*/
+	result = iceland_copy_bytes_to_smc(hwmgr->smumgr,
+		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
+
+	if (0 != result) {
+		return result;
+	}
+
+	return 0;
+}
+
+struct ICELAND_DLL_SPEED_SETTING {
+	uint16_t	Min;           /* minimum data rate */
+	uint16_t	Max;           /* maximum data rate */
+	uint32_t	dll_speed;     /* desired DLL_SPEED setting */
+};
+
+static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *pstate)
+{
+	int result = 0;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	uint32_t voltage_response_time, ulv_voltage;
+
+	pstate->CcPwrDynRm = 0;
+	pstate->CcPwrDynRm1 = 0;
+
+	/* The backbias response time is used for the ULV state voltage value. */
+	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+	if (!ulv_voltage) {
+		data->ulv.ulv_supported = false;
+		return 0;
+	}
+
+	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 != data->voltage_control) {
+		/* use the minimum voltage if the ULV voltage in the pptable is bigger than the minimum voltage */
+		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) {
+			pstate->VddcOffset = 0;
+		} else {
+			/* used in SMIO mode; not implemented for now. This is a backup only for CI. */
+			pstate->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+		}
+	} else {
+		/* use the minimum voltage if the ULV voltage in the pptable is bigger than the minimum voltage */
+		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) {
+			pstate->VddcOffsetVid = 0;
+		} else {
+			/* used in SVI2 mode */
+			pstate->VddcOffsetVid = (uint8_t)((hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+		}
+	}
+
+	/* used in SVI2 Mode to shed phase */
+	pstate->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+	if (0 == result) {
+		CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm);
+		CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm1);
+		CONVERT_FROM_HOST_TO_SMC_US(pstate->VddcOffset);
+	}
+
+	return result;
+}
+
+static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *ulv)
+{
+	return iceland_populate_ulv_level(hwmgr, ulv);
+}
+
+static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	uint8_t count, level;
+
+	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+
+	for (level = 0; level < count; level++) {
+		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
+			 >= data->vbios_boot_state.sclk_bootup_value) {
+			data->smc_state_table.GraphicsBootLevel = level;
+			break;
+		}
+	}
+
+	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+
+	for (level = 0; level < count; level++) {
+		if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
+			>= data->vbios_boot_state.mclk_bootup_value) {
+			data->smc_state_table.MemoryBootLevel = level;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Initializes the SMC table and uploads it
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   0 on success, otherwise an error code
+ */
+static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	SMU71_Discrete_DpmTable  *table = &(data->smc_state_table);
+	const struct phw_iceland_ulv_parm *ulv = &(data->ulv);
+
+	result = iceland_setup_default_dpm_tables(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to setup default DPM tables!", return result;);
+	memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));
+
+	if (ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control) {
+		iceland_populate_smc_voltage_tables(hwmgr, table);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition)) {
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StepVddc)) {
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+	}
+
+	if (data->is_memory_GDDR5) {
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+	}
+
+	if (ulv->ulv_supported) {
+		result = iceland_populate_ulv_state(hwmgr, &data->ulv_setting);
+		PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize ULV state!", return result;);
+
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
+	}
+
+	result = iceland_populate_smc_link_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize Link Level!", return result;);
+
+	result = iceland_populate_all_graphic_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize Graphics Level!", return result;);
+
+	result = iceland_populate_all_memory_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize Memory Level!", return result;);
+
+	result = iceland_populate_smc_acpi_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize ACPI Level!", return result;);
+
+	result = iceland_populate_smc_vce_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize VCE Level!", return result;);
+
+	result = iceland_populate_smc_acp_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize ACP Level!", return result;);
+
+	result = iceland_populate_smc_samu_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize SAMU Level!", return result;);
+
+	/*
+	 * Since only the initial state is completely set up at this
+	 * point (the other states are just copies of the boot state),
+	 * we only need to populate the ARB settings for the initial
+	 * state.
+	 */
+	result = iceland_program_memory_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to Write ARB settings for the initial state.", return result;);
+
+	result = iceland_populate_smc_uvd_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize UVD Level!", return result;);
+
+	table->GraphicsBootLevel = 0;
+	table->MemoryBootLevel = 0;
+
+	/* find boot level from dpm table */
+	result = iceland_find_boot_level(&(data->dpm_table.sclk_table),
+			data->vbios_boot_state.sclk_bootup_value,
+			(uint32_t *)&(data->smc_state_table.GraphicsBootLevel));
+
+	if (result)
+		pr_warning("Boot engine clock value from the VBIOS was not found in the dependency table.\n");
+
+	result = iceland_find_boot_level(&(data->dpm_table.mclk_table),
+				data->vbios_boot_state.mclk_bootup_value,
+				(uint32_t *)&(data->smc_state_table.MemoryBootLevel));
+
+	if (result)
+		pr_warning("Boot memory clock value from the VBIOS was not found in the dependency table.\n");
+
+	table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+	if (ICELAND_VOLTAGE_CONTROL_NONE == data->vdd_ci_control)
+		table->BootVddci = table->BootVddc;
+	else
+		table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+	result = iceland_populate_smc_initial_state(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+	result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+	table->GraphicsVoltageChangeEnable  = 1;
+	table->GraphicsThermThrottleEnable  = 1;
+	table->GraphicsInterval = 1;
+	table->VoltageInterval  = 1;
+	table->ThermalInterval  = 1;
+	table->TemperatureLimitHigh =
+		(data->thermal_temp_setting.temperature_high *
+		 ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	table->TemperatureLimitLow =
+		(data->thermal_temp_setting.temperature_low *
+		ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	table->MemoryVoltageChangeEnable  = 1;
+	table->MemoryInterval  = 1;
+	table->VoltageResponseTime  = 0;
+	table->PhaseResponseTime  = 0;
+	table->MemoryThermThrottleEnable  = 1;
+	table->PCIeBootLinkLevel = 0;
+	table->PCIeGenInterval = 1;
+
+	result = iceland_populate_smc_svi2_config(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to populate SVI2 setting!", return result);
+
+	table->ThermGpio  = 17;
+	table->SclkStepSize = 0x4000;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+	table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+	table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+	/* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.) */
+	result = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
+				offsetof(SMU71_Discrete_DpmTable, SystemFlags),
+				(uint8_t *)&(table->SystemFlags),
+				sizeof(SMU71_Discrete_DpmTable) - 3 * sizeof(SMU71_PIDController),
+				data->sram_end);
+
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to upload dpm data to SMC memory!", return result);
+
+	/* Upload all ULV settings to SMC memory */
+	result = iceland_copy_bytes_to_smc(hwmgr->smumgr,
+			data->ulv_settings_start,
+			(uint8_t *)&(data->ulv_setting),
+			sizeof(SMU71_Discrete_Ulv),
+			data->sram_end);
+
+#if 0
+	/* Notify SMC to follow new GPIO scheme */
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition)) {
+		if (0 == iceland_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_UseNewGPIOScheme))
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
+	}
+#endif
+
+	return result;
+}
+
+int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+	const struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	uint32_t i, j;
+
+	for (i = 0, j = 0; j < data->iceland_mc_reg_table.last; j++) {
+		if (data->iceland_mc_reg_table.validflag & (1 << j)) {
+			PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+				"Index of mc_reg_table->address[] array out of bounds", return -1);
+			mc_reg_table->address[i].s0 =
+				PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 =
+				PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+
+	mc_reg_table->last = (uint8_t)i;
+
+	return 0;
+}
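+
+/*
+ * Compaction example (hypothetical values): with last == 4 and
+ * validflag == 0b1011, registers 0, 1 and 3 are copied to indices 0, 1
+ * and 2 of mc_reg_table->address[], and mc_reg_table->last becomes 3.
+ */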
+
+/* convert register values from driver to SMC format */
+void iceland_convert_mc_registers(
+	const phw_iceland_mc_reg_entry *pEntry,
+	SMU71_Discrete_MCRegisterSet *pData,
+	uint32_t numEntries, uint32_t validflag)
+{
+	uint32_t i, j;
+
+	for (i = 0, j = 0; j < numEntries; j++) {
+		if (validflag & (1 << j)) {
+			pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
+/* find the entry in the memory range table, then populate the value to SMC's iceland_mc_reg_table */
+int iceland_convert_mc_reg_table_entry_to_smc(
+		struct pp_hwmgr *hwmgr,
+		const uint32_t memory_clock,
+		SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+		)
+{
+	const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint32_t i = 0;
+
+	for (i = 0; i < data->iceland_mc_reg_table.num_entries; i++) {
+		if (memory_clock <=
+			data->iceland_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+			break;
+		}
+	}
+
+	if ((i == data->iceland_mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	iceland_convert_mc_registers(&data->iceland_mc_reg_table.mc_reg_table_entry[i],
+		mc_reg_table_data, data->iceland_mc_reg_table.last, data->iceland_mc_reg_table.validflag);
+
+	return 0;
+}
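+
+/*
+ * Range-lookup example (hypothetical entries): with mclk_max values
+ * {30000, 60000, 96000}, memory_clock == 50000 selects entry 1; a
+ * memory_clock above 96000 runs past the end and the index is clamped
+ * back to the last entry, 2.
+ */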
+
+int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+		SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+	int result = 0;
+	iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	int res;
+	uint32_t i;
+
+	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+		res = iceland_convert_mc_reg_table_entry_to_smc(
+				hwmgr,
+				data->dpm_table.mclk_table.dpm_levels[i].value,
+				&mc_reg_table->data[i]
+				);
+
+		if (0 != res)
+			result = res;
+	}
+
+	return result;
+}
+
+int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	memset(&data->mc_reg_table, 0x00, sizeof(SMU71_Discrete_MCRegisters));
+	result = iceland_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize MCRegTable for the MC register addresses!", return result;);
+
+	result = iceland_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize MCRegTable for driver state!", return result;);
+
+	return iceland_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
+			(uint8_t *)&data->mc_reg_table, sizeof(SMU71_Discrete_MCRegisters), data->sram_end);
+}
+
+int iceland_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+{
+	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
+
+	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
+}
+
+int iceland_enable_sclk_control(struct pp_hwmgr *hwmgr)
+{
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);
+
+	return 0;
+}
+
+int iceland_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	/* enable SCLK dpm */
+	if (0 == data->sclk_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(
+				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+						   PPSMC_MSG_DPM_Enable)),
+				"Failed to enable SCLK DPM during DPM Start Function!",
+				return -1);
+	}
+
+	/* enable MCLK dpm */
+	if (0 == data->mclk_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(
+				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+					     PPSMC_MSG_MCLKDPM_Enable)),
+				"Failed to enable MCLK DPM during DPM Start Function!",
+				return -1);
+
+		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixLCAC_CPL_CNTL, 0x100005);/*Read */
+
+		udelay(10);
+
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixLCAC_CPL_CNTL, 0x500005);/* write */
+
+	}
+
+	return 0;
+}
+
+int iceland_start_dpm(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	/* enable general power management */
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
+	/* enable sclk deep sleep */
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);
+
+	/* prepare for PCIE DPM */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_12, VoltageChangeTimeout, 0x1000);
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);
+
+	PP_ASSERT_WITH_CODE(
+			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+					PPSMC_MSG_Voltage_Cntl_Enable)),
+			"Failed to enable voltage DPM during DPM Start Function!",
+			return -1);
+
+	if (0 != iceland_enable_sclk_mclk_dpm(hwmgr)) {
+		PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1);
+	}
+
+	/* enable PCIE dpm */
+	if (0 == data->pcie_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(
+				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+						PPSMC_MSG_PCIeDPM_Enable)),
+				"Failed to enable pcie DPM during DPM Start Function!",
+				return -1
+				);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			    PHM_PlatformCaps_Falcon_QuickTransition)) {
+		smum_send_msg_to_smc(hwmgr->smumgr,
+				     PPSMC_MSG_EnableACDCGPIOInterrupt);
+	}
+
+	return 0;
+}
+
+static void iceland_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
+		uint32_t sources)
+{
+	bool protection;
+	enum DPM_EVENT_SRC src;
+
+	switch (sources) {
+	default:
+		printk(KERN_ERR "Unknown throttling event sources.");
+		/* fall through */
+	case 0:
+		protection = false;
+		/* src is unused */
+		break;
+	case (1 << PHM_AutoThrottleSource_Thermal):
+		protection = true;
+		src = DPM_EVENT_SRC_DIGITAL;
+		break;
+	case (1 << PHM_AutoThrottleSource_External):
+		protection = true;
+		src = DPM_EVENT_SRC_EXTERNAL;
+		break;
+	case (1 << PHM_AutoThrottleSource_External) |
+			(1 << PHM_AutoThrottleSource_Thermal):
+		protection = true;
+		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
+		break;
+	}
+	/* Order matters - don't enable thermal protection for the wrong source. */
+	if (protection) {
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
+				DPM_EVENT_SRC, src);
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+				THERMAL_PROTECTION_DIS,
+				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_ThermalController));
+	} else {
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+				THERMAL_PROTECTION_DIS, 1);
+	}
+}
+
+static int iceland_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+		PHM_AutoThrottleSource source)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	if (!(data->active_auto_throttle_sources & (1 << source))) {
+		data->active_auto_throttle_sources |= 1 << source;
+		iceland_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+	}
+	return 0;
+}
+
+static int iceland_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+	return iceland_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+static int iceland_tf_start_smc(struct pp_hwmgr *hwmgr)
+{
+	int ret = 0;
+
+	if (!iceland_is_smc_ram_running(hwmgr->smumgr))
+		ret = iceland_smu_start_smc(hwmgr->smumgr);
+
+	return ret;
+}
+
+/**
+ * Enables or disables the Deep Sleep master switch
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   0 on success, -EINVAL if the SMC does not acknowledge the message
+ */
+static int iceland_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			    PHM_PlatformCaps_SclkDeepSleep)) {
+		if (smum_send_msg_to_smc(hwmgr->smumgr,
+					 PPSMC_MSG_MASTER_DeepSleep_ON) != 0)
+			PP_ASSERT_WITH_CODE(false,
+					    "Attempt to enable Master Deep Sleep switch failed!",
+					    return -EINVAL);
+	} else {
+		if (smum_send_msg_to_smc(hwmgr->smumgr,
+					 PPSMC_MSG_MASTER_DeepSleep_OFF) != 0)
+			PP_ASSERT_WITH_CODE(false,
+					    "Attempt to disable Master Deep Sleep switch failed!",
+					    return -EINVAL);
+	}
+
+	return 0;
+}
+
+static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+	int tmp_result, result = 0;
+
+	if (cf_iceland_voltage_control(hwmgr)) {
+		tmp_result = iceland_enable_voltage_control(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable voltage control!", return tmp_result);
+
+		tmp_result = iceland_construct_voltage_tables(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to contruct voltage tables!", return tmp_result);
+	}
+
+	tmp_result = iceland_initialize_mc_reg_table(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to initialize MC reg table!", return tmp_result);
+
+	tmp_result = iceland_program_static_screen_threshold_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to program static screen threshold parameters!", return tmp_result);
+
+	tmp_result = iceland_enable_display_gap(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to enable display gap!", return tmp_result);
+
+	tmp_result = iceland_program_voting_clients(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to program voting clients!", return tmp_result);
+
+	tmp_result = iceland_upload_firmware(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to upload firmware header!", return tmp_result);
+
+	tmp_result = iceland_process_firmware_header(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to process firmware header!", return tmp_result);
+
+	tmp_result = iceland_initial_switch_from_arb_f0_to_f1(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to initialize switch from ArbF0 to F1!", return tmp_result);
+
+	tmp_result = iceland_init_smc_table(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to initialize SMC table!", return tmp_result);
+
+	tmp_result = iceland_populate_initial_mc_reg_table(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to populate initial MC Reg table!", return tmp_result);
+
+	tmp_result = iceland_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to populate PM fuses!", return tmp_result);
+
+	/* start SMC */
+	tmp_result = iceland_tf_start_smc(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to start SMC!", return tmp_result);
+
+	/* enable SCLK control */
+	tmp_result = iceland_enable_sclk_control(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to enable SCLK control!", return tmp_result);
+
+	tmp_result = iceland_enable_deep_sleep_master_switch(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+		"Failed to enable deep sleep!", return tmp_result);
+
+	/* enable DPM */
+	tmp_result = iceland_start_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to start DPM!", return tmp_result);
+
+	tmp_result = iceland_enable_smc_cac(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to enable SMC CAC!", return tmp_result);
+
+	tmp_result = iceland_enable_power_containment(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to enable power containment!", return tmp_result);
+
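+	/*
+	 * From this point on, failures are recorded in 'result' but do not
+	 * abort the bring-up sequence.
+	 */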
+	tmp_result = iceland_power_control_set_level(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+		"Failed to power control set level!", result = tmp_result);
+
+	tmp_result = iceland_enable_thermal_auto_throttle(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable thermal auto throttle!", result = tmp_result);
+
+	return result;
+}
+
+static int iceland_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+	return phm_hwmgr_backend_fini(hwmgr);
+}
+
+static void iceland_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct phw_iceland_ulv_parm *ulv;
+
+	ulv = &data->ulv;
+	ulv->ch_ulv_parameter = PPICELAND_CGULVPARAMETER_DFLT;
+	data->voting_rights_clients0 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0;
+	data->voting_rights_clients1 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1;
+	data->voting_rights_clients2 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2;
+	data->voting_rights_clients3 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3;
+	data->voting_rights_clients4 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4;
+	data->voting_rights_clients5 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5;
+	data->voting_rights_clients6 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6;
+	data->voting_rights_clients7 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7;
+
+	data->static_screen_threshold_unit = PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT;
+	data->static_screen_threshold = PPICELAND_STATICSCREENTHRESHOLD_DFLT;
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_ABM);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		    PHM_PlatformCaps_NonABMSupportInPPLib);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		    PHM_PlatformCaps_DynamicACTiming);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_DisableMemoryTransition);
+
+	iceland_initialize_power_tune_defaults(hwmgr);
+
+	data->mclk_strobe_mode_threshold = 40000;
+	data->mclk_stutter_mode_threshold = 30000;
+	data->mclk_edc_enable_threshold = 40000;
+	data->mclk_edc_wr_enable_threshold = 40000;
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_DisableMCLS);
+
+	data->pcie_gen_performance.max = PP_PCIEGen1;
+	data->pcie_gen_performance.min = PP_PCIEGen3;
+	data->pcie_gen_power_saving.max = PP_PCIEGen1;
+	data->pcie_gen_power_saving.min = PP_PCIEGen3;
+
+	data->pcie_lane_performance.max = 0;
+	data->pcie_lane_performance.min = 16;
+	data->pcie_lane_power_saving.max = 0;
+	data->pcie_lane_power_saving.min = 16;
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_SclkThrottleLowNotification);
+}
+
+static int iceland_get_evv_voltage(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	uint16_t    virtual_voltage_id;
+	uint16_t    vddc = 0;
+	uint16_t    i;
+
+	/* the count indicates actual number of entries */
+	data->vddc_leakage.count = 0;
+	data->vddci_leakage.count = 0;
+
+	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+		pr_err("Iceland should always support EVV\n");
+		return -EINVAL;
+	}
+
+	/* retrieve voltage for leakage ID (0xff01 + i) */
+	for (i = 0; i < ICELAND_MAX_LEAKAGE_COUNT; i++) {
+		virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+
+		PP_ASSERT_WITH_CODE((0 == atomctrl_get_voltage_evv(hwmgr, virtual_voltage_id, &vddc)),
+				    "Error retrieving EVV voltage value!\n", continue);
+
+		if (vddc >= 2000)
+			pr_warning("Invalid VDDC value!\n");
+
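+		/* Keep only genuine readings: skip zero and echoed leakage IDs. */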
+		if (vddc != 0 && vddc != virtual_voltage_id) {
+			data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
+			data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
+			data->vddc_leakage.count++;
+		}
+	}
+
+	return 0;
+}
+
+static void iceland_patch_with_vddc_leakage(struct pp_hwmgr *hwmgr,
+					    uint32_t *vddc)
+{
+	iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint32_t leakage_index;
+	struct phw_iceland_leakage_voltage *leakage_table = &data->vddc_leakage;
+
+	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
+	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+		/*
+		 * If this voltage matches a leakage voltage ID, patch
+		 * with actual leakage voltage.
+		 */
+		if (leakage_table->leakage_id[leakage_index] == *vddc) {
+			/*
+			 * Need to make sure vddc is less than 2v or
+			 * else, it could burn the ASIC.
+			 */
+			if (leakage_table->actual_voltage[leakage_index] >= 2000)
+				pr_warning("Invalid VDDC value!\n");
+			*vddc = leakage_table->actual_voltage[leakage_index];
+			/* we found leakage voltage */
+			break;
+		}
+	}
+
+	if (*vddc >= ATOM_VIRTUAL_VOLTAGE_ID0)
+		pr_warning("Voltage value looks like a Leakage ID but it's not patched\n");
+}
+
+static void iceland_patch_with_vddci_leakage(struct pp_hwmgr *hwmgr,
+					     uint32_t *vddci)
+{
+	iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint32_t leakage_index;
+	struct phw_iceland_leakage_voltage *leakage_table = &data->vddci_leakage;
+
+	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
+	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+		/*
+		 * If this voltage matches a leakage voltage ID, patch
+		 * with actual leakage voltage.
+		 */
+		if (leakage_table->leakage_id[leakage_index] == *vddci) {
+			*vddci = leakage_table->actual_voltage[leakage_index];
+			/* we found leakage voltage */
+			break;
+		}
+	}
+
+	if (*vddci >= ATOM_VIRTUAL_VOLTAGE_ID0)
+		pr_warning("Voltage value looks like a Leakage ID but it's not patched\n");
+}
+
+static int iceland_patch_vddc(struct pp_hwmgr *hwmgr,
+			      struct phm_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
+
+	return 0;
+}
+
+static int iceland_patch_vddci(struct pp_hwmgr *hwmgr,
+			       struct phm_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			iceland_patch_with_vddci_leakage(hwmgr, &tab->entries[i].v);
+
+	return 0;
+}
+
+static int iceland_patch_vce_vddc(struct pp_hwmgr *hwmgr,
+				  struct phm_vce_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
+
+	return 0;
+}
+
+static int iceland_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
+				  struct phm_uvd_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
+
+	return 0;
+}
+
+static int iceland_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
+					 struct phm_phase_shedding_limits_table *tab)
+{
+	uint16_t i;
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].Voltage);
+
+	return 0;
+}
+
+static int iceland_patch_samu_vddc(struct pp_hwmgr *hwmgr,
+				   struct phm_samu_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
+
+	return 0;
+}
+
+static int iceland_patch_acp_vddc(struct pp_hwmgr *hwmgr,
+				  struct phm_acp_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
+
+	return 0;
+}
+
+static int iceland_patch_limits_vddc(struct pp_hwmgr *hwmgr,
+				     struct phm_clock_and_voltage_limits *tab)
+{
+	if (tab) {
+		iceland_patch_with_vddc_leakage(hwmgr, (uint32_t *)&tab->vddc);
+		iceland_patch_with_vddci_leakage(hwmgr, (uint32_t *)&tab->vddci);
+	}
+
+	return 0;
+}
+
+static int iceland_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
+{
+	uint32_t i;
+	uint32_t vddc;
+
+	if (tab) {
+		for (i = 0; i < tab->count; i++) {
+			vddc = (uint32_t)(tab->entries[i].Vddc);
+			iceland_patch_with_vddc_leakage(hwmgr, &vddc);
+			tab->entries[i].Vddc = (uint16_t)vddc;
+		}
+	}
+
+	return 0;
+}
+
+static int iceland_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
+{
+	int tmp;
+
+	tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = iceland_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
+	if (tmp)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int iceland_set_private_var_based_on_pptable(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+	struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+	struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
+		"VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
+		"VDDC dependency on SCLK table must have at least one entry. This table is mandatory\n", return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
+		"VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
+		"VDDC dependency on MCLK table must have at least one entry. This table is mandatory\n", return -EINVAL);
+
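+	/*
+	 * The dependency tables are assumed sorted by ascending clock, so the
+	 * first and last entries bound the VDDC range.
+	 */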
+	data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
+	data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
+		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
+		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
+		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+	if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
+		data->min_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
+		data->max_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+	}
+
+	if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
+		hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
+
+	return 0;
+}
+
+static int iceland_initialize_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
+{
+	uint32_t table_size;
+	struct phm_clock_voltage_dependency_table *table_clk_vlt;
+
+	hwmgr->dyn_state.mclk_sclk_ratio = 4;
+	hwmgr->dyn_state.sclk_mclk_delta = 15000;      /* 150 MHz */
+	hwmgr->dyn_state.vddc_vddci_delta = 200;       /* 200mV */
+
+	/* initialize vddc_dep_on_dal_pwrl table */
+	table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
+	table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
+
+	if (NULL == table_clk_vlt) {
+		pr_err("[ powerplay ] Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
+		return -ENOMEM;
+	} else {
+		table_clk_vlt->count = 4;
+		table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
+		table_clk_vlt->entries[0].v = 0;
+		table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
+		table_clk_vlt->entries[1].v = 720;
+		table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
+		table_clk_vlt->entries[2].v = 810;
+		table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
+		table_clk_vlt->entries[3].v = 900;
+		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
+	}
+
+	return 0;
+}
+
+/**
+ * Initializes the Iceland (Volcanic Islands) hardware manager
+ *
+ * @param   hwmgr the address of the powerplay hardware manager.
+ * @return  0 on success; otherwise an appropriate error code.
+ */
+static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+	SMU71_Discrete_DpmTable *table = NULL;
+	iceland_hwmgr *data;
+	pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+	bool stay_in_boot;
+	struct phw_iceland_ulv_parm *ulv;
+	struct cgs_system_info sys_info = {0};
+
+	PP_ASSERT_WITH_CODE((NULL != hwmgr),
+		"Invalid Parameter!", return -EINVAL;);
+
+	/* Dereference the backend only after validating hwmgr. */
+	data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	data->dll_defaule_on = 0;
+	data->sram_end = SMC_RAM_END;
+
+	data->activity_target[0] = PPICELAND_TARGETACTIVITY_DFLT;
+	data->activity_target[1] = PPICELAND_TARGETACTIVITY_DFLT;
+	data->activity_target[2] = PPICELAND_TARGETACTIVITY_DFLT;
+	data->activity_target[3] = PPICELAND_TARGETACTIVITY_DFLT;
+	data->activity_target[4] = PPICELAND_TARGETACTIVITY_DFLT;
+	data->activity_target[5] = PPICELAND_TARGETACTIVITY_DFLT;
+	data->activity_target[6] = PPICELAND_TARGETACTIVITY_DFLT;
+	data->activity_target[7] = PPICELAND_TARGETACTIVITY_DFLT;
+
+	data->mclk_activity_target = PPICELAND_MCLK_TARGETACTIVITY_DFLT;
+
+	data->sclk_dpm_key_disabled = 0;
+	data->mclk_dpm_key_disabled = 0;
+	data->pcie_dpm_key_disabled = 0;
+	data->pcc_monitor_enabled = 0;
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		    PHM_PlatformCaps_UnTabledHardwareInterface);
+
+	data->gpio_debug = 0;
+	data->engine_clock_data = 0;
+	data->memory_clock_data = 0;
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_SclkDeepSleepAboveLow);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		    PHM_PlatformCaps_DynamicPatchPowerState);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		    PHM_PlatformCaps_TablelessHardwareInterface);
+
+	/* Initializes DPM default values. */
+	iceland_initialize_dpm_defaults(hwmgr);
+
+	/* Enable Platform EVV support. */
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		    PHM_PlatformCaps_EVV);
+
+	/* Get leakage voltage based on leakage ID. */
+	result = iceland_get_evv_voltage(hwmgr);
+	if (result)
+		goto failed;
+
+	/**
+	 * Patch our voltage dependency table with actual leakage
+	 * voltage. We need to perform leakage translation before it's
+	 * used by other functions such as
+	 * iceland_set_hwmgr_variables_based_on_pptable.
+	 */
+	result = iceland_patch_dependency_tables_with_leakage(hwmgr);
+	if (result)
+		goto failed;
+
+	/* Parse pptable data read from VBIOS. */
+	result = iceland_set_private_var_based_on_pptable(hwmgr);
+	if (result)
+		goto failed;
+
+	/* ULV support */
+	ulv = &(data->ulv);
+	ulv->ulv_supported = 1;
+
+	/* Initialize Dynamic State Adjustment Rule Settings */
+	result = iceland_initialize_dynamic_state_adjustment_rule_settings(hwmgr);
+	if (result) {
+		pr_err("[ powerplay ] iceland_initialize_dynamic_state_adjustment_rule_settings failed!\n");
+		goto failed;
+	}
+
+	data->voltage_control = ICELAND_VOLTAGE_CONTROL_NONE;
+	data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_NONE;
+	data->mvdd_control = ICELAND_VOLTAGE_CONTROL_NONE;
+
+	/*
+	 * Hardcode thermal temperature settings for now, these will
+	 * be overwritten if a custom policy exists.
+	 */
+	data->thermal_temp_setting.temperature_low = 99500;
+	data->thermal_temp_setting.temperature_high = 100000;
+	data->thermal_temp_setting.temperature_shutdown = 104000;
+	data->uvd_enabled = false;
+
+	table = &data->smc_state_table;
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+				       &gpio_pin_assignment)) {
+		table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			    PHM_PlatformCaps_RegulatorHot);
+	} else {
+		table->VRHotGpio = ICELAND_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			      PHM_PlatformCaps_RegulatorHot);
+	}
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+				       &gpio_pin_assignment)) {
+		table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			    PHM_PlatformCaps_AutomaticDCTransition);
+	} else {
+		table->AcDcGpio = ICELAND_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			      PHM_PlatformCaps_AutomaticDCTransition);
+	}
+
+	/*
+	 * If ucGPIO_ID == VDDC_PCC_GPIO_PINID in the GPIO LUT, the Peak
+	 * Current Control feature is enabled and we should program the
+	 * PCC HW register.
+	 */
+	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID,
+				       &gpio_pin_assignment)) {
+		uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
+							  CGS_IND_REG__SMC,
+							  ixCNB_PWRMGT_CNTL);
+
+		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
+		case 0:
+			temp_reg = PHM_SET_FIELD(temp_reg,
+				CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
+			break;
+		case 1:
+			temp_reg = PHM_SET_FIELD(temp_reg,
+				CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
+			break;
+		case 2:
+			temp_reg = PHM_SET_FIELD(temp_reg,
+				CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
+			break;
+		case 3:
+			temp_reg = PHM_SET_FIELD(temp_reg,
+				CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
+			break;
+		case 4:
+			temp_reg = PHM_SET_FIELD(temp_reg,
+				CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
+			break;
+		default:
+			pr_warning("[ powerplay ] Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!\n");
+			break;
+		}
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+				       ixCNB_PWRMGT_CNTL, temp_reg);
+	}
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		    PHM_PlatformCaps_EnableSMU7ThermalManagement);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		    PHM_PlatformCaps_SMU7);
+
+	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+						     VOLTAGE_TYPE_VDDC,
+						     VOLTAGE_OBJ_GPIO_LUT))
+		data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
+	else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+							  VOLTAGE_TYPE_VDDC,
+							  VOLTAGE_OBJ_SVID2))
+		data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			    PHM_PlatformCaps_ControlVDDCI)) {
+		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+							     VOLTAGE_TYPE_VDDCI,
+							     VOLTAGE_OBJ_GPIO_LUT))
+			data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
+		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+								  VOLTAGE_TYPE_VDDCI,
+								  VOLTAGE_OBJ_SVID2))
+			data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
+	}
+
+	if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE)
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			      PHM_PlatformCaps_ControlVDDCI);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			    PHM_PlatformCaps_EnableMVDDControl)) {
+		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+							     VOLTAGE_TYPE_MVDDC,
+							     VOLTAGE_OBJ_GPIO_LUT))
+			data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
+		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+								  VOLTAGE_TYPE_MVDDC,
+								  VOLTAGE_OBJ_SVID2))
+			data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
+	}
+
+	if (data->mvdd_control == ICELAND_VOLTAGE_CONTROL_NONE)
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			      PHM_PlatformCaps_EnableMVDDControl);
+
+	data->vddc_phase_shed_control = false;
+
+	stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				       PHM_PlatformCaps_StayInBootState);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DynamicPowerManagement);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ActivityReporting);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_GFXClockGatingSupport);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MemorySpreadSpectrumSupport);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EngineSpreadSpectrumSupport);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DynamicPCIEGen2Support);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SMC);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DisablePowerGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_BACO);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ThermalAutoThrottling);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DisableLSClockGating);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SamuDPM);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AcpDPM);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_OD6inACSupport);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EnablePlatformPowerManagement);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PauseMMSessions);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_OD6PlusinACSupport);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_GFXClockGatingManagedInCAIL);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_IcelandULPSSWWorkAround);
+
+	/*
+	 * UVD and VCE power gating are off by default; the PG flags query
+	 * below re-enables them where supported.
+	 */
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_UVDPowerGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_VCEPowerGating);
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+	result = cgs_query_system_info(hwmgr->device, &sys_info);
+	if (!result) {
+		if (sys_info.value & AMD_PG_SUPPORT_UVD)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_UVDPowerGating);
+		if (sys_info.value & AMD_PG_SUPPORT_VCE)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_VCEPowerGating);
+
+		data->is_tlu_enabled = false;
+		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+			ICELAND_MAX_HARDWARE_POWERLEVELS;
+		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+		hwmgr->platform_descriptor.minimumClocksReductionPercentage  = 50;
+
+		sys_info.size = sizeof(struct cgs_system_info);
+		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
+		result = cgs_query_system_info(hwmgr->device, &sys_info);
+		if (result)
+			data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+		else
+			data->pcie_gen_cap = (uint32_t)sys_info.value;
+		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+			data->pcie_spc_cap = 20;
+		sys_info.size = sizeof(struct cgs_system_info);
+		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
+		result = cgs_query_system_info(hwmgr->device, &sys_info);
+		if (result)
+			data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
+		else
+			data->pcie_lane_cap = (uint32_t)sys_info.value;
+	} else {
+		/* Ignore fini's return value here, we are cleaning up a mess. */
+		iceland_hwmgr_backend_fini(hwmgr);
+		return result;
+	}
+
+	return 0;
+failed:
+	return result;
+}
+
+static int iceland_get_num_of_entries(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	unsigned long ret = 0;
+
+	result = pp_tables_get_num_of_entries(hwmgr, &ret);
+
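+	/* On failure, report zero entries rather than an error code. */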
+	return result ? 0 : ret;
+}
+
+static const unsigned long PhwIceland_Magic = (unsigned long)(PHM_VIslands_Magic);
+
+struct iceland_power_state *cast_phw_iceland_power_state(
+				  struct pp_hw_power_state *hw_ps)
+{
+	if (hw_ps == NULL)
+		return NULL;
+
+	PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic),
+				"Invalid Powerstate Type!",
+				 return NULL);
+
+	return (struct iceland_power_state *)hw_ps;
+}
+
+static int iceland_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+				struct pp_power_state *prequest_ps,
+				const struct pp_power_state *pcurrent_ps)
+{
+	struct iceland_power_state *iceland_ps =
+				cast_phw_iceland_power_state(&prequest_ps->hardware);
+
+	uint32_t sclk;
+	uint32_t mclk;
+	struct PP_Clocks minimum_clocks = {0};
+	bool disable_mclk_switching;
+	bool disable_mclk_switching_for_frame_lock;
+	struct cgs_display_info info = {0};
+	const struct phm_clock_and_voltage_limits *max_limits;
+	uint32_t i;
+	iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	int32_t count;
+	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+
+	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
+
+	PP_ASSERT_WITH_CODE(iceland_ps->performance_level_count == 2,
+				 "VI should always have 2 performance levels",
+				 );
+
+	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
+			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
+
+	if (PP_PowerSource_DC == hwmgr->power_source) {
+		for (i = 0; i < iceland_ps->performance_level_count; i++) {
+			if (iceland_ps->performance_levels[i].memory_clock > max_limits->mclk)
+				iceland_ps->performance_levels[i].memory_clock = max_limits->mclk;
+			if (iceland_ps->performance_levels[i].engine_clock > max_limits->sclk)
+				iceland_ps->performance_levels[i].engine_clock = max_limits->sclk;
+		}
+	}
+
+	iceland_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
+	iceland_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
+
+		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
+		stable_pstate_sclk = (max_limits->sclk * 75) / 100;
+
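+		/*
+		 * Pick the highest SCLK dependency entry at or below 75% of
+		 * the AC limit; fall back to the lowest entry if none fits.
+		 */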
+		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; count >= 0; count--) {
+			if (stable_pstate_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
+				stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
+				break;
+			}
+		}
+
+		if (count < 0)
+			stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
+
+		stable_pstate_mclk = max_limits->mclk;
+
+		minimum_clocks.engineClock = stable_pstate_sclk;
+		minimum_clocks.memoryClock = stable_pstate_mclk;
+	}
+
+	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
+		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
+
+	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
+		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
+
+	iceland_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
+
+	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
+		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
+					"Overdrive sclk exceeds limit",
+					hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);
+
+		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
+			iceland_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
+	}
+
+	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
+		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
+			"Overdrive mclk exceeds limit",
+			hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+
+		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
+			iceland_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
+	}
+
+	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
+				    hwmgr->platform_descriptor.platformCaps,
+				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
+	disable_mclk_switching = (1 < info.display_count) ||
+				    disable_mclk_switching_for_frame_lock;
+
+	sclk  = iceland_ps->performance_levels[0].engine_clock;
+	mclk  = iceland_ps->performance_levels[0].memory_clock;
+
+	if (disable_mclk_switching)
+		mclk  = iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock;
+
+	if (sclk < minimum_clocks.engineClock)
+		sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;
+
+	if (mclk < minimum_clocks.memoryClock)
+		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;
+
+	iceland_ps->performance_levels[0].engine_clock = sclk;
+	iceland_ps->performance_levels[0].memory_clock = mclk;
+
+	iceland_ps->performance_levels[1].engine_clock =
+		(iceland_ps->performance_levels[1].engine_clock >= iceland_ps->performance_levels[0].engine_clock) ?
+			      iceland_ps->performance_levels[1].engine_clock :
+			      iceland_ps->performance_levels[0].engine_clock;
+
+	if (disable_mclk_switching) {
+		if (mclk < iceland_ps->performance_levels[1].memory_clock)
+			mclk = iceland_ps->performance_levels[1].memory_clock;
+
+		iceland_ps->performance_levels[0].memory_clock = mclk;
+		iceland_ps->performance_levels[1].memory_clock = mclk;
+	} else {
+		if (iceland_ps->performance_levels[1].memory_clock < iceland_ps->performance_levels[0].memory_clock)
+			iceland_ps->performance_levels[1].memory_clock = iceland_ps->performance_levels[0].memory_clock;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
+		for (i = 0; i < iceland_ps->performance_level_count; i++) {
+			iceland_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
+			iceland_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
+			iceland_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
+			/* Lane count comes from the lane cap, not the gen cap. */
+			iceland_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
+		}
+	}
+
+	return 0;
+}
+
+static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	/*
+	 * Return the status of the voltage controller instead of checking
+	 * SCLK/MCLK DPM, because some test scenarios intentionally disable
+	 * SCLK/MCLK DPM, whereas voltage control is a fundamental feature
+	 * that is never disabled.
+	 */
+	return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
+}
+
+/**
+ * Force the SCLK DPM state
+ *
+ * @param    hwmgr:  the address of the powerplay hardware manager.
+ * @param    n:      the DPM level to force.
+ * @return   0 on success; nonzero if the SMC rejects the message.
+ */
+int iceland_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	/* If DPM is disabled, skip this message so we do not hang the SMC. */
+	PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
+			"Trying to force SCLK when DPM is disabled", return -1;);
+	if (0 == data->sclk_dpm_key_disabled)
+		return (0 == smum_send_msg_to_smc_with_parameter(
+							     hwmgr->smumgr,
+							     PPSMC_MSG_DPM_ForceState,
+							     n) ? 0 : 1);
+
+	return 0;
+}
+
+/**
+ * Force the MCLK DPM state
+ *
+ * @param    hwmgr:  the address of the powerplay hardware manager.
+ * @param    n:      the DPM level to force.
+ * @return   0 on success; nonzero if the SMC rejects the message.
+ */
+int iceland_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	/* If DPM is disabled, skip this message so we do not hang the SMC. */
+	PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
+			"Trying to Force MCLK when DPM is disabled", return -1;);
+	if (0 == data->mclk_dpm_key_disabled)
+		return (0 == smum_send_msg_to_smc_with_parameter(
+								hwmgr->smumgr,
+								PPSMC_MSG_MCLKDPM_ForceState,
+								n) ? 0 : 1);
+
+	return 0;
+}
+
+/**
+ * Force the PCIe DPM level
+ *
+ * @param    hwmgr:  the address of the powerplay hardware manager.
+ * @param    n:      the DPM level to force.
+ * @return   0 on success; nonzero if the SMC rejects the message.
+ */
+int iceland_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	/* If DPM is disabled, skip this message so we do not hang the SMC. */
+	PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
+			"Trying to Force PCIE level when DPM is disabled", return -1;);
+	if (0 == data->pcie_dpm_key_disabled)
+		return (0 == smum_send_msg_to_smc_with_parameter(
+							     hwmgr->smumgr,
+							     PPSMC_MSG_PCIeDPM_ForceLevel,
+							     n) ? 0 : 1);
+
+	return 0;
+}
+
+static int iceland_force_dpm_highest(struct pp_hwmgr *hwmgr)
+{
+	uint32_t level, tmp;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	if (0 == data->sclk_dpm_key_disabled) {
+		/* SCLK */
+		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
+			level = 0;
+			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
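+			/* Index of the highest set bit, e.g. mask 0b1010 gives level 3. */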
+			while (tmp >>= 1)
+				level++;
+
+			if (0 != level) {
+				PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)),
+					"force highest sclk dpm state failed!", return -1);
+				PHM_WAIT_INDIRECT_FIELD(hwmgr->device,
+					SMC_IND, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX, level);
+			}
+		}
+	}
+
+	if (0 == data->mclk_dpm_key_disabled) {
+		/* MCLK */
+		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
+			level = 0;
+			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				level++;
+
+			if (0 != level) {
+				PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_mclk(hwmgr, level)),
+					"force highest mclk dpm state failed!", return -1);
+				PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND,
+					TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX, level);
+			}
+		}
+	}
+
+	if (0 == data->pcie_dpm_key_disabled) {
+		/* PCIE */
+		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
+			level = 0;
+			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+			while (tmp >>= 1)
+				level++;
+
+			if (0 != level) {
+				PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_pcie(hwmgr, level)),
+					"force highest pcie dpm state failed!", return -1);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static uint32_t iceland_get_lowest_enable_level(struct pp_hwmgr *hwmgr,
+						uint32_t level_mask)
+{
+	uint32_t level = 0;
+
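+	/*
+	 * Return the index of the lowest set bit.  Callers must pass a
+	 * non-zero mask or this loop will never terminate.
+	 */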
+	while (0 == (level_mask & (1 << level)))
+		level++;
+
+	return level;
+}
+
+static int iceland_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+{
+	uint32_t level;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	/* for now force only sclk */
+	if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+		level = iceland_get_lowest_enable_level(hwmgr,
+						      data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+
+		PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)),
+				    "force sclk dpm state failed!", return -1);
+
+		PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND,
+					TARGET_AND_CURRENT_PROFILE_INDEX,
+					CURR_SCLK_INDEX,
+					level);
+	}
+
+	return 0;
+}
+
+int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
+		"Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
+		return -1);
+
+	if (0 == data->sclk_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+						PPSMC_MSG_NoForcedLevel)),
+				    "unforce sclk dpm state failed!",
+				    return -1);
+	}
+
+	if (0 == data->mclk_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+						PPSMC_MSG_MCLKDPM_NoForcedLevel)),
+				    "unforce mclk dpm state failed!",
+				    return -1);
+	}
+
+	if (0 == data->pcie_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+						PPSMC_MSG_PCIeDPM_UnForceLevel)),
+				    "unforce pcie level failed!",
+				    return -1);
+	}
+
+	return 0;
+}
+
+static int iceland_force_dpm_level(struct pp_hwmgr *hwmgr,
+		enum amd_dpm_forced_level level)
+{
+	int ret = 0;
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+		ret = iceland_force_dpm_highest(hwmgr);
+		if (ret)
+			return ret;
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		ret = iceland_force_dpm_lowest(hwmgr);
+		if (ret)
+			return ret;
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		ret = iceland_unforce_dpm_levels(hwmgr);
+		if (ret)
+			return ret;
+		break;
+	default:
+		break;
+	}
+
+	hwmgr->dpm_level = level;
+	return ret;
+}
+
+const struct iceland_power_state *cast_const_phw_iceland_power_state(
+				 const struct pp_hw_power_state *hw_ps)
+{
+	if (hw_ps == NULL)
+		return NULL;
+
+	PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic),
+			    "Invalid Powerstate Type!",
+			    return NULL);
+
+	return (const struct iceland_power_state *)hw_ps;
+}
+
+static int iceland_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
+{
+	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
+	const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
+	uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
+	struct iceland_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
+	uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
+	struct PP_Clocks min_clocks = {0};
+	uint32_t i;
+	struct cgs_display_info info = {0};
+
+	data->need_update_smu7_dpm_table = 0;
+
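+	/*
+	 * A requested SCLK that is not an existing DPM level means the table
+	 * was overdriven and must be rebuilt; otherwise only the deep-sleep
+	 * divider may need refreshing.
+	 */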
+	for (i = 0; i < psclk_table->count; i++) {
+		if (sclk == psclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= psclk_table->count) {
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+	} else {
+		/*
+		 * TODO: Check SCLK in DAL's minimum clocks in case DeepSleep
+		 * divider update is required.
+		 */
+		if (data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
+			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+	}
+
+	for (i = 0; i < pmclk_table->count; i++) {
+		if (mclk == pmclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= pmclk_table->count)
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (data->display_timing.num_existing_displays != info.display_count)
+		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+
+	return 0;
+}
+
+static uint16_t iceland_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_ps)
+{
+	uint32_t i;
+	uint32_t pcie_speed, max_speed = 0;
+
+	for (i = 0; i < hw_ps->performance_level_count; i++) {
+		pcie_speed = hw_ps->performance_levels[i].pcie_gen;
+		if (max_speed < pcie_speed)
+			max_speed = pcie_speed;
+	}
+
+	return max_speed;
+}
+
+static uint16_t iceland_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+	uint32_t speed_cntl = 0;
+
+	speed_cntl = cgs_read_ind_register(hwmgr->device,
+					   CGS_IND_REG__PCIE,
+					   ixPCIE_LC_SPEED_CNTL);
+	return (uint16_t)PHM_GET_FIELD(speed_cntl,
+			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE);
+}
+
+static int iceland_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
+{
+	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	const struct iceland_power_state *iceland_nps = cast_const_phw_iceland_power_state(states->pnew_state);
+	const struct iceland_power_state *iceland_cps = cast_const_phw_iceland_power_state(states->pcurrent_state);
+
+	uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_nps);
+	uint16_t current_link_speed;
+
+	if (data->force_pcie_gen == PP_PCIEGenInvalid)
+		current_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_cps);
+	else
+		current_link_speed = data->force_pcie_gen;
+
+	data->force_pcie_gen = PP_PCIEGenInvalid;
+	data->pspp_notify_required = false;
+	if (target_link_speed > current_link_speed) {
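+		/*
+		 * Ask ACPI for the fastest acceptable speed first; on failure
+		 * fall through to the next lower generation.
+		 */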
+		switch (target_link_speed) {
+		case PP_PCIEGen3:
+			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
+				break;
+			data->force_pcie_gen = PP_PCIEGen2;
+			if (current_link_speed == PP_PCIEGen2)
+				break;
+			/* fall through */
+		case PP_PCIEGen2:
+			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
+				break;
+			/* fall through */
+		default:
+			data->force_pcie_gen = iceland_get_current_pcie_speed(hwmgr);
+			break;
+		}
+	} else {
+		if (target_link_speed < current_link_speed)
+			data->pspp_notify_required = true;
+	}
+
+	return 0;
+}
+
+static int iceland_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	if (0 == data->need_update_smu7_dpm_table)
+		return 0;
+
+	if ((0 == data->sclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+		PP_ASSERT_WITH_CODE(
+			0 == iceland_is_dpm_running(hwmgr),
+			"Trying to freeze SCLK DPM when DPM is disabled",
+			);
+		PP_ASSERT_WITH_CODE(
+			0 == smum_send_msg_to_smc(hwmgr->smumgr,
+					  PPSMC_MSG_SCLKDPM_FreezeLevel),
+			"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
+			return -1);
+	}
+
+	if ((0 == data->mclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table &
+		 DPMTABLE_OD_UPDATE_MCLK)) {
+		PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
+			"Trying to freeze MCLK DPM when DPM is disabled",
+			);
+		PP_ASSERT_WITH_CODE(
+			0 == smum_send_msg_to_smc(hwmgr->smumgr,
+							PPSMC_MSG_MCLKDPM_FreezeLevel),
+			"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
+			return -1);
+	}
+
+	return 0;
+}
+
+static int iceland_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
+{
+	int result = 0;
+
+	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
+	const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
+	uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
+	struct iceland_dpm_table *pdpm_table = &data->dpm_table;
+
+	struct iceland_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
+	uint32_t dpm_count, clock_percent;
+	uint32_t i;
+
+	if (0 == data->need_update_smu7_dpm_table)
+		return 0;
+
+	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+		pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+			/*
+			 * Need to do calculation based on the golden DPM table
+			 * as the Heatmap GPU Clock axis is also based on the default values
+			 */
+			PP_ASSERT_WITH_CODE(
+				(pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0),
+				"Divide by 0!",
+				return -1);
+			dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2;
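+			/*
+			 * Scale only the intermediate levels; the bottom two
+			 * and the top (overdriven) level keep their values.
+			 */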
+			for (i = dpm_count; i > 1; i--) {
+				if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) {
+					clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) /
+							pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
+
+					pdpm_table->sclk_table.dpm_levels[i].value =
+							pgolden_dpm_table->sclk_table.dpm_levels[i].value +
+							(pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
+
+				} else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) {
+					clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) /
+								pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
+
+					pdpm_table->sclk_table.dpm_levels[i].value =
+							pgolden_dpm_table->sclk_table.dpm_levels[i].value -
+							(pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
+				} else
+					pdpm_table->sclk_table.dpm_levels[i].value =
+							pgolden_dpm_table->sclk_table.dpm_levels[i].value;
+			}
+		}
+	}
+
+	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+		pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk;
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+
+			PP_ASSERT_WITH_CODE(
+					(pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0),
+					"Divide by 0!",
+					return -1);
+			dpm_count = pdpm_table->mclk_table.count < 2 ? 0 : pdpm_table->mclk_table.count-2;
+			for (i = dpm_count; i > 1; i--) {
+				if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) {
+					clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) /
+							pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
+
+					pdpm_table->mclk_table.dpm_levels[i].value =
+							pgolden_dpm_table->mclk_table.dpm_levels[i].value +
+							(pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
+
+				} else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) {
+					clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) /
+							pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
+
+					pdpm_table->mclk_table.dpm_levels[i].value =
+							pgolden_dpm_table->mclk_table.dpm_levels[i].value -
+							(pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
+				} else
+					pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value;
+			}
+		}
+	}
+
+	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
+		result = iceland_populate_all_graphic_levels(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
+			return result);
+	}
+
+	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+		/*populate MCLK dpm table to SMU7 */
+		result = iceland_populate_all_memory_levels(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
+				return result);
+	}
+
+	return result;
+}
+
+static int iceland_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
+			  struct iceland_single_dpm_table *pdpm_table,
+			     uint32_t low_limit, uint32_t high_limit)
+{
+	uint32_t i;
+
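+	/* Enable only the levels inside [low_limit, high_limit]. */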
+	for (i = 0; i < pdpm_table->count; i++) {
+		if ((pdpm_table->dpm_levels[i].value < low_limit) ||
+		    (pdpm_table->dpm_levels[i].value > high_limit))
+			pdpm_table->dpm_levels[i].enabled = false;
+		else
+			pdpm_table->dpm_levels[i].enabled = true;
+	}
+	return 0;
+}
+
+static int iceland_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_state)
+{
+	int result = 0;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint32_t high_limit_count;
+
+	PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1),
+				"power state did not have any performance level",
+				 return -1);
+
+	high_limit_count = (1 == hw_state->performance_level_count) ? 0 : 1;
+
+	iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.sclk_table),
+					hw_state->performance_levels[0].engine_clock,
+					hw_state->performance_levels[high_limit_count].engine_clock);
+
+	iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.mclk_table),
+					hw_state->performance_levels[0].memory_clock,
+					hw_state->performance_levels[high_limit_count].memory_clock);
+
+	return result;
+}
+
+static int iceland_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
+{
+	int result;
+	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
+
+	result = iceland_trim_dpm_states(hwmgr, iceland_ps);
+	if (0 != result)
+		return result;
+
+	data->dpm_level_enable_mask.sclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
+	data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
+	data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
+	if (data->uvd_enabled && (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1))
+		data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
+
+	return 0;
+}
+
+static int iceland_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
+{
+	return 0;
+}
+
+static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	int result = 0;
+	uint32_t low_sclk_interrupt_threshold = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkThrottleLowNotification)
+		&& (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) {
+		data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold;
+		low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold;
+
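+		/* Convert to the SMC's byte order before copying into SMC RAM. */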
+		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+		result = iceland_copy_bytes_to_smc(
+				hwmgr->smumgr,
+				data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable,
+				LowSclkInterruptThreshold),
+				(uint8_t *)&low_sclk_interrupt_threshold,
+				sizeof(uint32_t),
+				data->sram_end
+				);
+	}
+
+	return result;
+}
+
+static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	uint32_t address;
+	int32_t result;
+
+	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+		return 0;
+
+	memset(&data->mc_reg_table, 0, sizeof(SMU71_Discrete_MCRegisters));
+
+	result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table));
+
+	if (result != 0)
+		return result;
+
+	address = data->mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
+
+	return  iceland_copy_bytes_to_smc(hwmgr->smumgr, address,
+				 (uint8_t *)&data->mc_reg_table.data[0],
+				sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+				data->sram_end);
+}
+
+static int iceland_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	if (data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+		return iceland_program_memory_timing_parameters(hwmgr);
+
+	return 0;
+}
+
+static int iceland_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	if (0 == data->need_update_smu7_dpm_table)
+		return 0;
+
+	if ((0 == data->sclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+
+		PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
+			"Trying to Unfreeze SCLK DPM when DPM is disabled",
+			);
+		PP_ASSERT_WITH_CODE(
+			 0 == smum_send_msg_to_smc(hwmgr->smumgr,
+					 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
+			return -1);
+	}
+
+	if ((0 == data->mclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+
+		PP_ASSERT_WITH_CODE(
+				0 == iceland_is_dpm_running(hwmgr),
+				"Trying to Unfreeze MCLK DPM when DPM is disabled",
+				);
+		PP_ASSERT_WITH_CODE(
+			 0 == smum_send_msg_to_smc(hwmgr->smumgr,
+					 PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+		    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
+		    return -1);
+	}
+
+	data->need_update_smu7_dpm_table = 0;
+
+	return 0;
+}
+
+static int iceland_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input)
+{
+	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
+	uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_ps);
+	uint8_t  request;
+
+	if (data->pspp_notify_required  ||
+	    data->pcie_performance_request) {
+		if (target_link_speed == PP_PCIEGen3)
+			request = PCIE_PERF_REQ_GEN3;
+		else if (target_link_speed == PP_PCIEGen2)
+			request = PCIE_PERF_REQ_GEN2;
+		else
+			request = PCIE_PERF_REQ_GEN1;
+
+		if (request == PCIE_PERF_REQ_GEN1 && iceland_get_current_pcie_speed(hwmgr) > 0) {
+			data->pcie_performance_request = false;
+			return 0;
+		}
+
+		if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) {
+			if (PP_PCIEGen2 == target_link_speed)
+				printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 Failed!\n");
+			else
+				printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 Failed!\n");
+		}
+	}
+
+	data->pcie_performance_request = false;
+	return 0;
+}
+
+int iceland_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
+{
+	PPSMC_Result result;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	if (0 == data->sclk_dpm_key_disabled) {
+		/* Check whether DPM is running; if a hang is traced to this message, skip sending it. */
+		if (0 != iceland_is_dpm_running(hwmgr))
+			printk(KERN_ERR "[ powerplay ] Trying to set Enable Sclk Mask when DPM is disabled\n");
+
+		if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			result = smum_send_msg_to_smc_with_parameter(
+								hwmgr->smumgr,
+				(PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
+				data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+			PP_ASSERT_WITH_CODE((0 == result),
+				"Set Sclk Dpm enable Mask failed", return -1);
+		}
+	}
+
+	if (0 == data->mclk_dpm_key_disabled) {
+		/* Check whether DPM is running; if a hang is traced to this message, skip sending it. */
+		if (0 != iceland_is_dpm_running(hwmgr))
+			printk(KERN_ERR "[ powerplay ] Trying to set Enable Mclk Mask when DPM is disabled\n");
+
+		if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			result = smum_send_msg_to_smc_with_parameter(
+								hwmgr->smumgr,
+				(PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
+				data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+			PP_ASSERT_WITH_CODE((0 == result),
+				"Set Mclk Dpm enable Mask failed", return -1);
+		}
+	}
+
+	return 0;
+}
+
+static int iceland_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+	int tmp_result, result = 0;
+
+	tmp_result = iceland_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
+		tmp_result = iceland_request_link_speed_change_before_state_change(hwmgr, input);
+		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result);
+	}
+
+	tmp_result = iceland_freeze_sclk_mclk_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
+
+	tmp_result = iceland_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);
+
+	tmp_result = iceland_generate_dpm_level_enable_mask(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result);
+
+	tmp_result = iceland_update_vce_dpm(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result);
+
+	tmp_result = iceland_update_sclk_threshold(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result);
+
+	tmp_result = iceland_update_and_upload_mc_reg_table(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result);
+
+	tmp_result = iceland_program_memory_timing_parameters_conditionally(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result);
+
+	tmp_result = iceland_unfreeze_sclk_mclk_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result);
+
+	tmp_result = iceland_upload_dpm_level_enable_mask(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
+		tmp_result = iceland_notify_link_speed_change_after_state_change(hwmgr, input);
+		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result);
+	}
+
+	return result;
+}
+
+static int iceland_get_power_state_size(struct pp_hwmgr *hwmgr)
+{
+	return sizeof(struct iceland_power_state);
+}
+
+static int iceland_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+	struct pp_power_state  *ps;
+	struct iceland_power_state  *iceland_ps;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
+
+	if (low)
+		return iceland_ps->performance_levels[0].memory_clock;
+	else
+		return iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
+}
+
+static int iceland_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+	struct pp_power_state  *ps;
+	struct iceland_power_state  *iceland_ps;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
+
+	if (low)
+		return iceland_ps->performance_levels[0].engine_clock;
+	else
+		return iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
+}
+
+static int iceland_get_current_pcie_lane_number(
+						   struct pp_hwmgr *hwmgr)
+{
+	uint32_t link_width;
+
+	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device,
+							CGS_IND_REG__PCIE,
+						  PCIE_LC_LINK_WIDTH_CNTL,
+							LC_LINK_WIDTH_RD);
+
+	PP_ASSERT_WITH_CODE((7 >= link_width),
+			"Invalid PCIe lane width!", return 0);
+
+	return decode_pcie_lane_width(link_width);
+}
+
+static int iceland_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+					struct pp_hw_power_state *hw_ps)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_power_state *ps = (struct iceland_power_state *)hw_ps;
+	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
+	uint16_t size;
+	uint8_t frev, crev;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+
+	/* First retrieve the Boot clocks and VDDC from the firmware info table.
+	 * We assume here that fw_info is unchanged if this call fails.
+	 */
+	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
+			hwmgr->device, index,
+			&size, &frev, &crev);
+	if (!fw_info)
+		/* During a test, there is no firmware info table. */
+		return 0;
+
+	/* Patch the state. */
+	data->vbios_boot_state.sclk_bootup_value  = le32_to_cpu(fw_info->ulDefaultEngineClock);
+	data->vbios_boot_state.mclk_bootup_value  = le32_to_cpu(fw_info->ulDefaultMemoryClock);
+	data->vbios_boot_state.mvdd_bootup_value  = le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
+	data->vbios_boot_state.vddc_bootup_value  = le16_to_cpu(fw_info->usBootUpVDDCVoltage);
+	data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
+	data->vbios_boot_state.pcie_gen_bootup_value = iceland_get_current_pcie_speed(hwmgr);
+	data->vbios_boot_state.pcie_lane_bootup_value =
+			(uint16_t)iceland_get_current_pcie_lane_number(hwmgr);
+
+	/* set boot power state */
+	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
+	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
+	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
+	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
+
+	return 0;
+}
+
+static int iceland_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
+					struct pp_hw_power_state *power_state,
+					unsigned int index, const void *clock_info)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_power_state  *iceland_power_state = cast_phw_iceland_power_state(power_state);
+	const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
+	struct iceland_performance_level *performance_level;
+	uint32_t engine_clock, memory_clock;
+	uint16_t pcie_gen_from_bios;
+
+	engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
+	memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
+
+	if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
+		data->highest_mclk = memory_clock;
+
+	performance_level = &(iceland_power_state->performance_levels
+			[iceland_power_state->performance_level_count++]);
+
+	PP_ASSERT_WITH_CODE(
+			(iceland_power_state->performance_level_count < SMU71_MAX_LEVELS_GRAPHICS),
+			"Performance levels exceeds SMC limit!",
+			return -1);
+
+	PP_ASSERT_WITH_CODE(
+			(iceland_power_state->performance_level_count <=
+					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+			"Performance levels exceeds Driver limit!",
+			return -1);
+
+	/* Performance levels are arranged from low to high. */
+	performance_level->memory_clock = memory_clock;
+	performance_level->engine_clock = engine_clock;
+
+	pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
+
+	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
+	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
+
+	return 0;
+}
+
+static int iceland_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+		unsigned long entry_index, struct pp_power_state *state)
+{
+	int result;
+	struct iceland_power_state *ps;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct phm_clock_voltage_dependency_table *dep_mclk_table =
+			hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
+
+	state->hardware.magic = PHM_VIslands_Magic;
+
+	ps = (struct iceland_power_state *)(&state->hardware);
+
+	result = pp_tables_get_entry(hwmgr, entry_index, state,
+			iceland_get_pp_table_entry_callback_func);
+
+	/*
+	 * This is the earliest point at which we have both the dependency
+	 * table and the VBIOS boot state, as
+	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state.
+	 * If there is only one VDDCI/MCLK level, check whether it matches
+	 * the VBIOS boot state.
+	 */
+	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+		if (dep_mclk_table->entries[0].clk !=
+				data->vbios_boot_state.mclk_bootup_value)
+			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot MCLK level");
+		if (dep_mclk_table->entries[0].v !=
+				data->vbios_boot_state.vddci_bootup_value)
+			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot VDDCI level");
+	}
+
+	/* set DC compatible flag if this state supports DC */
+	if (!state->validation.disallowOnDC)
+		ps->dc_compatible = true;
+
+	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+	else if (0 != (state->classification.flags & PP_StateClassificationFlag_Boot)) {
+		if (data->bacos.best_match == 0xffff) {
+			/* For C.I. use boot state as base BACO state */
+			data->bacos.best_match = PP_StateClassificationFlag_Boot;
+			data->bacos.performance_level = ps->performance_levels[0];
+		}
+	}
+
+	ps->uvd_clocks.VCLK = state->uvd_clocks.VCLK;
+	ps->uvd_clocks.DCLK = state->uvd_clocks.DCLK;
+
+	if (!result) {
+		uint32_t i;
+
+		switch (state->classification.ui_label) {
+		case PP_StateUILabel_Performance:
+			data->use_pcie_performance_levels = true;
+
+			for (i = 0; i < ps->performance_level_count; i++) {
+				if (data->pcie_gen_performance.max <
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_performance.max =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_gen_performance.min >
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_performance.min =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_lane_performance.max <
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_performance.max =
+							ps->performance_levels[i].pcie_lane;
+
+				if (data->pcie_lane_performance.min >
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_performance.min =
+							ps->performance_levels[i].pcie_lane;
+			}
+			break;
+		case PP_StateUILabel_Battery:
+			data->use_pcie_power_saving_levels = true;
+
+			for (i = 0; i < ps->performance_level_count; i++) {
+				if (data->pcie_gen_power_saving.max <
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_power_saving.max =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_gen_power_saving.min >
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_power_saving.min =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_lane_power_saving.max <
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_power_saving.max =
+							ps->performance_levels[i].pcie_lane;
+
+				if (data->pcie_lane_power_saving.min >
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_power_saving.min =
+							ps->performance_levels[i].pcie_lane;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+static void
+iceland_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
+{
+	uint32_t sclk, mclk, activity_percent;
+	uint32_t offset;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency));
+
+	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency));
+
+	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
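+	/* The SMC reports clocks in units of 10 kHz; divide by 100 for MHz. */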
+	seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n", mclk/100, sclk/100);
+
+	offset = data->soft_regs_start + offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
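+	/* AverageGraphicsActivity is in Q8.8 fixed point; add 0x80 (0.5) to round before dropping the fractional byte. */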
+	activity_percent += 0x80;
+	activity_percent >>= 8;
+
+	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
+
+	seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+	seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
+}
+
+int iceland_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+{
+	uint32_t num_active_displays = 0;
+	struct cgs_display_info info = {0};
+	info.mode_info = NULL;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	num_active_displays = info.display_count;
+
+	if (num_active_displays > 1)  /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
+		iceland_notify_smc_display_change(hwmgr, false);
+	else
+		iceland_notify_smc_display_change(hwmgr, true);
+
+	return 0;
+}
+
+/**
+* Programs the display gap
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always OK
+*/
+int iceland_program_display_gap(struct pp_hwmgr *hwmgr)
+{
+	uint32_t num_active_displays = 0;
+	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
+	uint32_t display_gap2;
+	uint32_t pre_vbi_time_in_us;
+	uint32_t frame_time_in_us;
+	uint32_t ref_clock;
+	uint32_t refresh_rate = 0;
+	struct cgs_display_info info = {0};
+	struct cgs_mode_info mode_info;
+
+	info.mode_info = &mode_info;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	num_active_displays = info.display_count;
+
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+	ref_clock = mode_info.ref_clock;
+	refresh_rate = mode_info.refresh_rate;
+
+	if (0 == refresh_rate)
+		refresh_rate = 60;
+
+	frame_time_in_us = 1000000 / refresh_rate;
+
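+	/* Pre-VBI time is one frame minus the vblank interval, with a 200 us margin, scaled by the reference clock for the display gap counter. */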
+	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_4, PreVBlankGap, 0x64);
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_5, VBlankTimeout, (frame_time_in_us - pre_vbi_time_in_us));
+
+	if (num_active_displays == 1)
+		iceland_notify_smc_display_change(hwmgr, true);
+
+	return 0;
+}
+
+int iceland_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+	iceland_program_display_gap(hwmgr);
+
+	return 0;
+}
+
+/**
+*  Set maximum target operating fan output PWM
+*
+* @param    hwmgr:  the address of the powerplay hardware manager.
+* @param    us_max_fan_pwm:  max operating fan PWM in percent
+* @return   The response that came from the SMC.
+*/
+static int iceland_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
+{
+	hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
+
+	if (phm_is_hw_access_blocked(hwmgr))
+		return 0;
+
+	return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1);
+}
+
+/**
+*  Set maximum target operating fan output RPM
+*
+* @param    hwmgr:  the address of the powerplay hardware manager.
+* @param    us_max_fan_rpm:  max operating fan RPM value.
+* @return   The response that came from the SMC.
+*/
+static int iceland_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
+{
+	hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
+
+	if (phm_is_hw_access_blocked(hwmgr))
+		return 0;
+
+	return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm) ? 0 : -1);
+}
+
+static int iceland_dpm_set_interrupt_state(void *private_data,
+					 unsigned src_id, unsigned type,
+					 int enabled)
+{
+	uint32_t cg_thermal_int;
+	struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	switch (type) {
+	case AMD_THERMAL_IRQ_LOW_TO_HIGH:
+		if (enabled) {
+			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
+			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
+		} else {
+			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
+			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
+		}
+		break;
+
+	case AMD_THERMAL_IRQ_HIGH_TO_LOW:
+		if (enabled) {
+			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
+			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
+		} else {
+			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
+			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int iceland_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+					const void *thermal_interrupt_info)
+{
+	int result;
+	const struct pp_interrupt_registration_info *info =
+			(const struct pp_interrupt_registration_info *)thermal_interrupt_info;
+
+	if (info == NULL)
+		return -EINVAL;
+
+	result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
+				iceland_dpm_set_interrupt_state,
+				info->call_back, info->context);
+
+	if (result)
+		return -EINVAL;
+
+	result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
+				iceland_dpm_set_interrupt_state,
+				info->call_back, info->context);
+
+	if (result)
+		return -EINVAL;
+
+	return 0;
+}
+
+
+static bool iceland_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	bool is_update_required = false;
+	struct cgs_display_info info = {0,0,NULL};
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (data->display_timing.num_existing_displays != info.display_count)
+		is_update_required = true;
+/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
+	if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+		cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
+		if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
+			is_update_required = true;
+*/
+	return is_update_required;
+}
+
+
+static inline bool iceland_are_power_levels_equal(const struct iceland_performance_level *pl1,
+							   const struct iceland_performance_level *pl2)
+{
+	return ((pl1->memory_clock == pl2->memory_clock) &&
+		  (pl1->engine_clock == pl2->engine_clock) &&
+		  (pl1->pcie_gen == pl2->pcie_gen) &&
+		  (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int iceland_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1,
+		const struct pp_hw_power_state *pstate2, bool *equal)
+{
+	const struct iceland_power_state *psa = cast_const_phw_iceland_power_state(pstate1);
+	const struct iceland_power_state *psb = cast_const_phw_iceland_power_state(pstate2);
+	int i;
+
+	if (equal == NULL || psa == NULL || psb == NULL)
+		return -EINVAL;
+
+	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
+	if (psa->performance_level_count != psb->performance_level_count) {
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < psa->performance_level_count; i++) {
+		if (!iceland_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+			/* If we have found even one performance level pair that is different the states are different. */
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+	*equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK));
+	*equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK));
+	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
+	*equal &= (psa->acp_clk == psb->acp_clk);
+
+	return 0;
+}
+
+static int iceland_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+	if (mode) {
+		/* stop auto-manage */
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl))
+			iceland_fan_ctrl_stop_smc_fan_control(hwmgr);
+		iceland_fan_ctrl_set_static_mode(hwmgr, mode);
+	} else {
+		/* restart auto-manage */
+		iceland_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+	}
+
+	return 0;
+}
+
+static int iceland_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+{
+	if (hwmgr->fan_ctrl_is_in_default_mode)
+		return hwmgr->fan_ctrl_default_mode;
+	else
+		return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_FDO_CTRL2, FDO_PWM_MODE);
+}
+
+static int iceland_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, uint32_t mask)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+		return -EINVAL;
+
+	switch (type) {
+	case PP_SCLK:
+		if (!data->sclk_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SCLKDPM_SetEnabledMask,
+					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+		break;
+	case PP_MCLK:
+		if (!data->mclk_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_MCLKDPM_SetEnabledMask,
+					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+		break;
+	case PP_PCIE:
+	{
+		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+		uint32_t level = 0;
+
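+		/* Force the highest level allowed by the mask: find the index of its most-significant set bit. */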
+		while (tmp >>= 1)
+			level++;
+
+		if (!data->pcie_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_PCIeDPM_ForceLevel,
+					level);
+		break;
+	}
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int iceland_print_clock_levels(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, char *buf)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+	struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+	struct iceland_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+	int i, now, size = 0;
+	uint32_t clock, pcie_speed;
+
+	switch (type) {
+	case PP_SCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
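+		/* The first DPM level at or above the current clock is the active level. */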
+		for (i = 0; i < sclk_table->count; i++) {
+			if (clock > sclk_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < sclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, sclk_table->dpm_levels[i].value / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_MCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+		for (i = 0; i < mclk_table->count; i++) {
+			if (clock > mclk_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < mclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, mclk_table->dpm_levels[i].value / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_PCIE:
+		pcie_speed = iceland_get_current_pcie_speed(hwmgr);
+		for (i = 0; i < pcie_table->count; i++) {
+			if (pcie_speed != pcie_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < pcie_table->count; i++)
+			size += sprintf(buf + size, "%d: %s %s\n", i,
+					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
+					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+					(i == now) ? "*" : "");
+		break;
+	default:
+		break;
+	}
+	return size;
+}
+
+static int iceland_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+	struct iceland_single_dpm_table *golden_sclk_table =
+			&(data->golden_dpm_table.sclk_table);
+	int value;
+
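+	/* Overdrive is the percentage by which the top SCLK level exceeds the golden (default) top level. */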
+	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+			100 /
+			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+	return value;
+}
+
+static int iceland_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_single_dpm_table *golden_sclk_table =
+			&(data->golden_dpm_table.sclk_table);
+	struct pp_power_state  *ps;
+	struct iceland_power_state  *iceland_ps;
+
+	if (value > 20)
+		value = 20;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
+
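+	/* Raise the top performance level's engine clock by the requested overdrive percentage of the golden value. */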
+	iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].engine_clock =
+			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+			value / 100 +
+			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+	return 0;
+}
+
+static int iceland_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+	struct iceland_single_dpm_table *golden_mclk_table =
+			&(data->golden_dpm_table.mclk_table);
+	int value;
+
+	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+			100 /
+			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+	return value;
+}
+
+uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr)
+{
+	uint32_t reference_clock;
+	uint32_t tc;
+	uint32_t divide;
+
+	ATOM_FIRMWARE_INFO *fw_info;
+	uint16_t size;
+	uint8_t frev, crev;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+
+	tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
+
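+	/* If the clock pin is muxed to TCLK, the fixed TCLK rate is the reference. */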
+	if (tc)
+		return TCLK;
+
+	fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index,
+						  &size, &frev, &crev);
+
+	if (!fw_info)
+		return 0;
+
+	reference_clock = le16_to_cpu(fw_info->usReferenceClock);
+
+	divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
+
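+	/* When XTALIN_DIVIDE is set, the crystal input is divided by four. */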
+	if (0 != divide)
+		return reference_clock / 4;
+
+	return reference_clock;
+}
+
+static int iceland_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_single_dpm_table *golden_mclk_table =
+			&(data->golden_dpm_table.mclk_table);
+	struct pp_power_state  *ps;
+	struct iceland_power_state  *iceland_ps;
+
+	if (value > 20)
+		value = 20;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
+
+	iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock =
+			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+			value / 100 +
+			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+	return 0;
+}
+
+static const struct pp_hwmgr_func iceland_hwmgr_funcs = {
+	.backend_init = &iceland_hwmgr_backend_init,
+	.backend_fini = &iceland_hwmgr_backend_fini,
+	.asic_setup = &iceland_setup_asic_task,
+	.dynamic_state_management_enable = &iceland_enable_dpm_tasks,
+	.apply_state_adjust_rules = iceland_apply_state_adjust_rules,
+	.force_dpm_level = &iceland_force_dpm_level,
+	.power_state_set = iceland_set_power_state_tasks,
+	.get_power_state_size = iceland_get_power_state_size,
+	.get_mclk = iceland_dpm_get_mclk,
+	.get_sclk = iceland_dpm_get_sclk,
+	.patch_boot_state = iceland_dpm_patch_boot_state,
+	.get_pp_table_entry = iceland_get_pp_table_entry,
+	.get_num_of_pp_table_entries = iceland_get_num_of_entries,
+	.print_current_perforce_level = iceland_print_current_perforce_level,
+	.powerdown_uvd = iceland_phm_powerdown_uvd,
+	.powergate_uvd = iceland_phm_powergate_uvd,
+	.powergate_vce = iceland_phm_powergate_vce,
+	.disable_clock_power_gating = iceland_phm_disable_clock_power_gating,
+	.update_clock_gatings = iceland_phm_update_clock_gatings,
+	.notify_smc_display_config_after_ps_adjustment = iceland_notify_smc_display_config_after_ps_adjustment,
+	.display_config_changed = iceland_display_configuration_changed_task,
+	.set_max_fan_pwm_output = iceland_set_max_fan_pwm_output,
+	.set_max_fan_rpm_output = iceland_set_max_fan_rpm_output,
+	.get_temperature = iceland_thermal_get_temperature,
+	.stop_thermal_controller = iceland_thermal_stop_thermal_controller,
+	.get_fan_speed_info = iceland_fan_ctrl_get_fan_speed_info,
+	.get_fan_speed_percent = iceland_fan_ctrl_get_fan_speed_percent,
+	.set_fan_speed_percent = iceland_fan_ctrl_set_fan_speed_percent,
+	.reset_fan_speed_to_default = iceland_fan_ctrl_reset_fan_speed_to_default,
+	.get_fan_speed_rpm = iceland_fan_ctrl_get_fan_speed_rpm,
+	.set_fan_speed_rpm = iceland_fan_ctrl_set_fan_speed_rpm,
+	.uninitialize_thermal_controller = iceland_thermal_ctrl_uninitialize_thermal_controller,
+	.register_internal_thermal_interrupt = iceland_register_internal_thermal_interrupt,
+	.check_smc_update_required_for_display_configuration = iceland_check_smc_update_required_for_display_configuration,
+	.check_states_equal = iceland_check_states_equal,
+	.set_fan_control_mode = iceland_set_fan_control_mode,
+	.get_fan_control_mode = iceland_get_fan_control_mode,
+	.force_clock_level = iceland_force_clock_level,
+	.print_clock_levels = iceland_print_clock_levels,
+	.get_sclk_od = iceland_get_sclk_od,
+	.set_sclk_od = iceland_set_sclk_od,
+	.get_mclk_od = iceland_get_mclk_od,
+	.set_mclk_od = iceland_set_mclk_od,
+};
+
+int iceland_hwmgr_init(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data;
+
+	data = kzalloc(sizeof(struct iceland_hwmgr), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	hwmgr->backend = data;
+	hwmgr->hwmgr_func = &iceland_hwmgr_funcs;
+	hwmgr->pptable_func = &pptable_funcs;
+
+	/* thermal */
+	pp_iceland_thermal_initialize(hwmgr);
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h
new file mode 100644
index 0000000..f253988
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+#ifndef ICELAND_HWMGR_H
+#define ICELAND_HWMGR_H
+
+#include "hwmgr.h"
+#include "ppatomctrl.h"
+#include "ppinterrupt.h"
+#include "ppsmc.h"
+#include "iceland_powertune.h"
+#include "pp_endian.h"
+#include "smu71_discrete.h"
+
+#define ICELAND_MAX_HARDWARE_POWERLEVELS 2
+#define ICELAND_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
+
+struct iceland_performance_level {
+	uint32_t	memory_clock;
+	uint32_t	engine_clock;
+	uint16_t	pcie_gen;
+	uint16_t	pcie_lane;
+};
+
+struct _phw_iceland_bacos {
+	uint32_t                          best_match;
+	uint32_t                          baco_flags;
+	struct iceland_performance_level		  performance_level;
+};
+typedef struct _phw_iceland_bacos phw_iceland_bacos;
+
+struct _phw_iceland_uvd_clocks {
+	uint32_t   VCLK;
+	uint32_t   DCLK;
+};
+
+typedef struct _phw_iceland_uvd_clocks phw_iceland_uvd_clocks;
+
+struct _phw_iceland_vce_clocks {
+	uint32_t   EVCLK;
+	uint32_t   ECCLK;
+};
+
+typedef struct _phw_iceland_vce_clocks phw_iceland_vce_clocks;
+
+struct iceland_power_state {
+	uint32_t                    magic;
+	phw_iceland_uvd_clocks        uvd_clocks;
+	phw_iceland_vce_clocks        vce_clocks;
+	uint32_t                    sam_clk;
+	uint32_t                    acp_clk;
+	uint16_t                    performance_level_count;
+	bool                        dc_compatible;
+	uint32_t                    sclk_threshold;
+	struct iceland_performance_level performance_levels[ICELAND_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct _phw_iceland_dpm_level {
+	bool		enabled;
+	uint32_t    value;
+	uint32_t    param1;
+};
+typedef struct _phw_iceland_dpm_level phw_iceland_dpm_level;
+
+#define ICELAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
+#define ICELAND_MINIMUM_ENGINE_CLOCK 5000
+
+struct iceland_single_dpm_table {
+	uint32_t count;
+	phw_iceland_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct iceland_dpm_table {
+	struct iceland_single_dpm_table  sclk_table;
+	struct iceland_single_dpm_table  mclk_table;
+	struct iceland_single_dpm_table  pcie_speed_table;
+	struct iceland_single_dpm_table  vddc_table;
+	struct iceland_single_dpm_table  vdd_gfx_table;
+	struct iceland_single_dpm_table  vdd_ci_table;
+	struct iceland_single_dpm_table  mvdd_table;
+};
+typedef struct iceland_dpm_table phw_iceland_dpm_table;
+
+
+struct _phw_iceland_clock_registers {
+	uint32_t  vCG_SPLL_FUNC_CNTL;
+	uint32_t  vCG_SPLL_FUNC_CNTL_2;
+	uint32_t  vCG_SPLL_FUNC_CNTL_3;
+	uint32_t  vCG_SPLL_FUNC_CNTL_4;
+	uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
+	uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
+	uint32_t  vDLL_CNTL;
+	uint32_t  vMCLK_PWRMGT_CNTL;
+	uint32_t  vMPLL_AD_FUNC_CNTL;
+	uint32_t  vMPLL_DQ_FUNC_CNTL;
+	uint32_t  vMPLL_FUNC_CNTL;
+	uint32_t  vMPLL_FUNC_CNTL_1;
+	uint32_t  vMPLL_FUNC_CNTL_2;
+	uint32_t  vMPLL_SS1;
+	uint32_t  vMPLL_SS2;
+};
+typedef struct _phw_iceland_clock_registers phw_iceland_clock_registers;
+
+struct _phw_iceland_voltage_smio_registers {
+	uint32_t vs0_vid_lower_smio_cntl;
+};
+typedef struct _phw_iceland_voltage_smio_registers phw_iceland_voltage_smio_registers;
+
+
+struct _phw_iceland_mc_reg_entry {
+	uint32_t mclk_max;
+	uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+typedef struct _phw_iceland_mc_reg_entry phw_iceland_mc_reg_entry;
+
+struct _phw_iceland_mc_reg_table {
+	uint8_t   last;               /* number of registers*/
+	uint8_t   num_entries;        /* number of entries in mc_reg_table_entry used*/
+	uint16_t  validflag;          /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
+	phw_iceland_mc_reg_entry    mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+typedef struct _phw_iceland_mc_reg_table phw_iceland_mc_reg_table;
+
+#define DISABLE_MC_LOADMICROCODE   1
+#define DISABLE_MC_CFGPROGRAMMING  2
+
+
+/*Ultra Low Voltage parameter structure */
+struct phw_iceland_ulv_parm {
+	bool					ulv_supported;
+	uint32_t   				ch_ulv_parameter;
+	uint32_t				ulv_volt_change_delay;
+	struct iceland_performance_level	ulv_power_level;
+};
+
+#define ICELAND_MAX_LEAKAGE_COUNT  8
+
+struct phw_iceland_leakage_voltage {
+	uint16_t  count;
+	uint16_t  leakage_id[ICELAND_MAX_LEAKAGE_COUNT];
+	uint16_t  actual_voltage[ICELAND_MAX_LEAKAGE_COUNT];
+};
+
+struct _phw_iceland_display_timing {
+	uint32_t min_clock_insr;
+	uint32_t num_existing_displays;
+};
+typedef struct _phw_iceland_display_timing phw_iceland_display_timing;
+
+
+struct phw_iceland_thermal_temperature_setting {
+	long temperature_low;
+	long temperature_high;
+	long temperature_shutdown;
+};
+
+struct _phw_iceland_dpmlevel_enable_mask {
+	uint32_t uvd_dpm_enable_mask;
+	uint32_t vce_dpm_enable_mask;
+	uint32_t acp_dpm_enable_mask;
+	uint32_t samu_dpm_enable_mask;
+	uint32_t sclk_dpm_enable_mask;
+	uint32_t mclk_dpm_enable_mask;
+	uint32_t pcie_dpm_enable_mask;
+};
+typedef struct _phw_iceland_dpmlevel_enable_mask phw_iceland_dpmlevel_enable_mask;
+
+struct _phw_iceland_pcie_perf_range {
+	uint16_t max;
+	uint16_t min;
+};
+typedef struct _phw_iceland_pcie_perf_range phw_iceland_pcie_perf_range;
+
+struct _phw_iceland_vbios_boot_state {
+	uint16_t					mvdd_bootup_value;
+	uint16_t					vddc_bootup_value;
+	uint16_t					vddci_bootup_value;
+	uint16_t					vddgfx_bootup_value;
+	uint32_t					sclk_bootup_value;
+	uint32_t					mclk_bootup_value;
+	uint16_t					pcie_gen_bootup_value;
+	uint16_t					pcie_lane_bootup_value;
+};
+typedef struct _phw_iceland_vbios_boot_state phw_iceland_vbios_boot_state;
+
+#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
+#define DPMTABLE_UPDATE_SCLK        0x00000004
+#define DPMTABLE_UPDATE_MCLK        0x00000008
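+/* Flags in iceland_hwmgr.need_update_smu7_dpm_table: OD_* mark pending overdrive (overclock) changes, UPDATE_* mark other pending DPM table updates. */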
+
+/* We need to review which fields are needed. */
+/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
+struct iceland_hwmgr {
+	struct iceland_dpm_table               dpm_table;
+	struct iceland_dpm_table               golden_dpm_table;
+
+	uint32_t                           voting_rights_clients0;
+	uint32_t                           voting_rights_clients1;
+	uint32_t                           voting_rights_clients2;
+	uint32_t                           voting_rights_clients3;
+	uint32_t                           voting_rights_clients4;
+	uint32_t                           voting_rights_clients5;
+	uint32_t                           voting_rights_clients6;
+	uint32_t                           voting_rights_clients7;
+	uint32_t                           static_screen_threshold_unit;
+	uint32_t                           static_screen_threshold;
+	uint32_t                           voltage_control;
+	uint32_t                           vdd_gfx_control;
+
+	uint32_t                           vddc_vddci_delta;
+	uint32_t                           vddc_vddgfx_delta;
+
+	struct pp_interrupt_registration_info    internal_high_thermal_interrupt_info;
+	struct pp_interrupt_registration_info    internal_low_thermal_interrupt_info;
+	struct pp_interrupt_registration_info    smc_to_host_interrupt_info;
+	uint32_t                          active_auto_throttle_sources;
+
+	struct pp_interrupt_registration_info    external_throttle_interrupt;
+	irq_handler_func_t             external_throttle_callback;
+	void                             *external_throttle_context;
+
+	struct pp_interrupt_registration_info    ctf_interrupt_info;
+	irq_handler_func_t             ctf_callback;
+	void                             *ctf_context;
+
+	phw_iceland_clock_registers	  clock_registers;
+	phw_iceland_voltage_smio_registers  voltage_smio_registers;
+
+	bool	is_memory_GDDR5;
+	uint16_t                          acpi_vddc;
+	bool	pspp_notify_required;        /* Flag to indicate if PSPP notification to SBIOS is required */
+	uint16_t                          force_pcie_gen;            /* The forced PCI-E speed if not 0xffff */
+	uint16_t                          acpi_pcie_gen;             /* The PCI-E speed at ACPI time */
+	uint32_t                           pcie_gen_cap;             /* The PCI-E speed capabilities bitmap from CAIL */
+	uint32_t                           pcie_lane_cap;            /* The PCI-E lane capabilities bitmap from CAIL */
+	uint32_t                           pcie_spc_cap;             /* Symbol Per Clock Capabilities from registry */
+	struct phw_iceland_leakage_voltage	vddc_leakage;        /* The Leakage VDDC supported (based on leakage ID).*/
+	struct phw_iceland_leakage_voltage	vddcgfx_leakage;     /* The Leakage VDDC supported (based on leakage ID). */
+	struct phw_iceland_leakage_voltage	vddci_leakage;       /* The Leakage VDDCI supported (based on leakage ID). */
+
+	uint32_t                           mvdd_control;
+	uint32_t                           vddc_mask_low;
+	uint32_t                           mvdd_mask_low;
+	uint16_t                          max_vddc_in_pp_table;        /* the maximum VDDC value in the powerplay table*/
+	uint16_t                          min_vddc_in_pp_table;
+	uint16_t                          max_vddci_in_pp_table;       /* the maximum VDDCI value in the powerplay table */
+	uint16_t                          min_vddci_in_pp_table;
+	uint32_t                           mclk_strobe_mode_threshold;
+	uint32_t                           mclk_stutter_mode_threshold;
+	uint32_t                           mclk_edc_enable_threshold;
+	uint32_t                           mclk_edc_wr_enable_threshold;
+	bool	is_uvd_enabled;
+	bool	is_xdma_enabled;
+	phw_iceland_vbios_boot_state      vbios_boot_state;
+
+	bool                         battery_state;
+	bool                         is_tlu_enabled;
+	bool                         pcie_performance_request;
+
+	/* -------------- SMC SRAM Address of firmware header tables ----------------*/
+	uint32_t			sram_end;           /* The first address after the SMC SRAM. */
+	uint32_t			dpm_table_start;    /* The start of the dpm table in the SMC SRAM. */
+	uint32_t			soft_regs_start;    /* The start of the soft registers in the SMC SRAM. */
+	uint32_t			mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */
+	uint32_t			fan_table_start;    /* The start of the fan table in the SMC SRAM. */
+	uint32_t			arb_table_start;    /* The start of the ARB setting table in the SMC SRAM. */
+	uint32_t			ulv_settings_start;
+	SMU71_Discrete_DpmTable		smc_state_table;    /* The carbon copy of the SMC state table. */
+	SMU71_Discrete_MCRegisters      mc_reg_table;
+	SMU71_Discrete_Ulv              ulv_setting;         /* The carbon copy of ULV setting. */
+
+	/* -------------- Stuff originally coming from Evergreen --------------------*/
+	phw_iceland_mc_reg_table	 iceland_mc_reg_table;
+	uint32_t                         vdd_ci_control;
+	pp_atomctrl_voltage_table        vddc_voltage_table;
+	pp_atomctrl_voltage_table        vddci_voltage_table;
+	pp_atomctrl_voltage_table        vddgfx_voltage_table;
+	pp_atomctrl_voltage_table        mvdd_voltage_table;
+
+	uint32_t                           mgcg_cgtt_local2;
+	uint32_t                           mgcg_cgtt_local3;
+	uint32_t                           gpio_debug;
+	uint32_t			mc_micro_code_feature;
+	uint32_t			highest_mclk;
+	uint16_t                          acpi_vdd_ci;
+	uint8_t                           mvdd_high_index;
+	uint8_t                           mvdd_low_index;
+	bool                         dll_defaule_on;
+	bool                         performance_request_registered;
+
+	/* ----------------- Low Power Features ---------------------*/
+	phw_iceland_bacos					bacos;
+	struct phw_iceland_ulv_parm              	ulv;
+
+	/* ----------------- CAC Stuff ---------------------*/
+	uint32_t					cac_table_start;
+	bool                         cac_configuration_required;    /* TRUE if PP_CACConfigurationRequired == 1 */
+	bool                         driver_calculate_cac_leakage;  /* TRUE if PP_DriverCalculateCACLeakage == 1 */
+	bool                         cac_enabled;
+
+	/* ----------------- DPM2 Parameters ---------------------*/
+	uint32_t		power_containment_features;
+	bool                         enable_bapm_feature;
+	bool			     enable_dte_feature;
+	bool                         enable_tdc_limit_feature;
+	bool                         enable_pkg_pwr_tracking_feature;
+	bool                         disable_uvd_power_tune_feature;
+	struct iceland_pt_defaults           *power_tune_defaults;
+	SMU71_Discrete_PmFuses           power_tune_table;
+	uint32_t                           ul_dte_tj_offset;     /* Fudge factor in DPM table to correct HW DTE errors */
+	uint32_t                           fast_watermark_threshold;  /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
+
+	/* ----------------- Phase Shedding ---------------------*/
+	bool                         vddc_phase_shed_control;
+
+	/* --------------------- DI/DT --------------------------*/
+	phw_iceland_display_timing       display_timing;
+
+	/* --------- ReadRegistry data for memory and engine clock margins ---- */
+	uint32_t                           engine_clock_data;
+	uint32_t                           memory_clock_data;
+
+	/* -------- Thermal Temperature Setting --------------*/
+	struct phw_iceland_thermal_temperature_setting 	thermal_temp_setting;
+	phw_iceland_dpmlevel_enable_mask     dpm_level_enable_mask;
+
+	uint32_t				need_update_smu7_dpm_table;
+	uint32_t				sclk_dpm_key_disabled;
+	uint32_t				mclk_dpm_key_disabled;
+	uint32_t				pcie_dpm_key_disabled;
+	/* used to store the previous dal min sclock */
+	uint32_t                           min_engine_clocks;
+	phw_iceland_pcie_perf_range       pcie_gen_performance;
+	phw_iceland_pcie_perf_range       pcie_lane_performance;
+	phw_iceland_pcie_perf_range       pcie_gen_power_saving;
+	phw_iceland_pcie_perf_range       pcie_lane_power_saving;
+	bool                            use_pcie_performance_levels;
+	bool                            use_pcie_power_saving_levels;
+	/* percentage value from 0-100, default 50 */
+	uint32_t                           activity_target[SMU71_MAX_LEVELS_GRAPHICS];
+	uint32_t                           mclk_activity_target;
+	uint32_t                           low_sclk_interrupt_threshold;
+	uint32_t                           last_mclk_dpm_enable_mask;
+	bool								uvd_enabled;
+	uint32_t                           pcc_monitor_enabled;
+
+	/* --------- Power Gating States ------------*/
+	bool                           uvd_power_gated;  /* 1: gated, 0:not gated */
+	bool                           vce_power_gated;  /* 1: gated, 0:not gated */
+	bool                           samu_power_gated; /* 1: gated, 0:not gated */
+	bool                           acp_power_gated;  /* 1: gated, 0:not gated */
+	bool                           pg_acp_init;
+
+	/* soft pptable for re-uploading into smu */
+	void *soft_pp_table;
+};
+
+typedef struct iceland_hwmgr iceland_hwmgr;
+
+int iceland_hwmgr_init(struct pp_hwmgr *hwmgr);
+int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr);
+int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr);
+int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr);
+
+#define ICELAND_DPM2_NEAR_TDP_DEC          10
+#define ICELAND_DPM2_ABOVE_SAFE_INC        5
+#define ICELAND_DPM2_BELOW_SAFE_INC        20
+
+/*
+ * Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size
+ * is 128, then this value should be Log2(128) = 7.
+ */
+#define ICELAND_DPM2_LTA_WINDOW_SIZE       7
+
+#define ICELAND_DPM2_LTS_TRUNCATE          0
+
+#define ICELAND_DPM2_TDP_SAFE_LIMIT_PERCENT            80  // Maximum 100
+
+#define ICELAND_DPM2_MAXPS_PERCENT_H                   90  // Maximum 0xFF
+#define ICELAND_DPM2_MAXPS_PERCENT_M                   90  // Maximum 0xFF
+
+#define ICELAND_DPM2_PWREFFICIENCYRATIO_MARGIN         50
+
+#define ICELAND_DPM2_SQ_RAMP_MAX_POWER                 0x3FFF
+#define ICELAND_DPM2_SQ_RAMP_MIN_POWER                 0x12
+#define ICELAND_DPM2_SQ_RAMP_MAX_POWER_DELTA           0x15
+#define ICELAND_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE  0x1E
+#define ICELAND_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO  0xF
+
+#define ICELAND_VOLTAGE_CONTROL_NONE                   0x0
+#define ICELAND_VOLTAGE_CONTROL_BY_GPIO                0x1
+#define ICELAND_VOLTAGE_CONTROL_BY_SVID2               0x2
+
+/* convert to Q8.8 format for firmware */
+#define ICELAND_Q88_FORMAT_CONVERSION_UNIT             256
+
+#define ICELAND_UNUSED_GPIO_PIN 0x7F
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c
new file mode 100644
index 0000000..041e964
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c
@@ -0,0 +1,490 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+
+#include "amdgpu.h"
+#include "hwmgr.h"
+#include "smumgr.h"
+#include "iceland_hwmgr.h"
+#include "iceland_powertune.h"
+#include "iceland_smumgr.h"
+#include "smu71_discrete.h"
+#include "smu71.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "pp_endian.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#define VOLTAGE_SCALE  4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+
+#define DEVICE_ID_VI_ICELAND_M_6900	0x6900
+#define DEVICE_ID_VI_ICELAND_M_6901	0x6901
+#define DEVICE_ID_VI_ICELAND_M_6902	0x6902
+#define DEVICE_ID_VI_ICELAND_M_6903	0x6903
+
+
+struct iceland_pt_defaults defaults_iceland =
+{
+	/*
+	 * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
+	 * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+	 */
+	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
+	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+/* 35W - XT, XTL */
+struct iceland_pt_defaults defaults_icelandxt =
+{
+	/*
+	 * sviLoadLIneEn, SviLoadLineVddC,
+	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+	 * BAPM_TEMP_GRADIENT
+	 */
+	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+	{ 0xA7,  0x0, 0x0, 0xB5,  0x0, 0x0, 0x9F,  0x0, 0x0, 0xD6,  0x0, 0x0, 0xD7,  0x0, 0x0},
+	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+/* 25W - PRO, LE */
+struct iceland_pt_defaults defaults_icelandpro =
+{
+	/*
+	 * sviLoadLIneEn, SviLoadLineVddC,
+	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+	 * BAPM_TEMP_GRADIENT
+	 */
+	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+	{ 0xB7,  0x0, 0x0, 0xC3,  0x0, 0x0, 0xB5,  0x0, 0x0, 0xEA,  0x0, 0x0, 0xE6,  0x0, 0x0},
+	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint32_t tmp = 0;
+	struct cgs_system_info sys_info = {0};
+	uint32_t pdev_id;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	pdev_id = (uint32_t)sys_info.value;
+
+	switch (pdev_id) {
+	case DEVICE_ID_VI_ICELAND_M_6900:
+	case DEVICE_ID_VI_ICELAND_M_6903:
+		data->power_tune_defaults = &defaults_icelandxt;
+		break;
+
+	case DEVICE_ID_VI_ICELAND_M_6901:
+	case DEVICE_ID_VI_ICELAND_M_6902:
+		data->power_tune_defaults = &defaults_icelandpro;
+		break;
+	default:
+		/* TODO: need to assign valid defaults */
+		data->power_tune_defaults = &defaults_iceland;
+		pr_warning("Unknown V.I. Device ID.\n");
+		break;
+	}
+
+	/* Assume disabled */
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CAC);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SQRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DBRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TDRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TCPRamping);
+
+	data->ul_dte_tj_offset = tmp;
+
+	if (!tmp) {
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_CAC);
+
+		data->fast_watermark_threshold = 100;
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_PowerContainment)) {
+			data->enable_dte_feature = false;
+			data->enable_tdc_limit_feature = true;
+			data->enable_pkg_pwr_tracking_feature = true;
+		}
+	}
+}
+
+int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	struct iceland_pt_defaults *defaults = data->power_tune_defaults;
+	SMU71_Discrete_DpmTable  *dpm_table = &(data->smc_state_table);
+	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+	uint16_t *def1, *def2;
+	int i, j, k;
+
+	/*
+	 * The number of TDP fraction bits is changed from 8 to 7 for
+	 * Iceland, as requested by the SMC team.
+	 */
+	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
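+	/*
+	 * For example, a (hypothetical) usTDP of 65 W is encoded as
+	 * 65 * 256 = 16640 before being byte-swapped for the SMC.
+	 */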
+
+	dpm_table->DTETjOffset = (uint8_t)data->ul_dte_tj_offset;
+
+	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+	dpm_table->GpuTjHyst = 8;
+
+	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+	/* The following are for new Iceland Multi-input fan/thermal control */
+	if (ppm != NULL) {
+		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+	} else {
+		dpm_table->PPM_PkgPwrLimit = 0;
+		dpm_table->PPM_TemperatureLimit = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+	def1 = defaults->bapmti_r;
+	def2 = defaults->bapmti_rc;
+
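+	/*
+	 * The flat bapmti_r/bapmti_rc arrays are consumed in row-major
+	 * order: entry (i, j, k) sits at index
+	 * (i * SMU71_DTE_SOURCES + j) * SMU71_DTE_SINKS + k, which is
+	 * exactly the order the nested loops below advance def1/def2.
+	 */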
+	for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU71_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU71_DTE_SINKS; k++) {
+				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+				def1++;
+				def2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
+
+	data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+	data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+	data->power_tune_table.SviLoadLineTrimVddC = 3;
+	data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+	uint16_t tdc_limit;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
+
+	/*
+	 * The number of TDC fraction bits is changed from 8 to 7 for
+	 * Iceland, as requested by the SMC team.
+	 */
+	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+	data->power_tune_table.TDC_VDDC_PkgLimit =
+			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+	data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+			defaults->tdc_vddc_throttle_release_limit_perc;
+	data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+	return 0;
+}
+
+static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
+	uint32_t temp;
+
+	if (iceland_read_smc_sram_dword(hwmgr->smumgr,
+			fuse_table_offset +
+			offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+			(uint32_t *)&temp, data->sram_end))
+		PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+				return -EINVAL);
+	else
+		data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+	return 0;
+}
+
+static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 8; i++)
+		data->power_tune_table.GnbLPML[i] = 0;
+
+	return 0;
+}
+
+static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint16_t HiSidd;
+	uint16_t LoSidd;
+	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+	data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+	data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+	return 0;
+}
+
+int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint32_t pm_fuse_table_offset;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, PmFuseTable),
+				&pm_fuse_table_offset, data->sram_end))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to get pm_fuse_table_offset Failed!",
+					return -EINVAL);
+
+		/* DW0 - DW3 */
+		if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate bapm vddc vid Failed!",
+					return -EINVAL);
+
+		/* DW4 - DW5 */
+		if (iceland_populate_vddc_vid(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate vddc vid Failed!",
+					return -EINVAL);
+
+		/* DW6 */
+		if (iceland_populate_svi_load_line(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate SviLoadLine Failed!",
+					return -EINVAL);
+		/* DW7 */
+		if (iceland_populate_tdc_limit(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TDCLimit Failed!", return -EINVAL);
+		/* DW8 */
+		if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TdcWaterfallCtl, "
+					"LPMLTemperature Min and Max Failed!",
+					return -EINVAL);
+
+		/* DW9-DW12 */
+		if (0 != iceland_populate_temperature_scaler(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate LPMLTemperatureScaler Failed!",
+					return -EINVAL);
+
+		/* DW13-DW16 */
+		if (iceland_populate_gnb_lpml(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Failed!",
+					return -EINVAL);
+
+		/* DW17 */
+		if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Min and Max Vid Failed!",
+					return -EINVAL);
+
+		/* DW18 */
+		if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
+					return -EINVAL);
+
+		if (iceland_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+				(uint8_t *)&data->power_tune_table,
+				sizeof(struct SMU71_Discrete_PmFuses), data->sram_end))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to download PmFuseTable Failed!",
+					return -EINVAL);
+	}
+	return 0;
+}
+
+int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	int result = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CAC)) {
+		int smc_result;
+		smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+				(uint16_t)(PPSMC_MSG_EnableCac));
+		PP_ASSERT_WITH_CODE((0 == smc_result),
+				"Failed to enable CAC in SMC.", result = -1);
+
+		data->cac_enabled = (smc_result == 0);
+	}
+	return result;
+}
+
+static int iceland_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+	if (data->power_containment_features &
+			POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+		return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_PkgPwrSetLimit, n);
+	return 0;
+}
+
+static int iceland_set_overdrive_target_tdp(struct pp_hwmgr *hwmgr, uint32_t target_tdp)
+{
+	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+			PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+}
+
+int iceland_enable_power_containment(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	SMU71_Discrete_DpmTable *dpm_table = &data->smc_state_table;
+	int smc_result;
+	int result = 0;
+	uint32_t is_asic_kicker;
+
+	data->power_containment_features = 0;
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		is_asic_kicker = cgs_read_register(hwmgr->device, mmCC_BIF_BX_STRAP2);
+		is_asic_kicker = (is_asic_kicker >> 12) & 0x01;
+
+		if (data->enable_bapm_feature &&
+			(!is_asic_kicker ||
+			 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				 PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_EnableDTE));
+			PP_ASSERT_WITH_CODE((0 == smc_result),
+					"Failed to enable BAPM in SMC.", result = -1;);
+			if (0 == smc_result)
+				data->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
+		}
+
+		if (is_asic_kicker && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))
+			dpm_table->DTEMode = 2;
+
+		if (data->enable_tdc_limit_feature) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_TDCLimitEnable));
+			PP_ASSERT_WITH_CODE((0 == smc_result),
+					"Failed to enable TDCLimit in SMC.", result = -1;);
+			if (0 == smc_result)
+				data->power_containment_features |=
+						POWERCONTAINMENT_FEATURE_TDCLimit;
+		}
+
+		if (data->enable_pkg_pwr_tracking_feature) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
+			PP_ASSERT_WITH_CODE((0 == smc_result),
+					"Failed to enable PkgPwrTracking in SMC.", result = -1;);
+			if (0 == smc_result) {
+				struct phm_cac_tdp_table *cac_table =
+						hwmgr->dyn_state.cac_dtp_table;
+				uint32_t default_limit =
+					(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
+
+				data->power_containment_features |=
+						POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+				if (iceland_set_power_limit(hwmgr, default_limit))
+					pr_err("Failed to set Default Power Limit in SMC!\n");
+			}
+		}
+	}
+	return result;
+}
+
+int iceland_power_control_set_level(struct pp_hwmgr *hwmgr)
+{
+	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+	int adjust_percent, target_tdp;
+	int result = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		/* adjustment percentage has already been validated */
+		adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
+				hwmgr->platform_descriptor.TDPAdjustment :
+				(-1 * hwmgr->platform_descriptor.TDPAdjustment);
+		/*
+		 * The SMC requires target_tdp as a 7-bit fraction in the
+		 * DPM table, but as an 8-bit fraction in messages.
+		 */
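+		/*
+		 * Worked example with assumed numbers: usTDP = 65 W and a
+		 * -10% adjustment give (90 * 65 * 256) / 100 = 14976,
+		 * i.e. 58.5 W in the SMC's 8.8 fixed-point message format.
+		 */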
+		target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+		result = iceland_set_overdrive_target_tdp(hwmgr, (uint32_t)target_tdp);
+	}
+
+	return result;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h
new file mode 100644
index 0000000..6c25ee1
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+#ifndef ICELAND_POWERTUNE_H
+#define ICELAND_POWERTUNE_H
+
+#include "smu71.h"
+
+enum iceland_pt_config_reg_type {
+	ICELAND_CONFIGREG_MMR = 0,
+	ICELAND_CONFIGREG_SMC_IND,
+	ICELAND_CONFIGREG_DIDT_IND,
+	ICELAND_CONFIGREG_CACHE,
+	ICELAND_CONFIGREG_MAX
+};
+
+/* PowerContainment Features */
+#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
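+/*
+ * Note that BAPM reuses the DTE bit: iceland_enable_power_containment()
+ * enables BAPM through PPSMC_MSG_EnableDTE and never sets a separate DTE
+ * flag, so the two features do not need distinct bits.
+ */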
+#define POWERCONTAINMENT_FEATURE_BAPM		 0x00000001
+
+struct iceland_pt_config_reg {
+	uint32_t                           offset;
+	uint32_t                           mask;
+	uint32_t                           shift;
+	uint32_t                           value;
+	enum iceland_pt_config_reg_type       type;
+};
+
+struct iceland_pt_defaults {
+	uint8_t   svi_load_line_en;
+	uint8_t   svi_load_line_vddc;
+	uint8_t   tdc_vddc_throttle_release_limit_perc;
+	uint8_t   tdc_mawt;
+	uint8_t   tdc_waterfall_ctl;
+	uint8_t   dte_ambient_temp_base;
+	uint32_t  display_cac;
+	uint32_t  bapm_temp_gradient;
+	uint16_t  bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
+	uint16_t  bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
+};
+
+void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
+int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
+int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr);
+int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int iceland_enable_power_containment(struct pp_hwmgr *hwmgr);
+int iceland_power_control_set_level(struct pp_hwmgr *hwmgr);
+
+#endif  /* ICELAND_POWERTUNE_H */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c
new file mode 100644
index 0000000..527f370
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+#include <asm/div64.h>
+#include "iceland_thermal.h"
+#include "iceland_hwmgr.h"
+#include "iceland_smumgr.h"
+#include "atombios.h"
+#include "ppsmc.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+
+/**
+* Get Fan Speed Control Parameters.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    fan_speed_info is the address of the structure where the result is to be placed.
+* @exception Always succeeds.
+*/
+int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+					struct phm_fan_speed_info *fan_speed_info)
+{
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	fan_speed_info->supports_percent_read = true;
+	fan_speed_info->supports_percent_write = true;
+	fan_speed_info->min_percent = 0;
+	fan_speed_info->max_percent = 100;
+
+	if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
+		fan_speed_info->supports_rpm_read = true;
+		fan_speed_info->supports_rpm_write = true;
+		fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
+		fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
+	} else {
+		fan_speed_info->min_rpm = 0;
+		fan_speed_info->max_rpm = 0;
+	}
+
+	return 0;
+}
+
+/**
+* Get Fan Speed in percent.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    speed is the address where the fan speed percentage is to be placed.
+* @exception Fails if the 100% duty-cycle setting reads as 0.
+*/
+int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
+{
+	uint32_t duty100;
+	uint32_t duty;
+	uint64_t tmp64;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+	duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
+
+	if (0 == duty100)
+		return -EINVAL;
+
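+	/*
+	 * Scale the raw duty cycle to a percentage; e.g. (assumed register
+	 * values) duty = 96 with duty100 = 255 yields 96 * 100 / 255 = 37%.
+	 */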
+	tmp64 = (uint64_t)duty * 100;
+	do_div(tmp64, duty100);
+	*speed = (uint32_t)tmp64;
+
+	if (*speed > 100)
+		*speed = 100;
+
+	return 0;
+}
+
+/**
+* Get Fan Speed in RPM.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    speed is the address where the fan speed in RPM is to be placed.
+* @exception Returns not-supported if no fan is found or if pulses per revolution are not set.
+*/
+int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+{
+	return 0;
+}
+
+/**
+* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    mode   the fan control mode: 0 default, 1 by percent, 5 by RPM.
+* @exception Should always succeed.
+*/
+int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+	if (hwmgr->fan_ctrl_is_in_default_mode) {
+		hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
+		hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
+		hwmgr->fan_ctrl_is_in_default_mode = false;
+	}
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
+
+	return 0;
+}
+
+/**
+* Reset Fan Speed Control to default mode.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @exception Should always succeed.
+*/
+static int iceland_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
+{
+	if (!hwmgr->fan_ctrl_is_in_default_mode) {
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
+		hwmgr->fan_ctrl_is_in_default_mode = true;
+	}
+
+	return 0;
+}
+
+int iceland_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ?  0 : -EINVAL;
+}
+
+int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ?  0 : -EINVAL;
+}
+
+/**
+* Set Fan Speed in percent.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    speed is the percentage value (0% - 100%) to be set.
+* @exception Fails if the 100% duty-cycle setting reads as 0.
+*/
+int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
+{
+	uint32_t duty100;
+	uint32_t duty;
+	uint64_t tmp64;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return -EINVAL;
+
+	if (speed > 100) {
+		pr_warning("Cannot set more than 100%% duty cycle. Set it to 100.\n");
+		speed = 100;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+		iceland_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+	if (0 == duty100)
+		return -EINVAL;
+
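+	/*
+	 * Map the percentage onto the hardware duty range; e.g. (assumed)
+	 * speed = 50 with duty100 = 255 gives 50 * 255 / 100 = 127.
+	 */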
+	tmp64 = (uint64_t)speed * duty100;
+	do_div(tmp64, 100);
+	duty = (uint32_t)tmp64;
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
+
+	return iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+}
+
+/**
+* Reset Fan Speed to default.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @exception Always succeeds.
+*/
+int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
+{
+	int result;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
+		result = iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+		if (0 == result)
+			result = iceland_fan_ctrl_start_smc_fan_control(hwmgr);
+	} else
+		result = iceland_fan_ctrl_set_default_mode(hwmgr);
+
+	return result;
+}
+
+/**
+* Set Fan Speed in RPM.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    speed is the fan speed in RPM (min - max) to be set.
+* @exception Fails if the speed does not lie between min and max.
+*/
+int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+{
+	return 0;
+}
+
+/**
+* Reads the temperature from the Iceland thermal controller.
+*
+* @param    hwmgr The address of the hardware manager.
+*/
+int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+	int temp;
+
+	temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
+
+	/*
+	 * Bit 9 means the reading is lower than the lowest usable
+	 * value.
+	 */
+	if (0 != (0x200 & temp))
+		temp = ICELAND_THERMAL_MAXIMUM_TEMP_READING;
+	else
+		temp = (temp & 0x1ff);
+
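+	/* Convert the reading to PP internal temperature units. */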
+	temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return temp;
+}
+
+/**
+* Set the requested temperature range for high and low alert signals
+*
+* @param    hwmgr The address of the hardware manager.
+* @param    range Temperature range to be programmed for high and low alert signals
+* @exception PP_Result_BadInput if the input data is not valid.
+*/
+static int iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
+{
+	uint32_t low = ICELAND_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	uint32_t high = ICELAND_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
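+	/*
+	 * Clamp the requested window to the supported alert range:
+	 * low = max(ICELAND_THERMAL_MINIMUM_ALERT_TEMP, low_temp) and
+	 * high = min(ICELAND_THERMAL_MAXIMUM_ALERT_TEMP, high_temp).
+	 */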
+	if (low < low_temp)
+		low = low_temp;
+	if (high > high_temp)
+		high = high_temp;
+
+	if (low > high)
+		return -EINVAL;
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+
+	return 0;
+}
+
+/**
+* Programs thermal controller one-time setting registers
+*
+* @param    hwmgr The address of the hardware manager.
+*/
+static int iceland_thermal_initialize(struct pp_hwmgr *hwmgr)
+{
+	if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+						CG_TACH_CTRL, EDGE_PER_REV,
+						hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
+
+	return 0;
+}
+
+/**
+* Enable thermal alerts on the Iceland thermal controller.
+*
+* @param    hwmgr The address of the hardware manager.
+*/
+static int iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+{
+	uint32_t alert;
+
+	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
+	alert &= ~(ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK);
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
+
+	/* send message to SMU to enable internal thermal interrupts */
+	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
+}
+
+/**
+* Disable thermal alerts on the Iceland thermal controller.
+* @param    hwmgr The address of the hardware manager.
+*/
+static int iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr)
+{
+	uint32_t alert;
+
+	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
+	alert |= (ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK);
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
+
+	/* send message to SMU to disable internal thermal interrupts */
+	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
+}
+
+/**
+* Uninitialize the thermal controller.
+* Currently just disables alerts.
+* @param    hwmgr The address of the hardware manager.
+*/
+int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
+{
+	int result = iceland_thermal_disable_alert(hwmgr);
+
+	if (result)
+		pr_warning("Failed to disable thermal alerts!\n");
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		iceland_fan_ctrl_set_default_mode(hwmgr);
+
+	return result;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   always 0
+*/
+static int tf_iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
+{
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+	uint32_t duty100;
+	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	uint16_t fdo_min, slope1, slope2;
+	uint32_t reference_clock;
+	int res;
+	uint64_t tmp64;
+
+	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+		return 0;
+
+	if (0 == data->fan_table_start) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+	if (0 == duty100) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
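+	/*
+	 * usPWMMin is in hundredths of a percent, so dividing by 10000 maps
+	 * it onto the 0..duty100 hardware range; e.g. a (hypothetical)
+	 * usPWMMin of 3000 (30%) with duty100 = 255 gives fdo_min = 76.
+	 */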
+	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (uint16_t)tmp64;
+
+	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
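+	/*
+	 * The 16x factor preserves precision in the PWM-per-degree ratio,
+	 * and the "+50 ... / 100" rounds to nearest when converting the
+	 * hundredths-based inputs back to whole units (the same /100
+	 * conversion the TempMin/Med/Max assignments below perform).
+	 */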
+
+	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+	fan_table.Slope1 = cpu_to_be16(slope1);
+	fan_table.Slope2 = cpu_to_be16(slope2);
+
+	fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+	fan_table.HystUp = cpu_to_be16(1);
+
+	fan_table.HystSlope = cpu_to_be16(1);
+
+	fan_table.TempRespLim = cpu_to_be16(5);
+
+	reference_clock = iceland_get_xclk(hwmgr);
+
+	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+	/* fan_table.FanControl_GL_Flag = 1; */
+
+	res = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
+/*
+ * TODO: for some device IDs (0x692b) sending these messages returns an
+ * invalid command, so the min-PWM and acoustic-limit setup below stays
+ * disabled for now.
+	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
+		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
+						hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);
+
+	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
+		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
+					hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);
+
+	if (0 != res)
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+*/
+	return 0;
+}
+
+/**
+* Start the fan control on the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   always 0
+*/
+static int tf_iceland_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
+{
+	/*
+	 * If the fan table setup has failed, we could have disabled
+	 * PHM_PlatformCaps_MicrocodeFanControl even after this function
+	 * was included in the table.  Make sure that we still think
+	 * controlling the fan is OK.
+	 */
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
+		iceland_fan_ctrl_start_smc_fan_control(hwmgr);
+		iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+	}
+
+	return 0;
+}
+
+/**
+* Set temperature range for high and low alerts
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   result from set temperature range routine
+*/
+static int tf_iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
+
+	if (range == NULL)
+		return -EINVAL;
+
+	return iceland_thermal_set_temperature_range(hwmgr, range->min, range->max);
+}
+
+/**
+* Programs one-time setting registers
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   result from initialize thermal controller routine
+*/
+static int tf_iceland_thermal_initialize(struct pp_hwmgr *hwmgr, void *input,
+				void *output, void *storage, int result)
+{
+	return iceland_thermal_initialize(hwmgr);
+}
+
+/**
+* Enable high and low alerts
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   result from enable alert routine
+*/
+static int tf_iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	return iceland_thermal_enable_alert(hwmgr);
+}
+
+/**
+* Disable high and low alerts
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    input the pointer to input data
+* @param    output the pointer to output data
+* @param    storage the pointer to temporary storage
+* @param    result the last failure code
+* @return   result from disable alert routine
+*/
+static int tf_iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
+{
+	return iceland_thermal_disable_alert(hwmgr);
+}
+
+static const struct phm_master_table_item iceland_thermal_start_thermal_controller_master_list[] = {
+	{ NULL, tf_iceland_thermal_initialize },
+	{ NULL, tf_iceland_thermal_set_temperature_range },
+	{ NULL, tf_iceland_thermal_enable_alert },
+	/*
+	 * We should restrict performance levels to low before we halt
+	 * the SMC.  On the other hand we are still in boot state when
+	 * we do this so it would be pointless.  If this assumption
+	 * changes we have to revisit this table.
+	 */
+	{ NULL, tf_iceland_thermal_setup_fan_table},
+	{ NULL, tf_iceland_thermal_start_smc_fan_control},
+	{ NULL, NULL }
+};
+
+static const struct phm_master_table_header iceland_thermal_start_thermal_controller_master = {
+	0,
+	PHM_MasterTableFlag_None,
+	iceland_thermal_start_thermal_controller_master_list
+};
+
+static const struct phm_master_table_item iceland_thermal_set_temperature_range_master_list[] = {
+	{ NULL, tf_iceland_thermal_disable_alert},
+	{ NULL, tf_iceland_thermal_set_temperature_range},
+	{ NULL, tf_iceland_thermal_enable_alert},
+	{ NULL, NULL }
+};
+
+static const struct phm_master_table_header iceland_thermal_set_temperature_range_master = {
+	0,
+	PHM_MasterTableFlag_None,
+	iceland_thermal_set_temperature_range_master_list
+};
+
+int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
+{
+	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+		iceland_fan_ctrl_set_default_mode(hwmgr);
+	return 0;
+}
+
+/**
+* Initializes the thermal controller related functions in the Hardware Manager structure.
+* @param    hwmgr The address of the hardware manager.
+* @exception Any error code from the low-level communication.
+*/
+int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr)
+{
+	int result;
+
+	result = phm_construct_table(hwmgr, &iceland_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
+
+	if (0 == result) {
+		result = phm_construct_table(hwmgr,
+						&iceland_thermal_start_thermal_controller_master,
+						&(hwmgr->start_thermal_controller));
+		if (0 != result)
+			phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
+	}
+
+	if (0 == result)
+		hwmgr->fan_ctrl_is_in_default_mode = true;
+	return result;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h
new file mode 100644
index 0000000..267945f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+
+#ifndef ICELAND_THERMAL_H
+#define ICELAND_THERMAL_H
+
+#include "hwmgr.h"
+
+#define ICELAND_THERMAL_HIGH_ALERT_MASK         0x1
+#define ICELAND_THERMAL_LOW_ALERT_MASK          0x2
+
+#define ICELAND_THERMAL_MINIMUM_TEMP_READING    -256
+#define ICELAND_THERMAL_MAXIMUM_TEMP_READING    255
+
+#define ICELAND_THERMAL_MINIMUM_ALERT_TEMP      0
+#define ICELAND_THERMAL_MAXIMUM_ALERT_TEMP      255
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+
+extern int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+extern int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
+extern int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
+extern int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+extern int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
index b5edb51..7e405b0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
@@ -31,7 +31,7 @@
 	return 0;
 }
 
-int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_uvd_power_gating(hwmgr)) {
 		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -47,7 +47,7 @@
 	return 0;
 }
 
-int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_vce_power_gating(hwmgr))
 		return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -55,7 +55,7 @@
 	return 0;
 }
 
-int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
+static int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_vce_power_gating(hwmgr))
 		return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -63,7 +63,7 @@
 	return 0;
 }
 
-int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
+static int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SamuPowerGating))
@@ -72,7 +72,7 @@
 	return 0;
 }
 
-int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
+static int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SamuPowerGating))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index 783dcc3..191ed50 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -33,11 +33,11 @@
 #include "pp_debug.h"
 #include "ppatomctrl.h"
 #include "atombios.h"
-#include "tonga_pptable.h"
+#include "pptable_v1_0.h"
 #include "pppcielanes.h"
 #include "amd_pcie_helpers.h"
 #include "hardwaremanager.h"
-#include "tonga_processpptables.h"
+#include "process_pptables_v1_0.h"
 #include "cgs_common.h"
 #include "smu74.h"
 #include "smu_ucode_xfer_vi.h"
@@ -97,19 +97,6 @@
 #define PCIE_BUS_CLK                10000
 #define TCLK                        (PCIE_BUS_CLK / 10)
 
-
-static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/*  [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
-static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
-  { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/*  [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
 /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
 enum DPM_EVENT_SRC {
 	DPM_EVENT_SRC_ANALOG = 0,
@@ -121,7 +108,7 @@
 
 static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);
 
-struct polaris10_power_state *cast_phw_polaris10_power_state(
+static struct polaris10_power_state *cast_phw_polaris10_power_state(
 				  struct pp_hw_power_state *hw_ps)
 {
 	PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
@@ -131,7 +118,8 @@
 	return (struct polaris10_power_state *)hw_ps;
 }
 
-const struct polaris10_power_state *cast_const_phw_polaris10_power_state(
+static const struct polaris10_power_state *
+cast_const_phw_polaris10_power_state(
 				 const struct pp_hw_power_state *hw_ps)
 {
 	PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
@@ -154,7 +142,7 @@
  * @param    hwmgr  the address of the powerplay hardware manager.
  * @return   always 0
  */
-int phm_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+static int phm_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
 {
 	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
 
@@ -163,7 +151,7 @@
 	return 0;
 }
 
-uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+static uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
 {
 	uint32_t speedCntl = 0;
 
@@ -174,7 +162,7 @@
 			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
 }
 
-int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+static int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 {
 	uint32_t link_width;
 
@@ -194,7 +182,7 @@
 * @param    pHwMgr  the address of the powerplay hardware manager.
 * @return   always PP_Result_OK
 */
-int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+static int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
 {
 	PP_ASSERT_WITH_CODE(
 		(hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
@@ -674,7 +662,7 @@
  * on the power policy or external client requests,
  * such as UVD request, etc.
  */
-int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+static int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 	struct phm_ppt_v1_information *table_info =
@@ -748,11 +736,6 @@
 	return 0;
 }
 
-uint8_t convert_to_vid(uint16_t vddc)
-{
-	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
 /**
  * Mvdd table preparation for SMC.
  *
@@ -853,7 +836,7 @@
 * @return   always  0
 */
 
-int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
 		struct SMU74_Discrete_DpmTable *table)
 {
 	polaris10_populate_smc_vddci_table(hwmgr, table);
@@ -1430,7 +1413,7 @@
 * @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
 * @param    voltage     the SMC VOLTAGE structure to be populated
 */
-int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
 		uint32_t mclk, SMIO_Pattern *smio_pat)
 {
 	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -1944,7 +1927,7 @@
 }
 
 
-int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 	SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
@@ -2573,7 +2556,7 @@
 	return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
 }
 
-int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
+static int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 	data->pcie_performance_request = true;
@@ -2581,7 +2564,7 @@
 	return 0;
 }
 
-int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result, result = 0;
 	tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
@@ -2762,19 +2745,16 @@
 	return 0;
 }
 
-int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+static int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
 	return phm_hwmgr_backend_fini(hwmgr);
 }
 
-int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+static int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkDeepSleep);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 		PHM_PlatformCaps_DynamicPatchPowerState);
 
 	if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE)
@@ -2819,13 +2799,6 @@
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 						PHM_PlatformCaps_TCPRamping);
 
-	if (hwmgr->powercontainment_enabled)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			    PHM_PlatformCaps_PowerContainment);
-	else
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			    PHM_PlatformCaps_PowerContainment);
-
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 							PHM_PlatformCaps_CAC);
 
@@ -2904,8 +2877,8 @@
 				continue;
 			}
 
-			/* need to make sure vddc is less than 2v or else, it could burn the ASIC.
-			 * real voltage level in unit of 0.01mv */
+			/* need to make sure vddc is less than 2V or else, it could burn the ASIC.
+			 * real voltage level in unit of 0.01mV */
 			PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
 					"Invalid VDDC value", result = -EINVAL;);
 
@@ -3132,7 +3105,7 @@
 	return 0;
 }
 
-int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+static int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
 {
 	struct phm_ppt_v1_information *table_info =
 		       (struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -3141,8 +3114,27 @@
 	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
 			table_info->vddc_lookup_table;
 	uint32_t i;
+	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
+	struct cgs_system_info sys_info = {0};
 
-	if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
+	sys_info.size = sizeof(struct cgs_system_info);
+
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	hw_revision = (uint32_t)sys_info.value;
+
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	sub_sys_id = (uint32_t)sys_info.value;
+
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	sub_vendor_id = (uint32_t)sys_info.value;
+
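+	/* Apply the workaround only to the board revisions known to need it. */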
+	if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
+			((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
+		    (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
+		    (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
 		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
 			return 0;
 
@@ -3157,7 +3149,7 @@
 }
 
 
-int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_hwmgr *data;
 	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
@@ -3900,7 +3892,7 @@
 
 	ps = (struct polaris10_power_state *)(&state->hardware);
 
-	result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
+	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
 			polaris10_get_pp_table_entry_callback_func);
 
 	/* This is the earliest time we have all the dependency table and the VBIOS boot state
@@ -4367,7 +4359,8 @@
 	return 0;
 }
 
-int polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+static int
+polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
 	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
 			PPSMC_MSG_UVDDPM_Enable :
@@ -4381,7 +4374,8 @@
 			PPSMC_MSG_VCEDPM_Disable);
 }
 
-int polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+static int
+polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
 	return smum_send_msg_to_smc(hwmgr->smumgr, enable?
 			PPSMC_MSG_SAMUDPM_Enable :
@@ -4695,14 +4689,16 @@
 }
 
 
-int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+static int
+polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 {
 	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
 
 	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
 }
 
-int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+static int
+polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 {
 	uint32_t num_active_displays = 0;
 	struct cgs_display_info info = {0};
@@ -4725,7 +4721,7 @@
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always OK
 */
-int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
+static int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 	uint32_t num_active_displays = 0;
@@ -4770,7 +4766,7 @@
 }
 
 
-int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
 	return polaris10_program_display_gap(hwmgr);
 }
@@ -4794,13 +4790,15 @@
 			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
 }
 
-int polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int
+polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
 					const void *thermal_interrupt_info)
 {
 	return 0;
 }
 
-bool polaris10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool polaris10_check_smc_update_required_for_display_configuration(
+		struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 	bool is_update_required = false;
@@ -4830,7 +4828,9 @@
 		  (pl1->pcie_lane == pl2->pcie_lane));
 }
 
-int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int polaris10_check_states_equal(struct pp_hwmgr *hwmgr,
+		const struct pp_hw_power_state *pstate1,
+		const struct pp_hw_power_state *pstate2, bool *equal)
 {
 	const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
 	const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
@@ -4861,7 +4861,7 @@
 	return 0;
 }
 
-int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 
@@ -4974,7 +4974,7 @@
 	return 0;
 }
 
-int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result, result = 0;
 
@@ -5245,7 +5245,7 @@
 	.get_sclk = polaris10_dpm_get_sclk,
 	.patch_boot_state = polaris10_dpm_patch_boot_state,
 	.get_pp_table_entry = polaris10_get_pp_table_entry,
-	.get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
+	.get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0,
 	.print_current_perforce_level = polaris10_print_current_perforce_level,
 	.powerdown_uvd = polaris10_phm_powerdown_uvd,
 	.powergate_uvd = polaris10_phm_powergate_uvd,
@@ -5282,7 +5282,7 @@
 int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
 	hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
-	hwmgr->pptable_func = &tonga_pptable_funcs;
+	hwmgr->pptable_func = &pptable_v1_0_funcs;
 	pp_polaris10_thermal_initialize(hwmgr);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
index 33c3394..378ab34 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
@@ -30,6 +30,7 @@
 #include "ppatomctrl.h"
 #include "polaris10_ppsmc.h"
 #include "polaris10_powertune.h"
+#include "polaris10_smumgr.h"
 
 #define POLARIS10_MAX_HARDWARE_POWERLEVELS	2
 
@@ -165,10 +166,6 @@
 	uint16_t  max;
 	uint16_t  min;
 };
-struct polaris10_range_table {
-	uint32_t trans_lower_frequency; /* in 10khz */
-	uint32_t trans_upper_frequency;
-};
 
 struct polaris10_hwmgr {
 	struct polaris10_dpm_table			dpm_table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
index bc78e28..329119d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
@@ -66,19 +66,6 @@
 	enum polaris10_pt_config_reg_type       type;
 };
 
-struct polaris10_pt_defaults {
-	uint8_t   SviLoadLineEn;
-	uint8_t   SviLoadLineVddC;
-	uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
-	uint8_t   TDC_MAWt;
-	uint8_t   TdcWaterfallCtl;
-	uint8_t   DTEAmbientTempBase;
-
-	uint32_t  DisplayCac;
-	uint32_t  BAPM_TEMP_GRADIENT;
-	uint16_t  BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
-	uint16_t  BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
-};
 
 void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
 int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
index b206632..41f835a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
@@ -152,7 +152,7 @@
 	return 0;
 }
 
-int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+static int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
 {
 	int result;
 
@@ -425,7 +425,7 @@
 * @param    Result the last failure code
 * @return   result from set temperature range routine
 */
-int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+static int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
 		void *input, void *output, void *storage, int result)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -537,7 +537,7 @@
 * @param    Result the last failure code
 * @return   result from set temperature range routine
 */
-int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
+static int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
 		void *input, void *output, void *storage, int result)
 {
 /* If the fan table setup has failed we could have disabled
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
similarity index 99%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
rename to drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
index f127198..1e870f5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
@@ -164,7 +164,7 @@
 typedef struct _ATOM_Tonga_State_Array {
 	UCHAR ucRevId;
 	UCHAR ucNumEntries;		/* Number of entries. */
-	ATOM_Tonga_State states[1];	/* Dynamically allocate entries. */
+	ATOM_Tonga_State entries[1];	/* Dynamically allocate entries. */
 } ATOM_Tonga_State_Array;
 
 typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
similarity index 81%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
rename to drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index a0ffd4a..7de701d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -23,13 +23,13 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 
-#include "tonga_processpptables.h"
+#include "process_pptables_v1_0.h"
 #include "ppatomctrl.h"
 #include "atombios.h"
 #include "pp_debug.h"
 #include "hwmgr.h"
 #include "cgs_common.h"
-#include "tonga_pptable.h"
+#include "pptable_v1_0.h"
 
 /**
  * Private Function used during initialization.
@@ -153,12 +153,14 @@
 static int get_vddc_lookup_table(
 		struct pp_hwmgr	*hwmgr,
 		phm_ppt_v1_voltage_lookup_table	**lookup_table,
-		const ATOM_Tonga_Voltage_Lookup_Table	*vddc_lookup_pp_tables,
-		uint32_t	max_levels
+		const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
+		uint32_t max_levels
 		)
 {
 	uint32_t table_size, i;
 	phm_ppt_v1_voltage_lookup_table *table;
+	phm_ppt_v1_voltage_lookup_record *record;
+	ATOM_Tonga_Voltage_Lookup_Record *atom_record;
 
 	PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
 		"Invalid CAC Leakage PowerPlay Table!", return 1);
@@ -176,15 +178,17 @@
 	table->count = vddc_lookup_pp_tables->ucNumEntries;
 
 	for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
-		table->entries[i].us_calculated = 0;
-		table->entries[i].us_vdd =
-			vddc_lookup_pp_tables->entries[i].usVdd;
-		table->entries[i].us_cac_low =
-			vddc_lookup_pp_tables->entries[i].usCACLow;
-		table->entries[i].us_cac_mid =
-			vddc_lookup_pp_tables->entries[i].usCACMid;
-		table->entries[i].us_cac_high =
-			vddc_lookup_pp_tables->entries[i].usCACHigh;
+		record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					phm_ppt_v1_voltage_lookup_record,
+					entries, table, i);
+		atom_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_Voltage_Lookup_Record,
+					entries, vddc_lookup_pp_tables, i);
+		record->us_calculated = 0;
+		record->us_vdd = atom_record->usVdd;
+		record->us_cac_low = atom_record->usCACLow;
+		record->us_cac_mid = atom_record->usCACMid;
+		record->us_cac_high = atom_record->usCACHigh;
 	}
 
 	*lookup_table = table;
@@ -313,11 +317,12 @@
 static int get_valid_clk(
 		struct pp_hwmgr *hwmgr,
 		struct phm_clock_array **clk_table,
-		const phm_ppt_v1_clock_voltage_dependency_table  * clk_volt_pp_table
+		phm_ppt_v1_clock_voltage_dependency_table const *clk_volt_pp_table
 		)
 {
 	uint32_t table_size, i;
 	struct phm_clock_array *table;
+	phm_ppt_v1_clock_voltage_dependency_record *dep_record;
 
 	PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
 		"Invalid PowerPlay Table!", return -1);
@@ -334,9 +339,12 @@
 
 	table->count = (uint32_t)clk_volt_pp_table->count;
 
-	for (i = 0; i < table->count; i++)
-		table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk;
-
+	for (i = 0; i < table->count; i++) {
+		dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+				phm_ppt_v1_clock_voltage_dependency_record,
+				entries, clk_volt_pp_table, i);
+		table->values[i] = (uint32_t)dep_record->clk;
+	}
 	*clk_table = table;
 
 	return 0;
@@ -345,7 +353,7 @@
 static int get_hard_limits(
 		struct pp_hwmgr *hwmgr,
 		struct phm_clock_and_voltage_limits *limits,
-		const ATOM_Tonga_Hard_Limit_Table * limitable
+		ATOM_Tonga_Hard_Limit_Table const *limitable
 		)
 {
 	PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
@@ -363,11 +371,13 @@
 static int get_mclk_voltage_dependency_table(
 		struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table,
-		const ATOM_Tonga_MCLK_Dependency_Table * mclk_dep_table
+		ATOM_Tonga_MCLK_Dependency_Table const *mclk_dep_table
 		)
 {
 	uint32_t table_size, i;
 	phm_ppt_v1_clock_voltage_dependency_table *mclk_table;
+	phm_ppt_v1_clock_voltage_dependency_record *mclk_table_record;
+	ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
 
 	PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries),
 		"Invalid PowerPlay Table!", return -1);
@@ -385,16 +395,17 @@
 	mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
 
 	for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
-		mclk_table->entries[i].vddInd =
-			mclk_dep_table->entries[i].ucVddcInd;
-		mclk_table->entries[i].vdd_offset =
-			mclk_dep_table->entries[i].usVddgfxOffset;
-		mclk_table->entries[i].vddci =
-			mclk_dep_table->entries[i].usVddci;
-		mclk_table->entries[i].mvdd =
-			mclk_dep_table->entries[i].usMvdd;
-		mclk_table->entries[i].clk =
-			mclk_dep_table->entries[i].ulMclk;
+		mclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					phm_ppt_v1_clock_voltage_dependency_record,
+						entries, mclk_table, i);
+		mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_MCLK_Dependency_Record,
+						entries, mclk_dep_table, i);
+		mclk_table_record->vddInd = mclk_dep_record->ucVddcInd;
+		mclk_table_record->vdd_offset = mclk_dep_record->usVddgfxOffset;
+		mclk_table_record->vddci = mclk_dep_record->usVddci;
+		mclk_table_record->mvdd = mclk_dep_record->usMvdd;
+		mclk_table_record->clk = mclk_dep_record->ulMclk;
 	}
 
 	*pp_tonga_mclk_dep_table = mclk_table;
@@ -405,15 +416,17 @@
 static int get_sclk_voltage_dependency_table(
 		struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
-		const PPTable_Generic_SubTable_Header *sclk_dep_table
+		PPTable_Generic_SubTable_Header const  *sclk_dep_table
 		)
 {
 	uint32_t table_size, i;
 	phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
+	phm_ppt_v1_clock_voltage_dependency_record *sclk_table_record;
 
 	if (sclk_dep_table->ucRevId < 1) {
 		const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
 			    (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
+		ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
 
 		PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
 			"Invalid PowerPlay Table!", return -1);
@@ -431,20 +444,23 @@
 		sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
 
 		for (i = 0; i < tonga_table->ucNumEntries; i++) {
-			sclk_table->entries[i].vddInd =
-				tonga_table->entries[i].ucVddInd;
-			sclk_table->entries[i].vdd_offset =
-				tonga_table->entries[i].usVddcOffset;
-			sclk_table->entries[i].clk =
-				tonga_table->entries[i].ulSclk;
-			sclk_table->entries[i].cks_enable =
-				(((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-			sclk_table->entries[i].cks_voffset =
-				(tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+			sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Tonga_SCLK_Dependency_Record,
+						entries, tonga_table, i);
+			sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						phm_ppt_v1_clock_voltage_dependency_record,
+						entries, sclk_table, i);
+			sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+			sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+			sclk_table_record->clk = sclk_dep_record->ulSclk;
+			sclk_table_record->cks_enable =
+				(((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+			sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
 		}
 	} else {
 		const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
 			    (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+		ATOM_Polaris_SCLK_Dependency_Record *sclk_dep_record;
 
 		PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
 			"Invalid PowerPlay Table!", return -1);
@@ -462,17 +478,19 @@
 		sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
 
 		for (i = 0; i < polaris_table->ucNumEntries; i++) {
-			sclk_table->entries[i].vddInd =
-				polaris_table->entries[i].ucVddInd;
-			sclk_table->entries[i].vdd_offset =
-				polaris_table->entries[i].usVddcOffset;
-			sclk_table->entries[i].clk =
-				polaris_table->entries[i].ulSclk;
-			sclk_table->entries[i].cks_enable =
-				(((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-			sclk_table->entries[i].cks_voffset =
-				(polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
-			sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+			sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Polaris_SCLK_Dependency_Record,
+						entries, polaris_table, i);
+			sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						phm_ppt_v1_clock_voltage_dependency_record,
+						entries, sclk_table, i);
+			sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+			sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+			sclk_table_record->clk = sclk_dep_record->ulSclk;
+			sclk_table_record->cks_enable =
+				(((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+			sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
+			sclk_table_record->sclk_offset = sclk_dep_record->ulSclkOffset;
 		}
 	}
 	*pp_tonga_sclk_dep_table = sclk_table;
@@ -483,16 +501,19 @@
 static int get_pcie_table(
 		struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
-		const PPTable_Generic_SubTable_Header * pTable
+		PPTable_Generic_SubTable_Header const *ptable
 		)
 {
 	uint32_t table_size, i, pcie_count;
 	phm_ppt_v1_pcie_table *pcie_table;
 	struct phm_ppt_v1_information *pp_table_information =
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	phm_ppt_v1_pcie_record *pcie_record;
 
-	if (pTable->ucRevId < 1) {
-		const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable;
+	if (ptable->ucRevId < 1) {
+		const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)ptable;
+		ATOM_Tonga_PCIE_Record *atom_pcie_record;
+
 		PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
 			"Invalid PowerPlay Table!", return -1);
 
@@ -518,18 +539,23 @@
 			Disregarding the excess entries... \n");
 
 		pcie_table->count = pcie_count;
-
 		for (i = 0; i < pcie_count; i++) {
-			pcie_table->entries[i].gen_speed =
-				atom_pcie_table->entries[i].ucPCIEGenSpeed;
-			pcie_table->entries[i].lane_width =
-				atom_pcie_table->entries[i].usPCIELaneWidth;
+			pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						phm_ppt_v1_pcie_record,
+						entries, pcie_table, i);
+			atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Tonga_PCIE_Record,
+						entries, atom_pcie_table, i);
+			pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+			pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
 		}
 
 		*pp_tonga_pcie_table = pcie_table;
 	} else {
 		/* Polaris10/Polaris11 and newer. */
-		const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)pTable;
+		const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)ptable;
+		ATOM_Polaris10_PCIE_Record *atom_pcie_record;
+
 		PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
 			"Invalid PowerPlay Table!", return -1);
 
@@ -557,12 +583,15 @@
 		pcie_table->count = pcie_count;
 
 		for (i = 0; i < pcie_count; i++) {
-			pcie_table->entries[i].gen_speed =
-				atom_pcie_table->entries[i].ucPCIEGenSpeed;
-			pcie_table->entries[i].lane_width =
-				atom_pcie_table->entries[i].usPCIELaneWidth;
-			pcie_table->entries[i].pcie_sclk =
-				atom_pcie_table->entries[i].ulPCIE_Sclk;
+			pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						phm_ppt_v1_pcie_record,
+						entries, pcie_table, i);
+			atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Polaris10_PCIE_Record,
+						entries, atom_pcie_table, i);
+			pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+			pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
+			pcie_record->pcie_sclk = atom_pcie_record->ulPCIE_Sclk;
 		}
 
 		*pp_tonga_pcie_table = pcie_table;
@@ -684,6 +713,7 @@
 	uint32_t table_size, i;
 	const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record;
 	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table;
+	phm_ppt_v1_mm_clock_voltage_dependency_record *mm_table_record;
 
 	PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
 		"Invalid PowerPlay Table!", return -1);
@@ -700,14 +730,19 @@
 	mm_table->count = mm_dependency_table->ucNumEntries;
 
 	for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
-		mm_dependency_record = &mm_dependency_table->entries[i];
-		mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd;
-		mm_table->entries[i].vddgfx_offset = mm_dependency_record->usVddgfxOffset;
-		mm_table->entries[i].aclk = mm_dependency_record->ulAClk;
-		mm_table->entries[i].samclock = mm_dependency_record->ulSAMUClk;
-		mm_table->entries[i].eclk = mm_dependency_record->ulEClk;
-		mm_table->entries[i].vclk = mm_dependency_record->ulVClk;
-		mm_table->entries[i].dclk = mm_dependency_record->ulDClk;
+		mm_dependency_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Tonga_MM_Dependency_Record,
+						entries, mm_dependency_table, i);
+		mm_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					phm_ppt_v1_mm_clock_voltage_dependency_record,
+					entries, mm_table, i);
+		mm_table_record->vddcInd = mm_dependency_record->ucVddcInd;
+		mm_table_record->vddgfx_offset = mm_dependency_record->usVddgfxOffset;
+		mm_table_record->aclk = mm_dependency_record->ulAClk;
+		mm_table_record->samclock = mm_dependency_record->ulSAMUClk;
+		mm_table_record->eclk = mm_dependency_record->ulEClk;
+		mm_table_record->vclk = mm_dependency_record->ulVClk;
+		mm_table_record->dclk = mm_dependency_record->ulDClk;
 	}
 
 	*tonga_mm_table = mm_table;
@@ -1014,7 +1049,7 @@
 	return 0;
 }
 
-int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
+int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
 {
 	int result = 0;
 	const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1065,7 +1100,7 @@
 	return result;
 }
 
-int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
+int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
 {
 	struct phm_ppt_v1_information *pp_table_information =
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1109,14 +1144,14 @@
 	return 0;
 }
 
-const struct pp_table_func tonga_pptable_funcs = {
-	.pptable_init = tonga_pp_tables_initialize,
-	.pptable_fini = tonga_pp_tables_uninitialize,
+const struct pp_table_func pptable_v1_0_funcs = {
+	.pptable_init = pp_tables_v1_0_initialize,
+	.pptable_fini = pp_tables_v1_0_uninitialize,
 };
 
-int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
+int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr)
 {
-	const ATOM_Tonga_State_Array * state_arrays;
+	ATOM_Tonga_State_Array const *state_arrays;
 	const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
 
 	PP_ASSERT_WITH_CODE((NULL != pp_table),
@@ -1163,6 +1198,71 @@
 	return result;
 }
 
+static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
+{
+	const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+	const ATOM_Tonga_VCE_State_Table *vce_state_table =
+				(ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset));
+
+	if (vce_state_table == NULL)
+		return 0;
+
+	return vce_state_table->ucNumEntries;
+}
+
+static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
+		struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+{
+	const ATOM_Tonga_VCE_State_Record *vce_state_record;
+	ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
+	ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
+	ATOM_Tonga_MM_Dependency_Record *mm_dep_record;
+	const ATOM_Tonga_POWERPLAYTABLE *pptable = get_powerplay_table(hwmgr);
+	const ATOM_Tonga_VCE_State_Table *vce_state_table = (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pptable)
+							  + le16_to_cpu(pptable->usVCEStateTableOffset));
+	const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = (ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long)pptable)
+							  + le16_to_cpu(pptable->usSclkDependencyTableOffset));
+	const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long)pptable)
+							  + le16_to_cpu(pptable->usMclkDependencyTableOffset));
+	const ATOM_Tonga_MM_Dependency_Table *mm_dep_table = (ATOM_Tonga_MM_Dependency_Table *)(((unsigned long)pptable)
+							  + le16_to_cpu(pptable->usMMDependencyTableOffset));
+
+	PP_ASSERT_WITH_CODE((i < vce_state_table->ucNumEntries),
+			 "Requested state entry ID is out of range!",
+			 return -EINVAL);
+
+	vce_state_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_VCE_State_Record,
+					entries, vce_state_table, i);
+	sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_SCLK_Dependency_Record,
+					entries, sclk_dep_table,
+					vce_state_record->ucSCLKIndex);
+	mm_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_MM_Dependency_Record,
+					entries, mm_dep_table,
+					vce_state_record->ucVCEClockIndex);
+	*flag = vce_state_record->ucFlag;
+
+	vce_state->evclk = mm_dep_record->ulEClk;
+	vce_state->ecclk = mm_dep_record->ulEClk;
+	vce_state->sclk = sclk_dep_record->ulSclk;
+
+	if (vce_state_record->ucMCLKIndex >= mclk_dep_table->ucNumEntries)
+		mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_MCLK_Dependency_Record,
+					entries, mclk_dep_table,
+					mclk_dep_table->ucNumEntries - 1);
+	else
+		mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_MCLK_Dependency_Record,
+					entries, mclk_dep_table,
+					vce_state_record->ucMCLKIndex);
+
+	vce_state->mclk = mclk_dep_record->ulMclk;
+	return 0;
+}
+
 /**
 * Create a Power State out of an entry in the PowerPlay table.
 * This function is called by the hardware back-end.
@@ -1171,15 +1271,17 @@
 * @param power_state The address of the PowerState instance being created.
 * @return -1 if the entry cannot be retrieved.
 */
-int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
+int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
 		uint32_t entry_index, struct pp_power_state *power_state,
 		int (*call_back_func)(struct pp_hwmgr *, void *,
 				struct pp_power_state *, void *, uint32_t))
 {
 	int result = 0;
-	const ATOM_Tonga_State_Array * state_arrays;
+	const ATOM_Tonga_State_Array *state_arrays;
 	const ATOM_Tonga_State *state_entry;
 	const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+	int i, j;
+	uint32_t flags = 0;
 
 	PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1;);
 	power_state->classification.bios_index = entry_index;
@@ -1196,7 +1298,9 @@
 		PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries),
 				"Invalid PowerPlay Table State Array Entry.", return -1);
 
-		state_entry = &(state_arrays->states[entry_index]);
+		state_entry = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Tonga_State, entries,
+						state_arrays, entry_index);
 
 		result = call_back_func(hwmgr, (void *)state_entry, power_state,
 				(void *)pp_table,
@@ -1209,5 +1313,13 @@
 			PP_StateClassificationFlag_Boot))
 		result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
 
+	hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
+
+	if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+		for (j = 0; j < i; j++)
+			ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
+	}
+
 	return result;
 }
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
similarity index 81%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
rename to drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
index d24b888..b9710ab 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
@@ -20,14 +20,14 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
+#ifndef _PROCESSPPTABLES_V1_0_H
+#define _PROCESSPPTABLES_V1_0_H
 
 #include "hwmgr.h"
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
+extern const struct pp_table_func pptable_v1_0_funcs;
+extern int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr);
+extern int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t entry_index,
 		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
 				struct pp_power_state *, void *, uint32_t));
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 6c321b0..ccf7ebe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1523,7 +1523,7 @@
 
 int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
 							unsigned long i,
-							struct PP_VCEState *vce_state,
+							struct pp_vce_state *vce_state,
 							void **clock_info,
 							unsigned long *flag)
 {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 7dc4afd..582d04a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -29,8 +29,8 @@
 #include "tonga_hwmgr.h"
 #include "pptable.h"
 #include "processpptables.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
+#include "process_pptables_v1_0.h"
+#include "pptable_v1_0.h"
 #include "pp_debug.h"
 #include "tonga_ppsmc.h"
 #include "cgs_common.h"
@@ -202,6 +202,7 @@
 	return i - 1;
 }
 
+
 /**
  * @brief PhwTonga_GetVoltageOrder
  *  Returns index of requested voltage record in lookup table
@@ -229,7 +230,7 @@
 	return i-1;
 }
 
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
+static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
 {
 	/*
 	 * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
@@ -301,6 +302,8 @@
 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_DisableMemoryTransition);
 
+	tonga_initialize_power_tune_defaults(hwmgr);
+
 	data->mclk_strobe_mode_threshold = 40000;
 	data->mclk_stutter_mode_threshold = 30000;
 	data->mclk_edc_enable_threshold = 40000;
@@ -332,7 +335,7 @@
 
 }
 
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
 {
 	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
 
@@ -769,7 +772,7 @@
  * @param    hwmgr  the address of the powerplay hardware manager.
  * @return   always 0
  */
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
+static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
 {
 	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
 	struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
@@ -1313,15 +1316,6 @@
 }
 
 /**
- * Convert a voltage value in mv unit to VID number required by SMU firmware
- */
-static uint8_t convert_to_vid(uint16_t vddc)
-{
-	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
-
-/**
  * Preparation of vddc and vddgfx CAC tables for SMC.
  *
  * @param    hwmgr      the address of the hardware manager
@@ -2478,7 +2472,7 @@
 	graphic_level->VoltageDownHyst = 0;
 	graphic_level->PowerThrottle = 0;
 
-	threshold = engine_clock * data->fast_watemark_threshold / 100;
+	threshold = engine_clock * data->fast_watermark_threshold / 100;
 /*
 	*get the DAL clock. do it in the future.
 	PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
@@ -2892,7 +2886,7 @@
  * @param    pInput  the pointer to input data (PowerState)
  * @return   always 0
  */
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
 {
 	int result;
 	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
@@ -2981,6 +2975,10 @@
 	PP_ASSERT_WITH_CODE(0 == result,
 		"Failed to initialize Boot Level!", return result;);
 
+	result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
+	PP_ASSERT_WITH_CODE(result == 0,
+			"Failed to populate BAPM Parameters!", return result);
+
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_ClockStretcher)) {
 		result = tonga_populate_clock_stretcher_data_table(hwmgr);
@@ -3983,7 +3981,7 @@
 	return 0;
 }
 
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
 {
 	int result;
 	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
@@ -4320,6 +4318,79 @@
 	return 0;
 }
 
+static void tonga_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
+{
+	bool protection;
+	enum DPM_EVENT_SRC src;
+
+	switch (sources) {
+	default:
+		printk(KERN_ERR "Unknown throttling event sources.");
+		/* fall through */
+	case 0:
+		protection = false;
+		/* src is unused */
+		break;
+	case (1 << PHM_AutoThrottleSource_Thermal):
+		protection = true;
+		src = DPM_EVENT_SRC_DIGITAL;
+		break;
+	case (1 << PHM_AutoThrottleSource_External):
+		protection = true;
+		src = DPM_EVENT_SRC_EXTERNAL;
+		break;
+	case (1 << PHM_AutoThrottleSource_External) |
+			(1 << PHM_AutoThrottleSource_Thermal):
+		protection = true;
+		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
+		break;
+	}
+	/* Order matters - don't enable thermal protection for the wrong source. */
+	if (protection) {
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
+				DPM_EVENT_SRC, src);
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+				THERMAL_PROTECTION_DIS,
+				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_ThermalController));
+	} else
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+				THERMAL_PROTECTION_DIS, 1);
+}
+
+static int tonga_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+		PHM_AutoThrottleSource source)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	if (!(data->active_auto_throttle_sources & (1 << source))) {
+		data->active_auto_throttle_sources |= 1 << source;
+		tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+	}
+	return 0;
+}
+
+static int tonga_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+	return tonga_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+static int tonga_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+		PHM_AutoThrottleSource source)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	if (data->active_auto_throttle_sources & (1 << source)) {
+		data->active_auto_throttle_sources &= ~(1 << source);
+		tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+	}
+	return 0;
+}
+
+static int tonga_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+	return tonga_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
 
 int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
@@ -4369,6 +4440,10 @@
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 		"Failed to initialize ARB table index!", result = tmp_result);
 
+	tmp_result = tonga_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to populate PM fuses!", result = tmp_result);
+
 	tmp_result = tonga_populate_initial_mc_reg_table(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 		"Failed to populate initialize MC Reg table!", result = tmp_result);
@@ -4387,6 +4462,22 @@
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 		"Failed to start DPM!", result = tmp_result);
 
+	tmp_result = tonga_enable_smc_cac(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to enable SMC CAC!", result = tmp_result);
+
+	tmp_result = tonga_enable_power_containment(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to enable power containment!", result = tmp_result);
+
+	tmp_result = tonga_power_control_set_level(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to power control set level!", result = tmp_result);
+
+	tmp_result = tonga_enable_thermal_auto_throttle(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable thermal auto throttle!", result = tmp_result);
+
 	return result;
 }
 
@@ -4398,6 +4489,10 @@
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 		"SMC is still running!", return 0);
 
+	tmp_result = tonga_disable_thermal_auto_throttle(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to disable thermal auto throttle!", result = tmp_result);
+
 	tmp_result = tonga_stop_dpm(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 		"Failed to stop DPM!", result = tmp_result);
@@ -5068,7 +5163,7 @@
 
 	tonga_ps = cast_phw_tonga_power_state(&(ps->hardware));
 
-	result = tonga_get_powerplay_table_entry(hwmgr, entry_index, ps,
+	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, ps,
 			tonga_get_pp_table_entry_callback_func);
 
 	/* This is the earliest time we have all the dependency table and the VBIOS boot state
@@ -6232,7 +6327,7 @@
 	.get_sclk = tonga_dpm_get_sclk,
 	.patch_boot_state = tonga_dpm_patch_boot_state,
 	.get_pp_table_entry = tonga_get_pp_table_entry,
-	.get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
+	.get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0,
 	.print_current_perforce_level = tonga_print_current_perforce_level,
 	.powerdown_uvd = tonga_phm_powerdown_uvd,
 	.powergate_uvd = tonga_phm_powergate_uvd,
@@ -6268,7 +6363,7 @@
 int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
 	hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
-	hwmgr->pptable_func = &tonga_pptable_funcs;
+	hwmgr->pptable_func = &pptable_v1_0_funcs;
 	pp_tonga_thermal_initialize(hwmgr);
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
index 3961884..fcad942 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -300,6 +300,7 @@
 	bool                         dll_defaule_on;
 	bool                         performance_request_registered;
 
+
 	/* ----------------- Low Power Features ---------------------*/
 	phw_tonga_bacos					bacos;
 	phw_tonga_ulv_parm              ulv;
@@ -314,10 +315,14 @@
 	bool                         enable_tdc_limit_feature;
 	bool                         enable_pkg_pwr_tracking_feature;
 	bool                         disable_uvd_power_tune_feature;
-	phw_tonga_pt_defaults           *power_tune_defaults;
+	struct tonga_pt_defaults           *power_tune_defaults;
 	SMU72_Discrete_PmFuses           power_tune_table;
-	uint32_t                           ul_dte_tj_offset;             /* Fudge factor in DPM table to correct HW DTE errors */
-	uint32_t                           fast_watemark_threshold;      /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
+	uint32_t                           dte_tj_offset;             /* Fudge factor in DPM table to correct HW DTE errors */
+	uint32_t                           fast_watermark_threshold;      /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
+
+
+	bool                           enable_dte_feature;
+
 
 	/* ----------------- Phase Shedding ---------------------*/
 	bool                         vddc_phase_shed_control;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c
new file mode 100644
index 0000000..24d9a05
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "hwmgr.h"
+#include "smumgr.h"
+#include "tonga_hwmgr.h"
+#include "tonga_powertune.h"
+#include "tonga_smumgr.h"
+#include "smu72_discrete.h"
+#include "pp_debug.h"
+#include "tonga_ppsmc.h"
+
+#define VOLTAGE_SCALE  4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+
+struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+/*    SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+	{1,               0xF,             0xFD,                               0x19,     5,               45,                  0,          0xB0000,
+	{0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,    0x83,  0x86,  0x6F,  0xC8,    0xC9,  0xC9,  0x2F,  0x4D, 0x61},
+	{0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *tonga_hwmgr = (struct tonga_hwmgr *)(hwmgr->backend);
+	struct  phm_ppt_v1_information *table_info =
+			(struct  phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t tmp = 0;
+
+	if (table_info &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID)
+		tonga_hwmgr->power_tune_defaults =
+				&tonga_power_tune_data_set_array
+				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+	else
+		tonga_hwmgr->power_tune_defaults = &tonga_power_tune_data_set_array[0];
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CAC);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SQRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DBRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TDRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TCPRamping);
+
+	tonga_hwmgr->dte_tj_offset = tmp;
+
+	if (!tmp) {
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_CAC);
+
+		tonga_hwmgr->fast_watermark_threshold = 100;
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_PowerContainment)) {
+			tmp = 1;
+			tonga_hwmgr->enable_dte_feature = tmp ? false : true;
+			tonga_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
+			tonga_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+		}
+	}
+}
+
+
+int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	struct tonga_pt_defaults *defaults = data->power_tune_defaults;
+	SMU72_Discrete_DpmTable  *dpm_table = &(data->smc_state_table);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+	int  i, j, k;
+	uint16_t *pdef1;
+	uint16_t *pdef2;
+
+
+	/* The TDP number of fraction bits was changed from 8 to 7 for Fiji,
+	 * as requested by the SMC team.
+	 */
+	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+			(uint16_t)(cac_dtp_table->usTDP * 256));
+	dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+			(uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+			"Target Operating Temp is out of Range!",
+			);
+
+	dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+	dpm_table->GpuTjHyst = 8;
+
+	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+	pdef1 = defaults->bapmti_r;
+	pdef2 = defaults->bapmti_rc;
+
+	for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU72_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU72_DTE_SINKS; k++) {
+				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+				pdef1++;
+				pdef2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
+
+	data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+	data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
+	data->power_tune_table.SviLoadLineTrimVddC = 3;
+	data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+	uint16_t tdc_limit;
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
+
+	/* The TDC number of fraction bits was changed from 8 to 7
+	 * for Fiji, as requested by the SMC team.
+	 */
+	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
+	data->power_tune_table.TDC_VDDC_PkgLimit =
+			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+	data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+			defaults->tdc_vddc_throttle_release_limit_perc;
+	data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+	return 0;
+}
+
+static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
+	uint32_t temp;
+
+	if (tonga_read_smc_sram_dword(hwmgr->smumgr,
+			fuse_table_offset +
+			offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
+			(uint32_t *)&temp, data->sram_end))
+		PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+				return -EINVAL);
+	else
+		data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+	return 0;
+}
+
+static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+	return 0;
+}
+
+static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	if ((hwmgr->thermal_controller.advanceFanControlParameters.
+			usFanOutputSensitivity & (1 << 15)) ||
+		(hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
+		hwmgr->thermal_controller.advanceFanControlParameters.
+		usFanOutputSensitivity = hwmgr->thermal_controller.
+			advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+	data->power_tune_table.FuzzyFan_PwmSetDelta =
+			PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+					advanceFanControlParameters.usFanOutputSensitivity);
+	return 0;
+}
+
+static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		data->power_tune_table.GnbLPML[i] = 0;
+
+	return 0;
+}
+
+static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+	uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+	hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+	lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+	data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+	data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+	return 0;
+}
+
+int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	uint32_t pm_fuse_table_offset;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (tonga_read_smc_sram_dword(hwmgr->smumgr,
+				SMU72_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU72_Firmware_Header, PmFuseTable),
+				&pm_fuse_table_offset, data->sram_end))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to get pm_fuse_table_offset Failed!",
+					return -EINVAL);
+
+		/* DW6 */
+		if (tonga_populate_svi_load_line(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate SviLoadLine Failed!",
+					return -EINVAL);
+		/* DW7 */
+		if (tonga_populate_tdc_limit(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TDCLimit Failed!", return -EINVAL);
+		/* DW8 */
+		if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TdcWaterfallCtl Failed !",
+					return -EINVAL);
+
+		/* DW9-DW12 */
+		if (tonga_populate_temperature_scaler(hwmgr) != 0)
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate LPMLTemperatureScaler Failed!",
+					return -EINVAL);
+
+		/* DW13-DW14 */
+		if (tonga_populate_fuzzy_fan(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate Fuzzy Fan Control parameters Failed!",
+					return -EINVAL);
+
+		/* DW15-DW18 */
+		if (tonga_populate_gnb_lpml(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Failed!",
+					return -EINVAL);
+
+		/* DW19 */
+		if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Min and Max Vid Failed!",
+					return -EINVAL);
+
+		/* DW20 */
+		if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
+					return -EINVAL);
+
+		if (tonga_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+				(uint8_t *)&data->power_tune_table,
+				sizeof(struct SMU72_Discrete_PmFuses), data->sram_end))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to download PmFuseTable Failed!",
+					return -EINVAL);
+	}
+	return 0;
+}
+
+int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	int result = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CAC)) {
+		int smc_result;
+
+		smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+				(uint16_t)(PPSMC_MSG_EnableCac));
+		PP_ASSERT_WITH_CODE((smc_result == 0),
+				"Failed to enable CAC in SMC.", result = -1);
+
+		data->cac_enabled = (smc_result == 0) ? true : false;
+	}
+	return result;
+}
+
+int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	int result = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CAC) && data->cac_enabled) {
+		int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+				(uint16_t)(PPSMC_MSG_DisableCac));
+		PP_ASSERT_WITH_CODE((smc_result == 0),
+				"Failed to disable CAC in SMC.", result = -1);
+
+		data->cac_enabled = false;
+	}
+	return result;
+}
+
+int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	if (data->power_containment_features &
+			POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+		return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_PkgPwrSetLimit, n);
+	return 0;
+}
+
+static int tonga_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
+{
+	return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
+			PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+}
+
+int tonga_enable_power_containment(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	int smc_result;
+	int result = 0;
+
+	data->power_containment_features = 0;
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (data->enable_dte_feature) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_EnableDTE));
+			PP_ASSERT_WITH_CODE((smc_result == 0),
+					"Failed to enable DTE in SMC.", result = -1;);
+			if (smc_result == 0)
+				data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
+		}
+
+		if (data->enable_tdc_limit_feature) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_TDCLimitEnable));
+			PP_ASSERT_WITH_CODE((smc_result == 0),
+					"Failed to enable TDCLimit in SMC.", result = -1;);
+			if (smc_result == 0)
+				data->power_containment_features |=
+						POWERCONTAINMENT_FEATURE_TDCLimit;
+		}
+
+		if (data->enable_pkg_pwr_tracking_feature) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
+			PP_ASSERT_WITH_CODE((smc_result == 0),
+					"Failed to enable PkgPwrTracking in SMC.", result = -1;);
+			if (smc_result == 0) {
+				struct phm_cac_tdp_table *cac_table =
+						table_info->cac_dtp_table;
+				uint32_t default_limit =
+					(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
+
+				data->power_containment_features |=
+						POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+				if (tonga_set_power_limit(hwmgr, default_limit))
+					printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
+			}
+		}
+	}
+	return result;
+}
+
+int tonga_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	int result = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment) &&
+			data->power_containment_features) {
+		int smc_result;
+
+		if (data->power_containment_features &
+				POWERCONTAINMENT_FEATURE_TDCLimit) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_TDCLimitDisable));
+			PP_ASSERT_WITH_CODE((smc_result == 0),
+					"Failed to disable TDCLimit in SMC.",
+					result = smc_result);
+		}
+
+		if (data->power_containment_features &
+				POWERCONTAINMENT_FEATURE_DTE) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_DisableDTE));
+			PP_ASSERT_WITH_CODE((smc_result == 0),
+					"Failed to disable DTE in SMC.",
+					result = smc_result);
+		}
+
+		if (data->power_containment_features &
+				POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+			PP_ASSERT_WITH_CODE((smc_result == 0),
+					"Failed to disable PkgPwrTracking in SMC.",
+					result = smc_result);
+		}
+		data->power_containment_features = 0;
+	}
+
+	return result;
+}
+
+int tonga_power_control_set_level(struct pp_hwmgr *hwmgr)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+	int adjust_percent, target_tdp;
+	int result = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		/* adjustment percentage has already been validated */
+		adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
+				hwmgr->platform_descriptor.TDPAdjustment :
+				(-1 * hwmgr->platform_descriptor.TDPAdjustment);
+		/* SMC requested that target_tdp be a 7-bit fraction in the DPM
+		 * table, but an 8-bit fraction in messages.
+		 */
+		target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+		result = tonga_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
+	}
+
+	return result;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
index 8e6670b..c8bdb92 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
@@ -35,20 +35,23 @@
 typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type;
 
 /* PowerContainment Features */
+#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
+
+
+/* PowerContainment Features */
 #define POWERCONTAINMENT_FEATURE_BAPM            0x00000001
 #define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
 
-struct _phw_tonga_pt_config_reg {
+struct tonga_pt_config_reg {
 	uint32_t                           Offset;
 	uint32_t                           Mask;
 	uint32_t                           Shift;
 	uint32_t                           Value;
 	phw_tonga_ptc_config_reg_type     Type;
 };
-typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg;
 
-struct _phw_tonga_pt_defaults {
+struct tonga_pt_defaults {
 	uint8_t   svi_load_line_en;
 	uint8_t   svi_load_line_vddC;
 	uint8_t   tdc_vddc_throttle_release_limit_perc;
@@ -60,7 +63,18 @@
 	uint16_t  bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
 	uint16_t  bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
 };
-typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults;
+
+
+
+void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
+int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
+int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr);
+int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr);
+int tonga_enable_power_containment(struct pp_hwmgr *hwmgr);
+int tonga_disable_power_containment(struct pp_hwmgr *hwmgr);
+int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
+int tonga_power_control_set_level(struct pp_hwmgr *hwmgr);
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index b764c8c..18f39e8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -131,9 +131,8 @@
 	struct cgs_device *device;
 	uint32_t chip_family;
 	uint32_t chip_id;
-	uint32_t rev_id;
-	bool powercontainment_enabled;
 };
+
 enum amd_pp_display_config_type{
 	AMD_PP_DisplayConfigType_None = 0,
 	AMD_PP_DisplayConfigType_DP54 ,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 962cb53..d4495839c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -341,7 +341,6 @@
 extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
 extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
 extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
-extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
 extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
 extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
 extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index bf0d2ac..e987483 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -31,15 +31,20 @@
 #include "hwmgr_ppt.h"
 #include "ppatomctrl.h"
 #include "hwmgr_ppt.h"
+#include "power_state.h"
 
 struct pp_instance;
 struct pp_hwmgr;
-struct pp_hw_power_state;
-struct pp_power_state;
-struct PP_VCEState;
 struct phm_fan_speed_info;
 struct pp_atomctrl_voltage_table;
 
+extern int amdgpu_powercontainment;
+extern int amdgpu_sclk_deep_sleep_en;
+extern unsigned amdgpu_pp_feature_mask;
+
+#define VOLTAGE_SCALE 4
+
+uint8_t convert_to_vid(uint16_t vddc);
 
 enum DISPLAY_GAP {
 	DISPLAY_GAP_VBLANK_OR_WM = 0,   /* Wait for vblank or MCHG watermark. */
@@ -49,7 +54,6 @@
 };
 typedef enum DISPLAY_GAP DISPLAY_GAP;
 
-
 struct vi_dpm_level {
 	bool enabled;
 	uint32_t value;
@@ -71,6 +75,19 @@
 #define PCIE_PERF_REQ_GEN2         3
 #define PCIE_PERF_REQ_GEN3         4
 
+enum PP_FEATURE_MASK {
+	PP_SCLK_DPM_MASK = 0x1,
+	PP_MCLK_DPM_MASK = 0x2,
+	PP_PCIE_DPM_MASK = 0x4,
+	PP_SCLK_DEEP_SLEEP_MASK = 0x8,
+	PP_POWER_CONTAINMENT_MASK = 0x10,
+	PP_UVD_HANDSHAKE_MASK = 0x20,
+	PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
+	PP_VBI_TIME_SUPPORT_MASK = 0x80,
+	PP_ULV_MASK = 0x100,
+	PP_ENABLE_GFX_CG_THRU_SMU = 0x200
+};
+
 enum PHM_BackEnd_Magic {
 	PHM_Dummy_Magic       = 0xAA5555AA,
 	PHM_RV770_Magic       = 0xDCBAABCD,
@@ -351,7 +368,7 @@
 	int (*pptable_get_vce_state_table_entry)(
 						struct pp_hwmgr *hwmgr,
 						unsigned long i,
-						struct PP_VCEState *vce_state,
+						struct pp_vce_state *vce_state,
 						void **clock_info,
 						unsigned long *flag);
 };
@@ -570,22 +587,43 @@
 	uint32_t NB;
 };
 
+#define PP_MAX_VCE_LEVELS 6
+
+enum PP_VCE_LEVEL {
+	PP_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
+	PP_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
+	PP_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
+	PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+	PP_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
+	PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
+
+enum PP_TABLE_VERSION {
+	PP_TABLE_V0 = 0,
+	PP_TABLE_V1,
+	PP_TABLE_V2,
+	PP_TABLE_MAX
+};
+
 /**
  * The main hardware manager structure.
  */
 struct pp_hwmgr {
 	uint32_t chip_family;
 	uint32_t chip_id;
-	uint32_t hw_revision;
-	uint32_t sub_sys_id;
-	uint32_t sub_vendor_id;
 
+	uint32_t pp_table_version;
 	void *device;
 	struct pp_smumgr *smumgr;
 	const void *soft_pp_table;
 	uint32_t soft_pp_table_size;
 	void *hardcode_pp_table;
 	bool need_pp_table_upload;
+
+	struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+	uint32_t num_vce_state_tables;
+
 	enum amd_dpm_forced_level dpm_level;
 	bool block_hw_access;
 	struct phm_gfx_arbiter gfx_arbiter;
@@ -614,7 +652,6 @@
 	uint32_t num_ps;
 	struct pp_thermal_controller_info thermal_controller;
 	bool fan_ctrl_is_in_default_mode;
-	bool powercontainment_enabled;
 	uint32_t fan_ctrl_default_mode;
 	uint32_t tmin;
 	struct phm_microcode_version_info microcode_version_info;
@@ -624,6 +661,7 @@
 	struct pp_power_state    *boot_ps;
 	struct pp_power_state    *uvd_ps;
 	struct amd_pp_display_configuration display_config;
+	uint32_t feature_mask;
 };
 
 
@@ -637,16 +675,7 @@
 extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
 				uint32_t value, uint32_t mask);
 
-extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
-				uint32_t index, uint32_t value, uint32_t mask);
 
-extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr,
-		uint32_t indirect_port, uint32_t index);
-
-extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr,
-		uint32_t indirect_port,
-		uint32_t index,
-		uint32_t value);
 
 extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
 				uint32_t indirect_port,
@@ -654,12 +683,7 @@
 				uint32_t value,
 				uint32_t mask);
 
-extern void phm_wait_for_indirect_register_unequal(
-				struct pp_hwmgr *hwmgr,
-				uint32_t indirect_port,
-				uint32_t index,
-				uint32_t value,
-				uint32_t mask);
+
 
 extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
 extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
@@ -673,6 +697,8 @@
 extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
 extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
 extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
+extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
+		uint32_t voltage);
 extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
 extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
 extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
@@ -683,6 +709,9 @@
 extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
 extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
 
+extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+				uint32_t sclk, uint16_t id, uint16_t *voltage);
+
 #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
 
 #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
@@ -697,44 +726,6 @@
 	 PHM_FIELD_SHIFT(reg, field))
 
 
-#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask)	\
-	phm_wait_on_register(hwmgr, index, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask)	\
-	phm_wait_for_register_unequal(hwmgr, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask)	\
-	phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask)	\
-	phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask)	\
-	phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask)	\
-	phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask)
-
-/* Operations on named registers. */
-
-#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask)	\
-	PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask)	\
-	PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask)	\
-	PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask)	\
-	PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask)	\
-	PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask)	\
-	PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
 /* Operations on named fields. */
 
 #define PHM_READ_FIELD(device, reg, field)	\
@@ -762,60 +753,16 @@
 			PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg),	\
 				reg, field, fieldval))
 
-#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval)	\
-	PHM_WAIT_REGISTER(hwmgr, reg, (fieldval)	\
-			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask)	\
+	phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
+
+
+#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask)	\
+	PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
 
 #define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval)	\
 	PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval)	\
 			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
 
-#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval)	\
-	PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval)	\
-			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval)	\
-	PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval)	\
-			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval)	\
-	PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval)	\
-			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval)	\
-	PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval)	\
-			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-/* Operations on arrays of registers & fields. */
-
-#define PHM_READ_ARRAY_REGISTER(device, reg, offset)	\
-	cgs_read_register(device, mm##reg + (offset))
-
-#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value)	\
-	cgs_write_register(device, mm##reg + (offset), value)
-
-#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask)	\
-	PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
-
-#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask)	\
-	PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
-
-#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \
-	PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field)
-
-#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue)	\
-	PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset,	\
-			PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset),	\
-				reg, field, fieldvalue))
-
-#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue)	\
-	PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset),	\
-			(fieldvalue) << PHM_FIELD_SHIFT(reg, field),	\
-			PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue)	\
-	PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset),	\
-			(fieldvalue) << PHM_FIELD_SHIFT(reg, field),	\
-			PHM_FIELD_MASK(reg, field))
 
 #endif /* _HWMGR_H_ */
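
A quick orientation on the feature gating introduced above: the new module
parameters (amdgpu_powercontainment, amdgpu_pp_feature_mask, ...) presumably
seed the new hwmgr->feature_mask field, which backends then test against the
PP_FEATURE_MASK bits. A minimal sketch of such a test, assuming a hypothetical
helper name (the driver itself open-codes the bit tests):

/* Hypothetical helper, not part of this patch: gate an optional powerplay
 * feature on the new hwmgr->feature_mask using a PP_FEATURE_MASK bit. */
static inline bool pp_feature_enabled(const struct pp_hwmgr *hwmgr,
				      enum PP_FEATURE_MASK feature)
{
	return (hwmgr->feature_mask & feature) != 0;
}
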
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index a3f0ce4..9ceaed9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -158,7 +158,7 @@
 
 
 /*Structure to hold a VCE state entry*/
-struct PP_VCEState {
+struct pp_vce_state {
 	uint32_t evclk;
 	uint32_t ecclk;
 	uint32_t sclk;
@@ -171,30 +171,28 @@
 	PP_MMProfilingState_Stopped
 };
 
-struct PP_Clock_Engine_Request {
-	unsigned long clientType;
-	unsigned long ctxid;
+struct pp_clock_engine_request {
+	unsigned long client_type;
+	unsigned long ctx_id;
 	uint64_t  context_handle;
 	unsigned long sclk;
-	unsigned long sclkHardMin;
+	unsigned long sclk_hard_min;
 	unsigned long mclk;
 	unsigned long iclk;
 	unsigned long evclk;
 	unsigned long ecclk;
-	unsigned long ecclkHardMin;
+	unsigned long ecclk_hard_min;
 	unsigned long vclk;
 	unsigned long dclk;
-	unsigned long samclk;
-	unsigned long acpclk;
-	unsigned long sclkOverdrive;
-	unsigned long mclkOverdrive;
+	unsigned long sclk_over_drive;
+	unsigned long mclk_over_drive;
 	unsigned long sclk_threshold;
 	unsigned long flag;
 	unsigned long vclk_ceiling;
 	unsigned long dclk_ceiling;
 	unsigned long num_cus;
-	unsigned long pmflag;
-	enum PP_MMProfilingState MMProfilingState;
+	unsigned long pm_flag;
+	enum PP_MMProfilingState mm_profiling_state;
 };
 
 #endif
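
For reference, entries of the renamed struct pp_vce_state end up cached on the
hardware manager in the new vce_states[] array added to struct pp_hwmgr above.
A sketch of walking that cache, using only the fields visible in this hunk
(the dump helper itself is hypothetical):

/* Hypothetical debug helper: print the VCE states cached on the hwmgr. */
static void pp_dump_vce_states(const struct pp_hwmgr *hwmgr)
{
	uint32_t i;

	for (i = 0; i < hwmgr->num_vce_state_tables; i++) {
		const struct pp_vce_state *s = &hwmgr->vce_states[i];

		pr_debug("VCE state %u: evclk=%u ecclk=%u sclk=%u\n",
			 i, s->evclk, s->ecclk, s->sclk);
	}
}
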
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index d7d83b7..bfdbec1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -43,5 +43,8 @@
 	} while (0)
 
 
+#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n)	\
+	(type *)((char *)&(ptr)->member + (sizeof(type) * (n)))
+
 #endif /* PP_DEBUG_H */
 
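
The new GET_FLEXIBLE_ARRAY_MEMBER_ADDR macro computes the address of the n-th
element of a flexible array member by byte arithmetic from the member's
offset, i.e. it is equivalent to &ptr->member[n] for the legacy
one-element-array layouts used in these tables. A sketch with a made-up
table type (illustrative only):

/* 'struct example_table' is a made-up layout, not from the patch. */
struct example_table {
	uint32_t count;
	uint16_t entries[1];	/* legacy one-element flexible array */
};

static uint16_t example_table_entry(struct example_table *tbl, uint32_t n)
{
	uint16_t *e = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(uint16_t, entries,
						     tbl, n);

	return *e;	/* same as tbl->entries[n] for this layout */
}
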
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
new file mode 100644
index 0000000..71c9b2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
@@ -0,0 +1,510 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_H
+#define SMU71_H
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__VARIANT__ICELAND 1
+#define SMU__DGPU_ONLY 1
+#define SMU__DYNAMIC_MCARB_SETTINGS 1
+
+enum SID_OPTION {
+  SID_OPTION_HI,
+  SID_OPTION_LO,
+  SID_OPTION_COUNT
+};
+
+typedef struct {
+  uint32_t high;
+  uint32_t low;
+} data_64_t;
+
+typedef struct {
+  data_64_t high;
+  data_64_t low;
+} data_128_t;
+
+#define SMU7_CONTEXT_ID_SMC        1
+#define SMU7_CONTEXT_ID_VBIOS      2
+
+#define SMU71_MAX_LEVELS_VDDC            8
+#define SMU71_MAX_LEVELS_VDDCI           4
+#define SMU71_MAX_LEVELS_MVDD            4
+#define SMU71_MAX_LEVELS_VDDNB           8
+
+#define SMU71_MAX_LEVELS_GRAPHICS        SMU__NUM_SCLK_DPM_STATE
+#define SMU71_MAX_LEVELS_MEMORY          SMU__NUM_MCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_GIO             SMU__NUM_LCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_LINK            SMU__NUM_PCIE_DPM_LEVELS
+#define SMU71_MAX_ENTRIES_SMIO           32
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL    0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL      0
+
+#define GPIO_CLAMP_MODE_VRHOT      1
+#define GPIO_CLAMP_MODE_THERM      2
+#define GPIO_CLAMP_MODE_DC         4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT  6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK   (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT  9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK   (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT  12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK   (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT  15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK   (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT  18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK   (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT  21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK   (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
+
+
+#if defined SMU__DGPU_ONLY
+#define SMU71_DTE_ITERATIONS 5
+#define SMU71_DTE_SOURCES 3
+#define SMU71_DTE_SINKS 1
+#define SMU71_NUM_CPU_TES 0
+#define SMU71_NUM_GPU_TES 1
+#define SMU71_NUM_NON_TES 2
+
+#endif
+
+#if defined SMU__FUSION_ONLY
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+#endif
+
+struct SMU71_PIDController
+{
+    uint32_t Ki;
+    int32_t LFWindupUpperLim;
+    int32_t LFWindupLowerLim;
+    uint32_t StatePrecision;
+    uint32_t LfPrecision;
+    uint32_t LfOffset;
+    uint32_t MaxState;
+    uint32_t MaxLfFraction;
+    uint32_t StateShift;
+};
+
+typedef struct SMU71_PIDController SMU71_PIDController;
+
+struct SMU7_LocalDpmScoreboard
+{
+    uint32_t PercentageBusy;
+
+    int32_t  PIDError;
+    int32_t  PIDIntegral;
+    int32_t  PIDOutput;
+
+    uint32_t SigmaDeltaAccum;
+    uint32_t SigmaDeltaOutput;
+    uint32_t SigmaDeltaLevel;
+
+    uint32_t UtilizationSetpoint;
+
+    uint8_t  TdpClampMode;
+    uint8_t  TdcClampMode;
+    uint8_t  ThermClampMode;
+    uint8_t  VoltageBusy;
+
+    int8_t   CurrLevel;
+    int8_t   TargLevel;
+    uint8_t  LevelChangeInProgress;
+    uint8_t  UpHyst;
+
+    uint8_t  DownHyst;
+    uint8_t  VoltageDownHyst;
+    uint8_t  DpmEnable;
+    uint8_t  DpmRunning;
+
+    uint8_t  DpmForce;
+    uint8_t  DpmForceLevel;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+
+    uint32_t MinimumPerfSclk;
+
+    uint8_t  AcpiReq;
+    uint8_t  AcpiAck;
+    uint8_t  GfxClkSlow;
+    uint8_t  GpioClampMode;
+
+    uint8_t  FpsFilterWeight;
+    uint8_t  EnabledLevelsChange;
+    uint8_t  DteClampMode;
+    uint8_t  FpsClampMode;
+
+    uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_GRAPHICS];
+    uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_GRAPHICS];
+
+    void     (*TargetStateCalculator)(uint8_t);
+    void     (*SavedTargetStateCalculator)(uint8_t);
+
+    uint16_t AutoDpmInterval;
+    uint16_t AutoDpmRange;
+
+    uint8_t  FpsEnabled;
+    uint8_t  MaxPerfLevel;
+    uint8_t  AllowLowClkInterruptToHost;
+    uint8_t  FpsRunning;
+
+    uint32_t MaxAllowedFrequency;
+};
+
+typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
+
+#define SMU7_MAX_VOLTAGE_CLIENTS 12
+
+struct SMU7_VoltageScoreboard
+{
+    uint16_t CurrentVoltage;
+    uint16_t HighestVoltage;
+    uint16_t MaxVid;
+    uint8_t  HighestVidOffset;
+    uint8_t  CurrentVidOffset;
+#if defined (SMU__DGPU_ONLY)
+    uint8_t  CurrentPhases;
+    uint8_t  HighestPhases;
+#else
+    uint8_t  AvsOffset;
+    uint8_t  AvsOffsetApplied;
+#endif
+    uint8_t  ControllerBusy;
+    uint8_t  CurrentVid;
+    uint16_t RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
+#if defined (SMU__DGPU_ONLY)
+    uint8_t  RequestedPhases[SMU7_MAX_VOLTAGE_CLIENTS];
+#endif
+    uint8_t  EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
+    uint8_t  TargetIndex;
+    uint8_t  Delay;
+    uint8_t  ControllerEnable;
+    uint8_t  ControllerRunning;
+    uint16_t CurrentStdVoltageHiSidd;
+    uint16_t CurrentStdVoltageLoSidd;
+#if defined (SMU__DGPU_ONLY)
+    uint16_t RequestedVddci;
+    uint16_t CurrentVddci;
+    uint16_t HighestVddci;
+    uint8_t  CurrentVddciVid;
+    uint8_t  TargetVddciIndex;
+#endif
+};
+
+typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
+
+// -------------------------------------------------------------------------------------------------------------------------
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+struct SMU7_PCIeLinkSpeedScoreboard
+{
+    uint8_t     DpmEnable;
+    uint8_t     DpmRunning;
+    uint8_t     DpmForce;
+    uint8_t     DpmForceLevel;
+
+    uint8_t     CurrentLinkSpeed;
+    uint8_t     EnabledLevelsChange;
+    uint16_t    AutoDpmInterval;
+
+    uint16_t    AutoDpmRange;
+    uint16_t    AutoDpmCount;
+
+    uint8_t     DpmMode;
+    uint8_t     AcpiReq;
+    uint8_t     AcpiAck;
+    uint8_t     CurrentLinkLevel;
+
+};
+
+typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
+
+// -------------------------------------------------------- CAC table ------------------------------------------------------
+#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
+
+#define SMU7_SCALE_I  7
+#define SMU7_SCALE_R 12
+
+struct SMU7_PowerScoreboard
+{
+    uint16_t   MinVoltage;
+    uint16_t   MaxVoltage;
+
+    uint32_t   AvgGpuPower;
+
+    uint16_t   VddcLeakagePower[SID_OPTION_COUNT];
+    uint16_t   VddcSclkConstantPower[SID_OPTION_COUNT];
+    uint16_t   VddcSclkDynamicPower[SID_OPTION_COUNT];
+    uint16_t   VddcNonSclkDynamicPower[SID_OPTION_COUNT];
+    uint16_t   VddcTotalPower[SID_OPTION_COUNT];
+    uint16_t   VddcTotalCurrent[SID_OPTION_COUNT];
+    uint16_t   VddcLoadVoltage[SID_OPTION_COUNT];
+    uint16_t   VddcNoLoadVoltage[SID_OPTION_COUNT];
+
+    uint16_t   DisplayPhyPower;
+    uint16_t   PciePhyPower;
+
+    uint16_t   VddciTotalPower;
+    uint16_t   Vddr1TotalPower;
+
+    uint32_t   RocPower;
+
+    uint32_t   last_power;
+    uint32_t   enableWinAvg;
+
+    uint32_t   lkg_acc;
+    uint16_t   VoltLkgeScaler;
+    uint16_t   TempLkgeScaler;
+
+    uint32_t   uvd_cac_dclk;
+    uint32_t   uvd_cac_vclk;
+    uint32_t   vce_cac_eclk;
+    uint32_t   samu_cac_samclk;
+    uint32_t   display_cac_dispclk;
+    uint32_t   acp_cac_aclk;
+    uint32_t   unb_cac;
+
+    uint32_t   WinTime;
+
+    uint16_t  GpuPwr_MAWt;
+    uint16_t  FilteredVddcTotalPower;
+
+    uint8_t   CalculationRepeats;
+    uint8_t   WaterfallUp;
+    uint8_t   WaterfallDown;
+    uint8_t   WaterfallLimit;
+};
+
+typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
+
+// --------------------------------------------------------------------------------------------------
+
+struct SMU7_ThermalScoreboard
+{
+   int16_t  GpuLimit;
+   int16_t  GpuHyst;
+   uint16_t CurrGnbTemp;
+   uint16_t FilteredGnbTemp;
+   uint8_t  ControllerEnable;
+   uint8_t  ControllerRunning;
+   uint8_t  WaterfallUp;
+   uint8_t  WaterfallDown;
+   uint8_t  WaterfallLimit;
+   uint8_t  padding[3];
+};
+
+typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard;
+
+// For FeatureEnables:
+#define SMU7_SCLK_DPM_CONFIG_MASK                        0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK              0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK              0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK                        0x08
+#define SMU7_UVD_DPM_CONFIG_MASK                         0x10
+#define SMU7_VCE_DPM_CONFIG_MASK                         0x20
+#define SMU7_ACP_DPM_CONFIG_MASK                         0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK                        0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK                    0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE                  0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE                  0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE                  0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE                  0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE                  0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE                  0x00020000
+
+// All 'soft registers' should be uint32_t.
+struct SMU71_SoftRegisters
+{
+    uint32_t        RefClockFrequency;
+    uint32_t        PmTimerPeriod;
+    uint32_t        FeatureEnables;
+#if defined (SMU__DGPU_ONLY)
+    uint32_t        PreVBlankGap;
+    uint32_t        VBlankTimeout;
+    uint32_t        TrainTimeGap;
+    uint32_t        MvddSwitchTime;
+    uint32_t        LongestAcpiTrainTime;
+    uint32_t        AcpiDelay;
+    uint32_t        G5TrainTime;
+    uint32_t        DelayMpllPwron;
+    uint32_t        VoltageChangeTimeout;
+#endif
+    uint32_t        HandshakeDisables;
+
+    uint8_t         DisplayPhy1Config;
+    uint8_t         DisplayPhy2Config;
+    uint8_t         DisplayPhy3Config;
+    uint8_t         DisplayPhy4Config;
+
+    uint8_t         DisplayPhy5Config;
+    uint8_t         DisplayPhy6Config;
+    uint8_t         DisplayPhy7Config;
+    uint8_t         DisplayPhy8Config;
+
+    uint32_t        AverageGraphicsActivity;
+    uint32_t        AverageMemoryActivity;
+    uint32_t        AverageGioActivity;
+
+    uint8_t         SClkDpmEnabledLevels;
+    uint8_t         MClkDpmEnabledLevels;
+    uint8_t         LClkDpmEnabledLevels;
+    uint8_t         PCIeDpmEnabledLevels;
+
+    uint32_t        DRAM_LOG_ADDR_H;
+    uint32_t        DRAM_LOG_ADDR_L;
+    uint32_t        DRAM_LOG_PHY_ADDR_H;
+    uint32_t        DRAM_LOG_PHY_ADDR_L;
+    uint32_t        DRAM_LOG_BUFF_SIZE;
+    uint32_t        UlvEnterCount;
+    uint32_t        UlvTime;
+    uint32_t        UcodeLoadStatus;
+    uint8_t         DPMFreezeAndForced;
+    uint8_t         Activity_Weight;
+    uint8_t         Reserved8[2];
+    uint32_t        Reserved;
+};
+
+typedef struct SMU71_SoftRegisters SMU71_SoftRegisters;
+
+struct SMU71_Firmware_Header
+{
+    uint32_t Digest[5];
+    uint32_t Version;
+    uint32_t HeaderSize;
+    uint32_t Flags;
+    uint32_t EntryPoint;
+    uint32_t CodeSize;
+    uint32_t ImageSize;
+
+    uint32_t Rtos;
+    uint32_t SoftRegisters;
+    uint32_t DpmTable;
+    uint32_t FanTable;
+    uint32_t CacConfigTable;
+    uint32_t CacStatusTable;
+
+    uint32_t mcRegisterTable;
+
+    uint32_t mcArbDramTimingTable;
+
+    uint32_t PmFuseTable;
+    uint32_t Globals;
+    uint32_t UvdDpmTable;
+    uint32_t AcpDpmTable;
+    uint32_t VceDpmTable;
+    uint32_t SamuDpmTable;
+    uint32_t UlvSettings;
+    uint32_t Reserved[37];
+    uint32_t Signature;
+};
+
+typedef struct SMU71_Firmware_Header SMU71_Firmware_Header;
+
+struct SMU7_HystController_Data
+{
+    uint8_t waterfall_up;
+    uint8_t waterfall_down;
+    uint8_t pstate;
+    uint8_t clamp_mode;
+};
+
+typedef struct SMU7_HystController_Data SMU7_HystController_Data;
+
+#define SMU71_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum  DisplayConfig {
+    PowerDown = 1,
+    DP54x4,
+    DP54x2,
+    DP54x1,
+    DP27x4,
+    DP27x2,
+    DP27x1,
+    HDMI297,
+    HDMI162,
+    LVDS,
+    DP324x4,
+    DP324x2,
+    DP324x1
+};
+
+//#define SX_BLOCK_COUNT 8
+//#define MC_BLOCK_COUNT 1
+//#define CPL_BLOCK_COUNT 27
+
+#if defined SMU__VARIANT__ICELAND
+  #define SX_BLOCK_COUNT 8
+  #define MC_BLOCK_COUNT 1
+  #define CPL_BLOCK_COUNT 29
+#endif
+
+struct SMU7_Local_Cac {
+  uint8_t BlockId;
+  uint8_t SignalId;
+  uint8_t Threshold;
+  uint8_t Padding;
+};
+
+typedef struct SMU7_Local_Cac SMU7_Local_Cac;
+
+struct SMU7_Local_Cac_Table {
+  SMU7_Local_Cac SxLocalCac[SX_BLOCK_COUNT];
+  SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
+  SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
+};
+
+typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+#endif
+
+#endif
+
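
One note on smu71.h above: the SCRATCH_B shift/mask pairs pack ten 3-bit
target/current DPM level indices (PCIe, UVD, VCE, ACP, SAMU) into a single
scratch register. A hedged decode sketch (the helper name is illustrative):

/* Extract the current PCIe DPM index from a SCRATCH_B value using the
 * masks defined above. */
static inline uint32_t scratch_b_curr_pcie_index(uint32_t scratch_b)
{
	return (scratch_b & SCRATCH_B_CURR_PCIE_INDEX_MASK) >>
		SCRATCH_B_CURR_PCIE_INDEX_SHIFT;
}
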
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
new file mode 100644
index 0000000..c0e3936
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_DISCRETE_H
+#define SMU71_DISCRETE_H
+
+#include "smu71.h"
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define VDDC_ON_SVI2  0x1
+#define VDDCI_ON_SVI2 0x2
+#define MVDD_ON_SVI2  0x4
+
+struct SMU71_Discrete_VoltageLevel
+{
+    uint16_t    Voltage;
+    uint16_t    StdVoltageHiSidd;
+    uint16_t    StdVoltageLoSidd;
+    uint8_t     Smio;
+    uint8_t     padding;
+};
+
+typedef struct SMU71_Discrete_VoltageLevel SMU71_Discrete_VoltageLevel;
+
+struct SMU71_Discrete_GraphicsLevel
+{
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+
+    uint32_t    SclkFrequency;
+
+    uint8_t     pcieDpmLevel;
+    uint8_t     DeepSleepDivId;
+    uint16_t    ActivityLevel;
+
+    uint32_t    CgSpllFuncCntl3;
+    uint32_t    CgSpllFuncCntl4;
+    uint32_t    SpllSpreadSpectrum;
+    uint32_t    SpllSpreadSpectrum2;
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+    uint8_t     SclkDid;
+    uint8_t     DisplayWatermark;
+    uint8_t     EnabledForActivity;
+    uint8_t     EnabledForThrottle;
+    uint8_t     UpHyst;
+    uint8_t     DownHyst;
+    uint8_t     VoltageDownHyst;
+    uint8_t     PowerThrottle;
+};
+
+typedef struct SMU71_Discrete_GraphicsLevel SMU71_Discrete_GraphicsLevel;
+
+struct SMU71_Discrete_ACPILevel
+{
+    uint32_t    Flags;
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+    uint32_t    SclkFrequency;
+    uint8_t     SclkDid;
+    uint8_t     DisplayWatermark;
+    uint8_t     DeepSleepDivId;
+    uint8_t     padding;
+    uint32_t    CgSpllFuncCntl;
+    uint32_t    CgSpllFuncCntl2;
+    uint32_t    CgSpllFuncCntl3;
+    uint32_t    CgSpllFuncCntl4;
+    uint32_t    SpllSpreadSpectrum;
+    uint32_t    SpllSpreadSpectrum2;
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+};
+
+typedef struct SMU71_Discrete_ACPILevel SMU71_Discrete_ACPILevel;
+
+struct SMU71_Discrete_Ulv
+{
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+    uint16_t    VddcOffset;
+    uint8_t     VddcOffsetVid;
+    uint8_t     VddcPhase;
+    uint32_t    Reserved;
+};
+
+typedef struct SMU71_Discrete_Ulv SMU71_Discrete_Ulv;
+
+struct SMU71_Discrete_MemoryLevel
+{
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+    uint32_t    MinVddci;
+    uint32_t    MinMvdd;
+
+    uint32_t    MclkFrequency;
+
+    uint8_t     EdcReadEnable;
+    uint8_t     EdcWriteEnable;
+    uint8_t     RttEnable;
+    uint8_t     StutterEnable;
+
+    uint8_t     StrobeEnable;
+    uint8_t     StrobeRatio;
+    uint8_t     EnabledForThrottle;
+    uint8_t     EnabledForActivity;
+
+    uint8_t     UpHyst;
+    uint8_t     DownHyst;
+    uint8_t     VoltageDownHyst;
+    uint8_t     padding;
+
+    uint16_t    ActivityLevel;
+    uint8_t     DisplayWatermark;
+    uint8_t     padding1;
+
+    uint32_t    MpllFuncCntl;
+    uint32_t    MpllFuncCntl_1;
+    uint32_t    MpllFuncCntl_2;
+    uint32_t    MpllAdFuncCntl;
+    uint32_t    MpllDqFuncCntl;
+    uint32_t    MclkPwrmgtCntl;
+    uint32_t    DllCntl;
+    uint32_t    MpllSs1;
+    uint32_t    MpllSs2;
+};
+
+typedef struct SMU71_Discrete_MemoryLevel SMU71_Discrete_MemoryLevel;
+
+struct SMU71_Discrete_LinkLevel
+{
+    uint8_t     PcieGenSpeed;           ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3
+    uint8_t     PcieLaneCount;          ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
+    uint8_t     EnabledForActivity;
+    uint8_t     SPC;
+    uint32_t    DownThreshold;
+    uint32_t    UpThreshold;
+    uint32_t    Reserved;
+};
+
+typedef struct SMU71_Discrete_LinkLevel SMU71_Discrete_LinkLevel;
+
+
+#ifdef SMU__DYNAMIC_MCARB_SETTINGS
+// MC ARB DRAM Timing registers.
+struct SMU71_Discrete_MCArbDramTimingTableEntry
+{
+    uint32_t McArbDramTiming;
+    uint32_t McArbDramTiming2;
+    uint8_t  McArbBurstTime;
+    uint8_t  padding[3];
+};
+
+typedef struct SMU71_Discrete_MCArbDramTimingTableEntry SMU71_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU71_Discrete_MCArbDramTimingTable
+{
+    SMU71_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU71_Discrete_MCArbDramTimingTable SMU71_Discrete_MCArbDramTimingTable;
+#endif
+
+// UVD VCLK/DCLK state (level) definition.
+struct SMU71_Discrete_UvdLevel
+{
+    uint32_t VclkFrequency;
+    uint32_t DclkFrequency;
+    uint16_t MinVddc;
+    uint8_t  MinVddcPhases;
+    uint8_t  VclkDivider;
+    uint8_t  DclkDivider;
+    uint8_t  padding[3];
+};
+
+typedef struct SMU71_Discrete_UvdLevel SMU71_Discrete_UvdLevel;
+
+// Clocks for other external blocks (VCE, ACP, SAMU).
+struct SMU71_Discrete_ExtClkLevel
+{
+    uint32_t Frequency;
+    uint16_t MinVoltage;
+    uint8_t  MinPhases;
+    uint8_t  Divider;
+};
+
+typedef struct SMU71_Discrete_ExtClkLevel SMU71_Discrete_ExtClkLevel;
+
+// Everything that we need to keep track of about the current state.
+// Use this instead of copies of the GraphicsLevel and MemoryLevel structures to keep track of state parameters
+// that need to be checked later.
+// We don't need to cache everything about a state, just a few parameters.
+struct SMU71_Discrete_StateInfo
+{
+    uint32_t SclkFrequency;
+    uint32_t MclkFrequency;
+    uint32_t VclkFrequency;
+    uint32_t DclkFrequency;
+    uint32_t SamclkFrequency;
+    uint32_t AclkFrequency;
+    uint32_t EclkFrequency;
+    uint16_t MvddVoltage;
+    uint16_t padding16;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+    uint8_t  McRegIndex;
+    uint8_t  SeqIndex;
+    uint8_t  SclkDid;
+    int8_t   SclkIndex;
+    int8_t   MclkIndex;
+    uint8_t  PCIeGen;
+
+};
+
+typedef struct SMU71_Discrete_StateInfo SMU71_Discrete_StateInfo;
+
+
+struct SMU71_Discrete_DpmTable
+{
+    // Multi-DPM controller settings
+    SMU71_PIDController                  GraphicsPIDController;
+    SMU71_PIDController                  MemoryPIDController;
+    SMU71_PIDController                  LinkPIDController;
+
+    uint32_t                            SystemFlags;
+
+    // SMIO masks for voltage and phase controls
+    uint32_t                            SmioMaskVddcVid;
+    uint32_t                            SmioMaskVddcPhase;
+    uint32_t                            SmioMaskVddciVid;
+    uint32_t                            SmioMaskMvddVid;
+
+    uint32_t                            VddcLevelCount;
+    uint32_t                            VddciLevelCount;
+    uint32_t                            MvddLevelCount;
+
+    SMU71_Discrete_VoltageLevel          VddcLevel               [SMU71_MAX_LEVELS_VDDC];
+    SMU71_Discrete_VoltageLevel          VddciLevel              [SMU71_MAX_LEVELS_VDDCI];
+    SMU71_Discrete_VoltageLevel          MvddLevel               [SMU71_MAX_LEVELS_MVDD];
+
+    uint8_t                             GraphicsDpmLevelCount;
+    uint8_t                             MemoryDpmLevelCount;
+    uint8_t                             LinkLevelCount;
+    uint8_t                             MasterDeepSleepControl;
+
+    uint32_t                            Reserved[5];
+
+    // State table entries for each DPM state
+    SMU71_Discrete_GraphicsLevel         GraphicsLevel           [SMU71_MAX_LEVELS_GRAPHICS];
+    SMU71_Discrete_MemoryLevel           MemoryACPILevel;
+    SMU71_Discrete_MemoryLevel           MemoryLevel             [SMU71_MAX_LEVELS_MEMORY];
+    SMU71_Discrete_LinkLevel             LinkLevel               [SMU71_MAX_LEVELS_LINK];
+    SMU71_Discrete_ACPILevel             ACPILevel;
+
+    uint32_t                            SclkStepSize;
+    uint32_t                            Smio                    [SMU71_MAX_ENTRIES_SMIO];
+
+    uint8_t                             GraphicsBootLevel;
+    uint8_t                             GraphicsVoltageChangeEnable;
+    uint8_t                             GraphicsThermThrottleEnable;
+    uint8_t                             GraphicsInterval;
+
+    uint8_t                             VoltageInterval;
+    uint8_t                             ThermalInterval;
+    uint16_t                            TemperatureLimitHigh;
+
+    uint16_t                            TemperatureLimitLow;
+    uint8_t                             MemoryBootLevel;
+    uint8_t                             MemoryVoltageChangeEnable;
+
+    uint8_t                             MemoryInterval;
+    uint8_t                             MemoryThermThrottleEnable;
+    uint8_t                             MergedVddci;
+    uint8_t                             padding2;
+
+    uint16_t                            VoltageResponseTime;
+    uint16_t                            PhaseResponseTime;
+
+    uint8_t                             PCIeBootLinkLevel;
+    uint8_t                             PCIeGenInterval;
+    uint8_t                             DTEInterval;
+    uint8_t                             DTEMode;
+
+    uint8_t                             SVI2Enable;
+    uint8_t                             VRHotGpio;
+    uint8_t                             AcDcGpio;
+    uint8_t                             ThermGpio;
+
+    uint32_t                            DisplayCac;
+
+    uint16_t                            MaxPwr;
+    uint16_t                            NomPwr;
+
+    uint16_t                            FpsHighThreshold;
+    uint16_t                            FpsLowThreshold;
+
+    uint16_t                            BAPMTI_R  [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS];
+    uint16_t                            BAPMTI_RC [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS];
+
+    uint8_t                             DTEAmbientTempBase;
+    uint8_t                             DTETjOffset;
+    uint8_t                             GpuTjMax;
+    uint8_t                             GpuTjHyst;
+
+    uint16_t                            BootVddc;
+    uint16_t                            BootVddci;
+
+    uint16_t                            BootMVdd;
+    uint16_t                            padding;
+
+    uint32_t                            BAPM_TEMP_GRADIENT;
+
+    uint32_t                            LowSclkInterruptThreshold;
+    uint32_t                            VddGfxReChkWait;
+
+    uint16_t                            PPM_PkgPwrLimit;
+    uint16_t                            PPM_TemperatureLimit;
+
+    uint16_t                            DefaultTdp;
+    uint16_t                            TargetTdp;
+};
+
+typedef struct SMU71_Discrete_DpmTable SMU71_Discrete_DpmTable;
+
+// --------------------------------------------------- AC Timing Parameters ------------------------------------------------
+#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
+#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU71_MAX_LEVELS_MEMORY
+
+struct SMU71_Discrete_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMU71_Discrete_MCRegisterAddress SMU71_Discrete_MCRegisterAddress;
+
+struct SMU71_Discrete_MCRegisterSet
+{
+    uint32_t value[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMU71_Discrete_MCRegisterSet SMU71_Discrete_MCRegisterSet;
+
+struct SMU71_Discrete_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMU71_Discrete_MCRegisterAddress     address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+    SMU71_Discrete_MCRegisterSet         data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMU71_Discrete_MCRegisters SMU71_Discrete_MCRegisters;
+
+
+// --------------------------------------------------- Fan Table -----------------------------------------------------------
+struct SMU71_Discrete_FanTable
+{
+    uint16_t FdoMode;
+    int16_t  TempMin;
+    int16_t  TempMed;
+    int16_t  TempMax;
+    int16_t  Slope1;
+    int16_t  Slope2;
+    int16_t  FdoMin;
+    int16_t  HystUp;
+    int16_t  HystDown;
+    int16_t  HystSlope;
+    int16_t  TempRespLim;
+    int16_t  TempCurr;
+    int16_t  SlopeCurr;
+    int16_t  PwmCurr;
+    uint32_t RefreshPeriod;
+    int16_t  FdoMax;
+    uint8_t  TempSrc;
+    int8_t   Padding;
+};
+
+typedef struct SMU71_Discrete_FanTable SMU71_Discrete_FanTable;
+
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG             4
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT         (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
+
+struct SMU71_MclkDpmScoreboard
+{
+
+    uint32_t PercentageBusy;
+
+    int32_t  PIDError;
+    int32_t  PIDIntegral;
+    int32_t  PIDOutput;
+
+    uint32_t SigmaDeltaAccum;
+    uint32_t SigmaDeltaOutput;
+    uint32_t SigmaDeltaLevel;
+
+    uint32_t UtilizationSetpoint;
+
+    uint8_t  TdpClampMode;
+    uint8_t  TdcClampMode;
+    uint8_t  ThermClampMode;
+    uint8_t  VoltageBusy;
+
+    int8_t   CurrLevel;
+    int8_t   TargLevel;
+    uint8_t  LevelChangeInProgress;
+    uint8_t  UpHyst;
+
+    uint8_t  DownHyst;
+    uint8_t  VoltageDownHyst;
+    uint8_t  DpmEnable;
+    uint8_t  DpmRunning;
+
+    uint8_t  DpmForce;
+    uint8_t  DpmForceLevel;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+
+    uint32_t MinimumPerfMclk;
+
+    uint8_t  AcpiReq;
+    uint8_t  AcpiAck;
+    uint8_t  MclkSwitchInProgress;
+    uint8_t  MclkSwitchCritical;
+
+    uint8_t  TargetMclkIndex;
+    uint8_t  TargetMvddIndex;
+    uint8_t  MclkSwitchResult;
+
+    uint8_t  EnabledLevelsChange;
+
+    uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_MEMORY];
+    uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_MEMORY];
+
+    void     (*TargetStateCalculator)(uint8_t);
+    void     (*SavedTargetStateCalculator)(uint8_t);
+
+    uint16_t AutoDpmInterval;
+    uint16_t AutoDpmRange;
+
+    uint16_t  MclkSwitchingTime;
+    uint8_t padding[2];
+};
+
+typedef struct SMU71_MclkDpmScoreboard SMU71_MclkDpmScoreboard;
+
+struct SMU71_UlvScoreboard
+{
+    uint8_t     EnterUlv;
+    uint8_t     ExitUlv;
+    uint8_t     UlvActive;
+    uint8_t     WaitingForUlv;
+    uint8_t     UlvEnable;
+    uint8_t     UlvRunning;
+    uint8_t     UlvMasterEnable;
+    uint8_t     padding;
+    uint32_t    UlvAbortedCount;
+    uint32_t    UlvTimeStamp;
+};
+
+typedef struct SMU71_UlvScoreboard SMU71_UlvScoreboard;
+
+struct SMU71_VddGfxScoreboard
+{
+    uint8_t     VddGfxEnable;
+    uint8_t     VddGfxActive;
+    uint8_t     padding[2];
+
+    uint32_t    VddGfxEnteredCount;
+    uint32_t    VddGfxAbortedCount;
+};
+
+typedef struct SMU71_VddGfxScoreboard SMU71_VddGfxScoreboard;
+
+struct SMU71_AcpiScoreboard {
+  uint32_t SavedInterruptMask[2];
+  uint8_t LastACPIRequest;
+  uint8_t CgBifResp;
+  uint8_t RequestType;
+  uint8_t Padding;
+  SMU71_Discrete_ACPILevel D0Level;
+};
+
+typedef struct SMU71_AcpiScoreboard SMU71_AcpiScoreboard;
+
+
+struct SMU71_Discrete_PmFuses {
+  // dw0-dw1
+  uint8_t BapmVddCVidHiSidd[8];
+
+  // dw2-dw3
+  uint8_t BapmVddCVidLoSidd[8];
+
+  // dw4-dw5
+  uint8_t VddCVid[8];
+
+  // dw6
+  uint8_t SviLoadLineEn;
+  uint8_t SviLoadLineVddC;
+  uint8_t SviLoadLineTrimVddC;
+  uint8_t SviLoadLineOffsetVddC;
+
+  // dw7
+  uint16_t TDC_VDDC_PkgLimit;
+  uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+  uint8_t TDC_MAWt;
+
+  // dw8
+  uint8_t TdcWaterfallCtl;
+  uint8_t LPMLTemperatureMin;
+  uint8_t LPMLTemperatureMax;
+  uint8_t Reserved;
+
+  // dw9-dw12
+  uint8_t LPMLTemperatureScaler[16];
+
+  // dw13-dw14
+  int16_t FuzzyFan_ErrorSetDelta;
+  int16_t FuzzyFan_ErrorRateSetDelta;
+  int16_t FuzzyFan_PwmSetDelta;
+  uint16_t Reserved6;
+
+  // dw15-dw18
+  uint8_t GnbLPML[16];
+
+  // dw19
+  uint8_t GnbLPMLMaxVid;
+  uint8_t GnbLPMLMinVid;
+  uint8_t Reserved1[2];
+
+  // dw20
+  uint16_t BapmVddCBaseLeakageHiSidd;
+  uint16_t BapmVddCBaseLeakageLoSidd;
+};
+
+typedef struct SMU71_Discrete_PmFuses SMU71_Discrete_PmFuses;
+
+struct SMU71_Discrete_Log_Header_Table {
+  uint32_t    version;
+  uint32_t    asic_id;
+  uint16_t    flags;
+  uint16_t    entry_size;
+  uint32_t    total_size;
+  uint32_t    num_of_entries;
+  uint8_t     type;
+  uint8_t     mode;
+  uint8_t     filler_0[2];
+  uint32_t    filler_1[2];
+};
+
+typedef struct SMU71_Discrete_Log_Header_Table SMU71_Discrete_Log_Header_Table;
+
+struct SMU71_Discrete_Log_Cntl {
+    uint8_t             Enabled;
+    uint8_t             Type;
+    uint8_t             padding[2];
+    uint32_t            BufferSize;
+    uint32_t            SamplesLogged;
+    uint32_t            SampleSize;
+    uint32_t            AddrL;
+    uint32_t            AddrH;
+};
+
+typedef struct SMU71_Discrete_Log_Cntl SMU71_Discrete_Log_Cntl;
+
+#if defined SMU__DGPU_ONLY
+  #define CAC_ACC_NW_NUM_OF_SIGNALS 83
+#endif
+
+
+struct SMU71_Discrete_Cac_Collection_Table {
+  uint32_t temperature;
+  uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
+  uint32_t filler[4];
+};
+
+typedef struct SMU71_Discrete_Cac_Collection_Table SMU71_Discrete_Cac_Collection_Table;
+
+struct SMU71_Discrete_Cac_Verification_Table {
+  uint32_t VddcTotalPower;
+  uint32_t VddcLeakagePower;
+  uint32_t VddcConstantPower;
+  uint32_t VddcGfxDynamicPower;
+  uint32_t VddcUvdDynamicPower;
+  uint32_t VddcVceDynamicPower;
+  uint32_t VddcAcpDynamicPower;
+  uint32_t VddcPcieDynamicPower;
+  uint32_t VddcDceDynamicPower;
+  uint32_t VddcCurrent;
+  uint32_t VddcVoltage;
+  uint32_t VddciTotalPower;
+  uint32_t VddciLeakagePower;
+  uint32_t VddciConstantPower;
+  uint32_t VddciDynamicPower;
+  uint32_t Vddr1TotalPower;
+  uint32_t Vddr1LeakagePower;
+  uint32_t Vddr1ConstantPower;
+  uint32_t Vddr1DynamicPower;
+  uint32_t spare[8];
+  uint32_t temperature;
+};
+
+typedef struct SMU71_Discrete_Cac_Verification_Table SMU71_Discrete_Cac_Verification_Table;
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+#endif
+
+
+#endif
+
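
The smu71_discrete.h tables are byte-packed (#pragma pack(push, 1)) because
the SMC firmware consumes them verbatim; any layout drift is an ABI break.
A sketch of a compile-time guard one could keep on the driver side (the sizes
follow from pack(1) and the field lists above; needs linux/bug.h):

/* Sketch: catch accidental layout drift in firmware-shared structs. */
static inline void smu71_layout_checks(void)
{
	BUILD_BUG_ON(sizeof(SMU71_Discrete_MCRegisterAddress) != 4);
	BUILD_BUG_ON(sizeof(SMU71_Discrete_VoltageLevel) != 8);
}
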
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 3c235f0..34abfd2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -74,7 +74,6 @@
 struct pp_smumgr {
 	uint32_t chip_family;
 	uint32_t chip_id;
-	uint32_t hw_revision;
 	void *device;
 	void *backend;
 	uint32_t usec_timeout;
@@ -122,6 +121,12 @@
 
 extern int smu_free_memory(void *device, void *handle);
 
+extern int cz_smum_init(struct pp_smumgr *smumgr);
+extern int iceland_smum_init(struct pp_smumgr *smumgr);
+extern int tonga_smum_init(struct pp_smumgr *smumgr);
+extern int fiji_smum_init(struct pp_smumgr *smumgr);
+extern int polaris10_smum_init(struct pp_smumgr *smumgr);
+
 #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 
 #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
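
The five per-ASIC initializers now exported from smumgr.h are what the common
smumgr code dispatches on. A sketch of that dispatch, with the chip-id
constants assumed from amd_shared.h (illustrative, not a copy of smumgr.c,
which also switches on chip_family first):

static int smum_init_backend(struct pp_smumgr *smumgr)
{
	switch (smumgr->chip_id) {
	case CHIP_TOPAZ:		/* Iceland */
		return iceland_smum_init(smumgr);
	case CHIP_TONGA:
		return tonga_smum_init(smumgr);
	case CHIP_FIJI:
		return fiji_smum_init(smumgr);
	case CHIP_POLARIS10:
		return polaris10_smum_init(smumgr);
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return cz_smum_init(smumgr);
	default:
		return -EINVAL;
	}
}
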
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index f10fb64..19e7946 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -2,7 +2,8 @@
 # Makefile for the 'smu manager' sub-component of powerplay.
 # It provides the smu management services for the driver.
 
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+	  polaris10_smumgr.o iceland_smumgr.o
 
 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 87c023e..5a44485 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -89,13 +89,8 @@
 	if (result != 0)
 		return result;
 
-	result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+	return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
 					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
-
-	if (result != 0)
-		return result;
-
-	return 0;
 }
 
 static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
@@ -106,12 +101,12 @@
 
 	if (0 != (3 & smc_address)) {
 		printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n");
-		return -1;
+		return -EINVAL;
 	}
 
 	if (limit <= (smc_address + 3)) {
 		printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n");
-		return -1;
+		return -EINVAL;
 	}
 
 	cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
@@ -129,9 +124,10 @@
 		return -EINVAL;
 
 	result = cz_set_smc_sram_address(smumgr, smc_address, limit);
-	cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
+	if (!result)
+		cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
 
-	return 0;
+	return result;
 }
 
 static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
@@ -148,7 +144,6 @@
 static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
 {
 	struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
-	int result = 0;
 	uint32_t smc_address;
 
 	if (!smumgr->reload_fw) {
@@ -177,11 +172,9 @@
 	cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
 				cz_smu->toc_entry_power_profiling_index);
 
-	result = cz_send_msg_to_smc_with_parameter(smumgr,
+	return cz_send_msg_to_smc_with_parameter(smumgr,
 					PPSMC_MSG_ExecuteJob,
 					cz_smu->toc_entry_initialize_index);
-
-	return result;
 }
 
 static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
@@ -195,9 +188,6 @@
 	if (smumgr == NULL || smumgr->device == NULL)
 		return -EINVAL;
 
-	return cgs_read_register(smumgr->device,
-					mmSMU_MP1_SRBM2P_ARG_0);
-
 	cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
 
 	for (i = 0; i < smumgr->usec_timeout; i++) {
@@ -275,7 +265,10 @@
 	if (smumgr->chip_id == CHIP_STONEY)
 		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
 
-	cz_request_smu_load_fw(smumgr);
+	ret = cz_request_smu_load_fw(smumgr);
+	if (ret)
+		printk(KERN_ERR "[ powerplay] SMU firmware load failed\n");
+
 	cz_check_fw_load_finish(smumgr, fw_to_check);
 
 	ret = cz_load_mec_firmware(smumgr);
@@ -566,10 +559,7 @@
 
 	cz_smu_populate_single_ucode_load_task(smumgr,
 				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-	if (smumgr->chip_id == CHIP_STONEY)
-		cz_smu_populate_single_ucode_load_task(smumgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-	else
+	if (smumgr->chip_id != CHIP_STONEY)
 		cz_smu_populate_single_ucode_load_task(smumgr,
 				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
 	cz_smu_populate_single_ucode_load_task(smumgr,
@@ -580,10 +570,7 @@
 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
 	cz_smu_populate_single_ucode_load_task(smumgr,
 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-	if (smumgr->chip_id == CHIP_STONEY)
-		cz_smu_populate_single_ucode_load_task(smumgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-	else
+	if (smumgr->chip_id != CHIP_STONEY)
 		cz_smu_populate_single_ucode_load_task(smumgr,
 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
 	cz_smu_populate_single_ucode_load_task(smumgr,
@@ -610,19 +597,12 @@
 	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
 
 	cz_smu->toc_entry_used_count = 0;
-
 	cz_smu_initialize_toc_empty_job_list(smumgr);
-
 	cz_smu_construct_toc_for_rlc_aram_save(smumgr);
-
 	cz_smu_construct_toc_for_vddgfx_enter(smumgr);
-
 	cz_smu_construct_toc_for_vddgfx_exit(smumgr);
-
 	cz_smu_construct_toc_for_power_profiling(smumgr);
-
 	cz_smu_construct_toc_for_bootup(smumgr);
-
 	cz_smu_construct_toc_for_clock_table(smumgr);
 
 	return 0;
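
The cz_smumgr.c changes above converge on one pattern: return the SMUM_WAIT_*
result (or the callee's error code) directly instead of re-checking or masking
it. Semantically such a wait is a bounded poll; a simplified model of the
behaviour being relied on (this is an assumption about the macro, not its
actual definition; needs linux/delay.h):

/* Simplified model of a "wait until field != value" poll: spin until the
 * masked, shifted field differs from 'value', or give up after
 * smumgr->usec_timeout iterations. */
static int wait_field_unequal(struct pp_smumgr *smumgr, uint32_t reg,
			      uint32_t mask, uint32_t shift, uint32_t value)
{
	uint32_t i;

	for (i = 0; i < smumgr->usec_timeout; i++) {
		uint32_t cur = (cgs_read_register(smumgr->device, reg)
				& mask) >> shift;

		if (cur != value)
			return 0;
		udelay(1);
	}

	return -ETIME;
}
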
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
new file mode 100644
index 0000000..f506583
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -0,0 +1,713 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+
+#include "smumgr.h"
+#include "iceland_smumgr.h"
+#include "pp_debug.h"
+#include "smu_ucode_xfer_vi.h"
+#include "ppsmc.h"
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+#include "cgs_common.h"
+
+#define ICELAND_SMC_SIZE		0x20000
+#define BUFFER_SIZE			80000
+#define MAX_STRING_SIZE			15
+#define BUFFER_SIZETWO			131072 /* 128 * 1024 */
+
+/**
+ * Set the address for reading/writing the SMC SRAM space.
+ * @param    smumgr  the address of the powerplay SMU manager.
+ * @param    smcAddress the address in the SMC RAM to access.
+ */
+static int iceland_set_smc_sram_address(struct pp_smumgr *smumgr,
+				uint32_t smcAddress, uint32_t limit)
+{
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+	PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)),
+		"SMC address must be 4 byte aligned.",
+		return -EINVAL;);
+
+	PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)),
+		"SMC address is beyond the SMC RAM area.",
+		return -EINVAL;);
+
+	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress);
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+
+	return 0;
+}
+
+/**
+ * Copy bytes from an array into the SMC RAM space.
+ *
+ * @param    smumgr  the address of the powerplay SMU manager.
+ * @param    smcStartAddress the start address in the SMC RAM to copy bytes to.
+ * @param    src the byte array to copy the bytes from.
+ * @param    byteCount the number of bytes to copy.
+ */
+int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr,
+		uint32_t smcStartAddress, const uint8_t *src,
+		uint32_t byteCount, uint32_t limit)
+{
+	uint32_t addr;
+	uint32_t data, orig_data;
+	int result = 0;
+	uint32_t extra_shift;
+
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+	PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
+		"SMC address must be 4 byte aligned.",
+		return -EINVAL;);
+
+	PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
+		"SMC address is beyond the SMC RAM area.",
+		return -EINVAL;);
+
+	addr = smcStartAddress;
+
+	while (byteCount >= 4) {
+		/*
+		 * Bytes are written into the
+		 * SMC address space with the MSB first
+		 */
+		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+		result = iceland_set_smc_sram_address(smumgr, addr, limit);
+
+		if (result)
+			goto out;
+
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+
+		src += 4;
+		byteCount -= 4;
+		addr += 4;
+	}
+
+	if (0 != byteCount) {
+		/* Now write odd bytes left, do a read modify write cycle */
+		data = 0;
+
+		result = iceland_set_smc_sram_address(smumgr, addr, limit);
+		if (result)
+			goto out;
+
+		orig_data = cgs_read_register(smumgr->device,
+							mmSMC_IND_DATA_0);
+		extra_shift = 8 * (4 - byteCount);
+
+		while (byteCount > 0) {
+			data = (data << 8) + *src++;
+			byteCount--;
+		}
+
+		data <<= extra_shift;
+		data |= (orig_data & ~((~0UL) << extra_shift));
+
+		result = iceland_set_smc_sram_address(smumgr, addr, limit);
+		if (result)
+			goto out;
+
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+	}
+
+out:
+	return result;
+}
+
+/**
+ * Deassert the reset 'pin' (set it to high).
+ *
+ * @param smumgr  the address of the powerplay SMU manager.
+ */
+static int iceland_start_smc(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+				  SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+	return 0;
+}
+
+static void iceland_pp_reset_smc(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+				  SMC_SYSCON_RESET_CNTL,
+				  rst_reg, 1);
+}
+
+int iceland_program_jump_on_start(struct pp_smumgr *smumgr)
+{
+	static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
+
+	iceland_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1);
+
+	return 0;
+}
+
+/**
+ * Return whether the SMC is currently running.
+ *
+ * @param    smumgr  the address of the powerplay SMU manager.
+ */
+bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr)
+{
+	uint32_t val1, val2;
+
+	val1 = SMUM_READ_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
+	val2 = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+				     ixSMC_PC_C);
+
+	return ((0 == val1) && (0x20100 <= val2));
+}
+
+/**
+ * Send a message to the SMC, and wait for its response.
+ *
+ * @param    smumgr  the address of the powerplay SMU manager.
+ * @param    msg the message to send.
+ * @return   0 on success, a negative error code otherwise.
+ */
+static int iceland_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+{
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+
+	if (!iceland_is_smc_ram_running(smumgr))
+		return -EINVAL;
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+	PP_ASSERT_WITH_CODE(
+		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
+		"Failed to send Previous Message.",
+		);
+
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+	PP_ASSERT_WITH_CODE(
+		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
+		"Failed to send Message.",
+		);
+
+	return 0;
+}
+
+/**
+ * Send a message to the SMC with a parameter.
+ *
+ * @param    smumgr:  the address of the powerplay SMU manager.
+ * @param    msg: the message to send.
+ * @param    parameter: the parameter to send.
+ * @return   0 on success, a negative error code otherwise.
+ */
+static int iceland_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+				uint16_t msg, uint32_t parameter)
+{
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+
+	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+	return iceland_send_msg_to_smc(smumgr, msg);
+}
+
+/*
+ * Read a 32bit value from the SMC SRAM space.
+ * ALL PARAMETERS ARE IN HOST BYTE ORDER.
+ * @param    smumgr  the address of the powerplay SMU manager.
+ * @param    smcAddress the address in the SMC RAM to access.
+ * @param    value an output parameter for the data read from the SMC SRAM.
+ */
+int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr,
+				uint32_t smcAddress, uint32_t *value,
+				uint32_t limit)
+{
+	int result;
+
+	result = iceland_set_smc_sram_address(smumgr, smcAddress, limit);
+
+	if (0 != result)
+		return result;
+
+	*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
+
+	return 0;
+}
+
+/*
+ * Write a 32bit value to the SMC SRAM space.
+ * ALL PARAMETERS ARE IN HOST BYTE ORDER.
+ * @param    smumgr  the address of the powerplay SMU manager.
+ * @param    smcAddress the address in the SMC RAM to access.
+ * @param    value the value to write to the SMC SRAM.
+ */
+int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr,
+				 uint32_t smcAddress, uint32_t value,
+				 uint32_t limit)
+{
+	int result;
+
+	result = iceland_set_smc_sram_address(smumgr, smcAddress, limit);
+
+	if (0 != result)
+		return result;
+
+	cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
+
+	return 0;
+}
+
+static int iceland_smu_fini(struct pp_smumgr *smumgr)
+{
+	struct iceland_smumgr *priv = (struct iceland_smumgr *)(smumgr->backend);
+
+	smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
+	if (smumgr->backend != NULL) {
+		kfree(smumgr->backend);
+		smumgr->backend = NULL;
+	}
+
+	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+	return 0;
+}
+
+static enum cgs_ucode_id iceland_convert_fw_type_to_cgs(uint32_t fw_type)
+{
+	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
+
+	switch (fw_type) {
+	case UCODE_ID_SMU:
+		result = CGS_UCODE_ID_SMU;
+		break;
+	case UCODE_ID_SDMA0:
+		result = CGS_UCODE_ID_SDMA0;
+		break;
+	case UCODE_ID_SDMA1:
+		result = CGS_UCODE_ID_SDMA1;
+		break;
+	case UCODE_ID_CP_CE:
+		result = CGS_UCODE_ID_CP_CE;
+		break;
+	case UCODE_ID_CP_PFP:
+		result = CGS_UCODE_ID_CP_PFP;
+		break;
+	case UCODE_ID_CP_ME:
+		result = CGS_UCODE_ID_CP_ME;
+		break;
+	case UCODE_ID_CP_MEC:
+		result = CGS_UCODE_ID_CP_MEC;
+		break;
+	case UCODE_ID_CP_MEC_JT1:
+		result = CGS_UCODE_ID_CP_MEC_JT1;
+		break;
+	case UCODE_ID_CP_MEC_JT2:
+		result = CGS_UCODE_ID_CP_MEC_JT2;
+		break;
+	case UCODE_ID_RLC_G:
+		result = CGS_UCODE_ID_RLC_G;
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+/**
+ * Convert the PPIRI firmware type to an SMU type mask.
+ * For MEC, we need to check all MEC-related types.
+ */
+static uint16_t iceland_get_mask_for_firmware_type(uint16_t firmwareType)
+{
+	uint16_t result = 0;
+
+	switch (firmwareType) {
+	case UCODE_ID_SDMA0:
+		result = UCODE_ID_SDMA0_MASK;
+		break;
+	case UCODE_ID_SDMA1:
+		result = UCODE_ID_SDMA1_MASK;
+		break;
+	case UCODE_ID_CP_CE:
+		result = UCODE_ID_CP_CE_MASK;
+		break;
+	case UCODE_ID_CP_PFP:
+		result = UCODE_ID_CP_PFP_MASK;
+		break;
+	case UCODE_ID_CP_ME:
+		result = UCODE_ID_CP_ME_MASK;
+		break;
+	case UCODE_ID_CP_MEC:
+	case UCODE_ID_CP_MEC_JT1:
+	case UCODE_ID_CP_MEC_JT2:
+		result = UCODE_ID_CP_MEC_MASK;
+		break;
+	case UCODE_ID_RLC_G:
+		result = UCODE_ID_RLC_G_MASK;
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+/**
+ * Check whether the FW has been loaded;
+ * the SMU will not report completion until loading has finished.
+ */
+static int iceland_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType)
+{
+	uint16_t fwMask = iceland_get_mask_for_firmware_type(fwType);
+
+	if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
+				SOFT_REGISTERS_TABLE_27, fwMask, fwMask)) {
+		pr_err("[ powerplay ] check firmware loading failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Populate one firmware image to the data structure */
+static int iceland_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+				uint16_t firmware_type,
+				struct SMU_Entry *pentry)
+{
+	int result;
+	struct cgs_firmware_info info = {0};
+
+	result = cgs_get_firmware_info(
+				smumgr->device,
+				iceland_convert_fw_type_to_cgs(firmware_type),
+				&info);
+
+	if (result == 0) {
+		pentry->version = 0;
+		pentry->id = (uint16_t)firmware_type;
+		pentry->image_addr_high = smu_upper_32_bits(info.mc_addr);
+		pentry->image_addr_low = smu_lower_32_bits(info.mc_addr);
+		pentry->meta_data_addr_high = 0;
+		pentry->meta_data_addr_low = 0;
+		pentry->data_size_byte = info.image_size;
+		pentry->num_register_entries = 0;
+
+		if (firmware_type == UCODE_ID_RLC_G)
+			pentry->flags = 1;
+		else
+			pentry->flags = 0;
+	} else {
+		return result;
+	}
+
+	return result;
+}
+
+static void iceland_pp_stop_smc_clock(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+				  SMC_SYSCON_CLOCK_CNTL_0,
+				  ck_disable, 1);
+}
+
+static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+				  SMC_SYSCON_CLOCK_CNTL_0,
+				  ck_disable, 0);
+}
+
+int iceland_smu_start_smc(struct pp_smumgr *smumgr)
+{
+	/* set smc instruct start point at 0x0 */
+	iceland_program_jump_on_start(smumgr);
+
+	/* enable smc clock */
+	iceland_start_smc_clock(smumgr);
+
+	/* de-assert reset */
+	iceland_start_smc(smumgr);
+
+	SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS,
+				 INTERRUPTS_ENABLED, 1);
+
+	return 0;
+}
+
+/**
+ * Upload the SMC firmware to the SMC microcontroller.
+ *
+ * @param    smumgr  the address of the SMU manager. The firmware image is
+ *           looked up internally via cgs_get_firmware_info().
+ */
+int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
+{
+	const uint8_t *src;
+	uint32_t byte_count, val;
+	uint32_t data;
+	struct cgs_firmware_info info = {0};
+
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+
+	/* load SMC firmware */
+	cgs_get_firmware_info(smumgr->device,
+		iceland_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+
+	if (info.image_size & 3) {
+		pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n");
+		return -EINVAL;
+	}
+
+	if (info.image_size > ICELAND_SMC_SIZE) {
+		pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n");
+		return -EINVAL;
+	}
+
+	/* wait for smc boot up */
+	SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+					 RCU_UC_EVENTS, boot_seq_done, 0);
+
+	/* clear firmware interrupt enable flag */
+	val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+				    ixSMC_SYSCON_MISC_CNTL);
+	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+			       ixSMC_SYSCON_MISC_CNTL, val | 1);
+
+	/* stop smc clock */
+	iceland_pp_stop_smc_clock(smumgr);
+
+	/* reset smc */
+	iceland_pp_reset_smc(smumgr);
+
+	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0,
+			   info.ucode_start_address);
+
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL,
+			 AUTO_INCREMENT_IND_0, 1);
+
+	byte_count = info.image_size;
+	src = (const uint8_t *)info.kptr;
+
+	while (byte_count >= 4) {
+		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+		src += 4;
+		byte_count -= 4;
+	}
+
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL,
+			 AUTO_INCREMENT_IND_0, 0);
+
+	return 0;
+}
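
The copy loop above packs each group of four firmware bytes most-significant byte first, i.e. the SMC consumes the image as big-endian words. Assuming <asm/unaligned.h> were pulled in, the same loop could be written more compactly as:

	while (byte_count >= 4) {
		/* get_unaligned_be32() performs the same MSB-first packing */
		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0,
				   get_unaligned_be32(src));
		src += 4;
		byte_count -= 4;
	}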
+
+static int iceland_request_smu_reload_fw(struct pp_smumgr *smumgr)
+{
+	struct iceland_smumgr *iceland_smu =
+		(struct iceland_smumgr *)(smumgr->backend);
+	uint16_t fw_to_load;
+	int result = 0;
+	struct SMU_DRAMData_TOC *toc;
+
+	toc = (struct SMU_DRAMData_TOC *)iceland_smu->pHeader;
+	toc->num_entries = 0;
+	toc->structure_version = 1;
+
+	PP_ASSERT_WITH_CODE(
+		0 == iceland_populate_single_firmware_entry(smumgr,
+		UCODE_ID_RLC_G,
+		&toc->entry[toc->num_entries++]),
+		"Failed to Get Firmware Entry.\n",
+		return -1);
+	PP_ASSERT_WITH_CODE(
+		0 == iceland_populate_single_firmware_entry(smumgr,
+		UCODE_ID_CP_CE,
+		&toc->entry[toc->num_entries++]),
+		"Failed to Get Firmware Entry.\n",
+		return -1);
+	PP_ASSERT_WITH_CODE(
+		0 == iceland_populate_single_firmware_entry
+		(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
+		"Failed to Get Firmware Entry.\n", return -1);
+	PP_ASSERT_WITH_CODE(
+		0 == iceland_populate_single_firmware_entry
+		(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
+		"Failed to Get Firmware Entry.\n", return -1);
+	PP_ASSERT_WITH_CODE(
+		0 == iceland_populate_single_firmware_entry
+		(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
+		"Failed to Get Firmware Entry.\n", return -1);
+	PP_ASSERT_WITH_CODE(
+		0 == iceland_populate_single_firmware_entry
+		(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
+		"Failed to Get Firmware Entry.\n", return -1);
+	PP_ASSERT_WITH_CODE(
+		0 == iceland_populate_single_firmware_entry
+		(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
+		"Failed to Get Firmware Entry.\n", return -1);
+	PP_ASSERT_WITH_CODE(
+		0 == iceland_populate_single_firmware_entry
+		(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
+		"Failed to Get Firmware Entry.\n", return -1);
+	PP_ASSERT_WITH_CODE(
+		0 == iceland_populate_single_firmware_entry
+		(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+		"Failed to Get Firmware Entry.\n", return -1);
+
+	if (!iceland_is_smc_ram_running(smumgr)) {
+		result = iceland_smu_upload_firmware_image(smumgr);
+		if (result)
+			return result;
+
+		result = iceland_smu_start_smc(smumgr);
+		if (result)
+			return result;
+	}
+
+	iceland_send_msg_to_smc_with_parameter(smumgr,
+		PPSMC_MSG_DRV_DRAM_ADDR_HI,
+		iceland_smu->header_buffer.mc_addr_high);
+
+	iceland_send_msg_to_smc_with_parameter(smumgr,
+		PPSMC_MSG_DRV_DRAM_ADDR_LO,
+		iceland_smu->header_buffer.mc_addr_low);
+
+	fw_to_load = UCODE_ID_RLC_G_MASK
+			+ UCODE_ID_SDMA0_MASK
+			+ UCODE_ID_SDMA1_MASK
+			+ UCODE_ID_CP_CE_MASK
+			+ UCODE_ID_CP_ME_MASK
+			+ UCODE_ID_CP_PFP_MASK
+			+ UCODE_ID_CP_MEC_MASK
+			+ UCODE_ID_CP_MEC_JT1_MASK
+			+ UCODE_ID_CP_MEC_JT2_MASK;
+
+	PP_ASSERT_WITH_CODE(
+		0 == iceland_send_msg_to_smc_with_parameter(
+		smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
+		"Fail to Request SMU Load uCode", return 0);
+
+	return result;
+}
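
The repeated PP_ASSERT_WITH_CODE blocks above could equally be expressed table-driven; a sketch, where iceland_fw_list is a hypothetical local array and not part of this patch:

	static const uint16_t iceland_fw_list[] = {
		UCODE_ID_RLC_G, UCODE_ID_CP_CE, UCODE_ID_CP_PFP,
		UCODE_ID_CP_ME, UCODE_ID_CP_MEC, UCODE_ID_CP_MEC_JT1,
		UCODE_ID_CP_MEC_JT2, UCODE_ID_SDMA0, UCODE_ID_SDMA1,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(iceland_fw_list); i++)
		PP_ASSERT_WITH_CODE(
			0 == iceland_populate_single_firmware_entry(smumgr,
				iceland_fw_list[i],
				&toc->entry[toc->num_entries++]),
			"Failed to Get Firmware Entry.\n", return -1);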
+
+static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
+						uint32_t firmwareType)
+{
+	return 0;
+}
+
+static int iceland_start_smu(struct pp_smumgr *smumgr)
+{
+	int result;
+
+	result = iceland_smu_upload_firmware_image(smumgr);
+	if (result)
+		return result;
+
+	result = iceland_smu_start_smc(smumgr);
+	if (result)
+		return result;
+
+	result = iceland_request_smu_reload_fw(smumgr);
+
+	return result;
+}
+
+/**
+ * Initialize the Iceland SMU backend and allocate the buffer that holds
+ * the firmware table of contents (the header buffer).
+ *
+ * @param    smumgr  the address of the SMU manager.
+ */
+static int iceland_smu_init(struct pp_smumgr *smumgr)
+{
+	struct iceland_smumgr *iceland_smu;
+	uint64_t mc_addr = 0;
+
+	/* The backend private data was already allocated in iceland_smum_init() */
+	iceland_smu = (struct iceland_smumgr *)(smumgr->backend);
+	iceland_smu->header_buffer.data_size =
+		((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+
+	smu_allocate_memory(smumgr->device,
+		iceland_smu->header_buffer.data_size,
+		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+		PAGE_SIZE,
+		&mc_addr,
+		&iceland_smu->header_buffer.kaddr,
+		&iceland_smu->header_buffer.handle);
+
+	iceland_smu->pHeader = iceland_smu->header_buffer.kaddr;
+	iceland_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+	iceland_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+	PP_ASSERT_WITH_CODE((NULL != iceland_smu->pHeader),
+		"Out of memory.",
+		kfree(smumgr->backend);
+		cgs_free_gpu_mem(smumgr->device,
+		(cgs_handle_t)iceland_smu->header_buffer.handle);
+		return -1);
+
+	return 0;
+}
+
+static const struct pp_smumgr_func iceland_smu_funcs = {
+	.smu_init = &iceland_smu_init,
+	.smu_fini = &iceland_smu_fini,
+	.start_smu = &iceland_start_smu,
+	.check_fw_load_finish = &iceland_check_fw_load_finish,
+	.request_smu_load_fw = &iceland_request_smu_reload_fw,
+	.request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
+	.send_msg_to_smc = &iceland_send_msg_to_smc,
+	.send_msg_to_smc_with_parameter = &iceland_send_msg_to_smc_with_parameter,
+	.download_pptable_settings = NULL,
+	.upload_pptable_settings = NULL,
+};
+
+int iceland_smum_init(struct pp_smumgr *smumgr)
+{
+	struct iceland_smumgr *iceland_smu = NULL;
+
+	iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL);
+
+	if (iceland_smu == NULL)
+		return -ENOMEM;
+
+	smumgr->backend = iceland_smu;
+	smumgr->smumgr_funcs = &iceland_smu_funcs;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
new file mode 100644
index 0000000..62009a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+
+#ifndef _ICELAND_SMUMGR_H_
+#define _ICELAND_SMUMGR_H_
+
+struct iceland_buffer_entry {
+	uint32_t data_size;
+	uint32_t mc_addr_low;
+	uint32_t mc_addr_high;
+	void *kaddr;
+	unsigned long  handle;
+};
+
+/* Iceland only has a header_buffer; it has no separate smu buffer. */
+struct iceland_smumgr {
+	uint8_t *pHeader;
+	uint8_t *pMecImage;
+	uint32_t ulSoftRegsStart;
+
+	struct iceland_buffer_entry header_buffer;
+};
+
+extern int iceland_smum_init(struct pp_smumgr *smumgr);
+extern int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr,
+				     uint32_t smcStartAddress,
+				     const uint8_t *src,
+				     uint32_t byteCount, uint32_t limit);
+
+extern int iceland_smu_start_smc(struct pp_smumgr *smumgr);
+
+extern int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr,
+				       uint32_t smcAddress,
+				       uint32_t *value, uint32_t limit);
+extern int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr,
+					uint32_t smcAddress,
+					uint32_t value, uint32_t limit);
+
+extern bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr);
+extern int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5dba7c5..8047ad2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -40,7 +40,6 @@
 #include "cgs_common.h"
 
 #define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_SCALE 4
 
 /* Microcode file is stored in this buffer */
 #define BUFFER_SIZE                 80000
@@ -978,7 +977,7 @@
 	return 0;
 }
 
-static const struct pp_smumgr_func ellsemere_smu_funcs = {
+static const struct pp_smumgr_func polaris10_smu_funcs = {
 	.smu_init = polaris10_smu_init,
 	.smu_fini = polaris10_smu_fini,
 	.start_smu = polaris10_start_smu,
@@ -1001,7 +1000,7 @@
 		return -1;
 
 	smumgr->backend = polaris10_smu;
-	smumgr->smumgr_funcs = &ellsemere_smu_funcs;
+	smumgr->smumgr_funcs = &polaris10_smu_funcs;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
index e5377ae..7c2445f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -26,12 +26,27 @@
 
 #include <polaris10_ppsmc.h>
 #include <pp_endian.h>
+#include "smu74.h"
 
 struct polaris10_avfs {
 	enum AVFS_BTC_STATUS avfs_btc_status;
 	uint32_t           avfs_btc_param;
 };
 
+struct polaris10_pt_defaults {
+	uint8_t   SviLoadLineEn;
+	uint8_t   SviLoadLineVddC;
+	uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
+	uint8_t   TDC_MAWt;
+	uint8_t   TdcWaterfallCtl;
+	uint8_t   DTEAmbientTempBase;
+
+	uint32_t  DisplayCac;
+	uint32_t  BAPM_TEMP_GRADIENT;
+	uint16_t  BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+	uint16_t  BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+};
+
 struct polaris10_buffer_entry {
 	uint32_t data_size;
 	uint32_t mc_addr_low;
@@ -40,6 +55,11 @@
 	unsigned long  handle;
 };
 
+struct polaris10_range_table {
+	uint32_t trans_lower_frequency; /* in 10khz */
+	uint32_t trans_upper_frequency;
+};
+
 struct polaris10_smumgr {
 	uint8_t *header;
 	uint8_t *mec_image;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 7723473..bbeb786 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -28,10 +28,7 @@
 #include "smumgr.h"
 #include "cgs_common.h"
 #include "linux/delay.h"
-#include "cz_smumgr.h"
-#include "tonga_smumgr.h"
-#include "fiji_smumgr.h"
-#include "polaris10_smumgr.h"
+
 
 int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 {
@@ -47,7 +44,6 @@
 	smumgr->device = pp_init->device;
 	smumgr->chip_family = pp_init->chip_family;
 	smumgr->chip_id = pp_init->chip_id;
-	smumgr->hw_revision = pp_init->rev_id;
 	smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
 	smumgr->reload_fw = 1;
 	handle->smu_mgr = smumgr;
@@ -58,6 +54,9 @@
 		break;
 	case AMDGPU_FAMILY_VI:
 		switch (smumgr->chip_id) {
+		case CHIP_TOPAZ:
+			iceland_smum_init(smumgr);
+			break;
 		case CHIP_TONGA:
 			tonga_smum_init(smumgr);
 			break;
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index ee0a61c..7130b04 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -183,8 +183,6 @@
 }
 
 static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
-	.prepare_fb = NULL,
-	.cleanup_fb = NULL,
 	.atomic_update = arc_pgu_plane_atomic_update,
 };
 
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 82171d2..c383d72 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -91,7 +91,8 @@
 
 	drm_atomic_helper_commit_modeset_disables(drm, state);
 	drm_atomic_helper_commit_modeset_enables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state, true);
+	drm_atomic_helper_commit_planes(drm, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	malidp_atomic_commit_hw_done(state);
 
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index f5ebdd6..1e0e68f 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -211,7 +211,7 @@
 	.desc			= "Armada SoC DRM",
 	.date			= "20120730",
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET |
-				  DRIVER_HAVE_IRQ | DRIVER_PRIME,
+				  DRIVER_PRIME,
 	.ioctls			= armada_ioctls,
 	.fops			= &armada_drm_fops,
 };
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index d4a3d61..8e7483d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -457,7 +457,7 @@
 
 	/* Apply the atomic update. */
 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
-	drm_atomic_helper_commit_planes(dev, old_state, false);
+	drm_atomic_helper_commit_planes(dev, old_state, 0);
 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
 	drm_atomic_helper_wait_for_vblanks(dev, old_state);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 146809a..72e6b7d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -755,7 +755,7 @@
 }
 
 static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
-					const struct drm_plane_state *new_state)
+					struct drm_plane_state *new_state)
 {
 	/*
 	 * FIXME: we should avoid this const -> non-const cast but it's
@@ -780,7 +780,7 @@
 }
 
 static void atmel_hlcdc_plane_cleanup_fb(struct drm_plane *p,
-				const struct drm_plane_state *old_state)
+					 struct drm_plane_state *old_state)
 {
 	/*
 	 * FIXME: we should avoid this const -> non-const cast but it's
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index ec8fb2e..8ed3906 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -922,15 +922,13 @@
 	return 0;
 }
 
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
 static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 {
 	struct adv7511_link_config link_config;
 	struct adv7511 *adv7511;
 	struct device *dev = &i2c->dev;
+	unsigned int main_i2c_addr = i2c->addr << 1;
+	unsigned int edid_i2c_addr = main_i2c_addr + 4;
 	unsigned int val;
 	int ret;
 
@@ -991,8 +989,10 @@
 
 	regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
 	regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
-		     packet_i2c_addr);
-	regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+		     main_i2c_addr - 0xa);
+	regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
+		     main_i2c_addr - 2);
+
 	adv7511_packet_disable(adv7511, 0xffff);
 
 	adv7511->i2c_main = i2c;
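
A worked example of the address arithmetic: with the ADV7511 main map at 7-bit address 0x3d, the formulas above reproduce exactly the constants this patch removes:

	main_i2c_addr = 0x3d << 1;	/* 0x7a (8-bit)                  */
	edid_i2c_addr = 0x7a + 4;	/* 0x7e, the old edid_i2c_addr   */
	/* packet map: 0x7a - 0xa = 0x70, the old packet_i2c_addr      */
	/* CEC map:    0x7a - 0x2 = 0x78, the old cec_i2c_addr         */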
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 5eebd15..d7f7b7c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -149,13 +149,12 @@
 	i2c_unregister_device(adv->i2c_cec);
 }
 
-static const int cec_i2c_addr = 0x78;
-
 int adv7533_init_cec(struct adv7511 *adv)
 {
 	int ret;
 
-	adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1);
+	adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
+				     adv->i2c_main->addr - 1);
 	if (!adv->i2c_cec)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 32715da..efac8ab 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -97,6 +97,83 @@
 	return 0;
 }
 
+int analogix_dp_enable_psr(struct device *dev)
+{
+	struct analogix_dp_device *dp = dev_get_drvdata(dev);
+	struct edp_vsc_psr psr_vsc;
+
+	if (!dp->psr_support)
+		return -EINVAL;
+
+	/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+	memset(&psr_vsc, 0, sizeof(psr_vsc));
+	psr_vsc.sdp_header.HB0 = 0;
+	psr_vsc.sdp_header.HB1 = 0x7;
+	psr_vsc.sdp_header.HB2 = 0x2;
+	psr_vsc.sdp_header.HB3 = 0x8;
+
+	psr_vsc.DB0 = 0;
+	psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
+
+	analogix_dp_send_psr_spd(dp, &psr_vsc);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_enable_psr);
+
+int analogix_dp_disable_psr(struct device *dev)
+{
+	struct analogix_dp_device *dp = dev_get_drvdata(dev);
+	struct edp_vsc_psr psr_vsc;
+
+	if (!dp->psr_support)
+		return -EINVAL;
+
+	/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+	memset(&psr_vsc, 0, sizeof(psr_vsc));
+	psr_vsc.sdp_header.HB0 = 0;
+	psr_vsc.sdp_header.HB1 = 0x7;
+	psr_vsc.sdp_header.HB2 = 0x2;
+	psr_vsc.sdp_header.HB3 = 0x8;
+
+	psr_vsc.DB0 = 0;
+	psr_vsc.DB1 = 0;
+
+	analogix_dp_send_psr_spd(dp, &psr_vsc);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
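
A platform driver would typically toggle these around self-refresh transitions. A hedged call-site sketch, where dp_dev stands in for the struct device of the analogix_dp instance:

	/* enter PSR once the frame is stable */
	ret = analogix_dp_enable_psr(dp_dev);
	if (ret)
		dev_warn(dp_dev, "sink reports no PSR support (%d)\n", ret);

	/* ... later, before scanout must resume normally ... */
	ret = analogix_dp_disable_psr(dp_dev);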
+
+static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
+{
+	unsigned char psr_version;
+
+	analogix_dp_read_byte_from_dpcd(dp, DP_PSR_SUPPORT, &psr_version);
+	dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version);
+
+	return (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
+}
+
+static void analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
+{
+	unsigned char psr_en;
+
+	/* Disable psr function */
+	analogix_dp_read_byte_from_dpcd(dp, DP_PSR_EN_CFG, &psr_en);
+	psr_en &= ~DP_PSR_ENABLE;
+	analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en);
+
+	/* Main-Link transmitter remains active during PSR active states */
+	psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION;
+	analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en);
+
+	/* Enable psr function */
+	psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE |
+		 DP_PSR_CRC_VERIFICATION;
+	analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en);
+
+	analogix_dp_enable_psr_crc(dp);
+}
+
 static unsigned char analogix_dp_calc_edid_check_sum(unsigned char *edid_data)
 {
 	int i;
@@ -921,13 +998,69 @@
 
 	/* Enable video */
 	analogix_dp_start_video(dp);
+
+	dp->psr_support = analogix_dp_detect_sink_psr(dp);
+	if (dp->psr_support)
+		analogix_dp_enable_sink_psr(dp);
+}
+
+/*
+ * This function is a bit of a catch-all for panel preparation, hopefully
+ * simplifying the logic of functions that need to prepare/unprepare the panel
+ * below.
+ *
+ * If @prepare is true, this function will prepare the panel. Conversely, if it
+ * is false, the panel will be unprepared.
+ *
+ * If @is_modeset_prepare is true, the function will disregard the current state
+ * of the panel and either prepare/unprepare the panel based on @prepare. Once
+ * it finishes, it will update dp->panel_is_modeset to reflect the current state
+ * of the panel.
+ */
+static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
+				     bool prepare, bool is_modeset_prepare)
+{
+	int ret = 0;
+
+	if (!dp->plat_data->panel)
+		return 0;
+
+	mutex_lock(&dp->panel_lock);
+
+	/*
+	 * Exit early if this is a temporary prepare/unprepare and we're
+	 * already prepared for a modeset (since we want neither to prepare
+	 * twice nor to unprepare early).
+	 */
+	if (dp->panel_is_modeset && !is_modeset_prepare)
+		goto out;
+
+	if (prepare)
+		ret = drm_panel_prepare(dp->plat_data->panel);
+	else
+		ret = drm_panel_unprepare(dp->plat_data->panel);
+
+	if (ret)
+		goto out;
+
+	if (is_modeset_prepare)
+		dp->panel_is_modeset = prepare;
+
+out:
+	mutex_unlock(&dp->panel_lock);
+	return ret;
 }
 
 int analogix_dp_get_modes(struct drm_connector *connector)
 {
 	struct analogix_dp_device *dp = to_dp(connector);
 	struct edid *edid = (struct edid *)dp->edid;
-	int num_modes = 0;
+	int ret, num_modes = 0;
+
+	ret = analogix_dp_prepare_panel(dp, true, false);
+	if (ret) {
+		DRM_ERROR("Failed to prepare panel (%d)\n", ret);
+		return 0;
+	}
 
 	if (analogix_dp_handle_edid(dp) == 0) {
 		drm_mode_connector_update_edid_property(&dp->connector, edid);
@@ -940,6 +1073,10 @@
 	if (dp->plat_data->get_modes)
 		num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
 
+	ret = analogix_dp_prepare_panel(dp, false, false);
+	if (ret)
+		DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
+
 	return num_modes;
 }
 
@@ -960,11 +1097,23 @@
 analogix_dp_detect(struct drm_connector *connector, bool force)
 {
 	struct analogix_dp_device *dp = to_dp(connector);
+	enum drm_connector_status status = connector_status_disconnected;
+	int ret;
 
-	if (analogix_dp_detect_hpd(dp))
+	ret = analogix_dp_prepare_panel(dp, true, false);
+	if (ret) {
+		DRM_ERROR("Failed to prepare panel (%d)\n", ret);
 		return connector_status_disconnected;
+	}
 
-	return connector_status_connected;
+	if (!analogix_dp_detect_hpd(dp))
+		status = connector_status_connected;
+
+	ret = analogix_dp_prepare_panel(dp, false, false);
+	if (ret)
+		DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
+
+	return status;
 }
 
 static void analogix_dp_connector_destroy(struct drm_connector *connector)
@@ -1035,6 +1184,16 @@
 	return 0;
 }
 
+static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+	int ret;
+
+	ret = analogix_dp_prepare_panel(dp, true, true);
+	if (ret)
+		DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+}
+
 static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
 {
 	struct analogix_dp_device *dp = bridge->driver_private;
@@ -1058,6 +1217,7 @@
 static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 {
 	struct analogix_dp_device *dp = bridge->driver_private;
+	int ret;
 
 	if (dp->dpms_mode != DRM_MODE_DPMS_ON)
 		return;
@@ -1077,6 +1237,10 @@
 
 	pm_runtime_put_sync(dp->dev);
 
+	ret = analogix_dp_prepare_panel(dp, false, true);
+	if (ret)
+		DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+
 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
 }
 
@@ -1165,9 +1329,9 @@
 }
 
 static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+	.pre_enable = analogix_dp_bridge_pre_enable,
 	.enable = analogix_dp_bridge_enable,
 	.disable = analogix_dp_bridge_disable,
-	.pre_enable = analogix_dp_bridge_nop,
 	.post_disable = analogix_dp_bridge_nop,
 	.mode_set = analogix_dp_bridge_mode_set,
 	.attach = analogix_dp_bridge_attach,
@@ -1254,6 +1418,9 @@
 	dp->dev = &pdev->dev;
 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
 
+	mutex_init(&dp->panel_lock);
+	dp->panel_is_modeset = false;
+
 	/*
 	 * platform dp driver needs container_of the plat_data to get
 	 * the driver private data, so we need to store the pointer of
@@ -1333,13 +1500,6 @@
 
 	phy_power_on(dp->phy);
 
-	if (dp->plat_data->panel) {
-		if (drm_panel_prepare(dp->plat_data->panel)) {
-			DRM_ERROR("failed to setup the panel\n");
-			return -EBUSY;
-		}
-	}
-
 	analogix_dp_init_dp(dp);
 
 	ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index b456380..473b980 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -177,6 +177,10 @@
 	int			hpd_gpio;
 	bool                    force_hpd;
 	unsigned char           edid[EDID_BLOCK_LENGTH * 2];
+	bool			psr_support;
+
+	struct mutex		panel_lock;
+	bool			panel_is_modeset;
 
 	struct analogix_dp_plat_data *plat_data;
 };
@@ -278,4 +282,8 @@
 void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp);
 void analogix_dp_enable_scrambling(struct analogix_dp_device *dp);
 void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
+void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp);
+void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+			      struct edp_vsc_psr *vsc);
+
 #endif /* _ANALOGIX_DP_CORE_H */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 48030f0..fae0293 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -1073,34 +1073,22 @@
 
 u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp)
 {
-	u32 reg;
-
-	reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
-	return reg;
+	return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp)
 {
-	u32 reg;
-
-	reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
-	return reg;
+	return readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp)
 {
-	u32 reg;
-
-	reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
-	return reg;
+	return readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp)
 {
-	u32 reg;
-
-	reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
-	return reg;
+	return readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
 }
 
 void analogix_dp_reset_macro(struct analogix_dp_device *dp)
@@ -1322,3 +1310,54 @@
 	reg |= SCRAMBLING_DISABLE;
 	writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
 }
+
+void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp)
+{
+	writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON);
+}
+
+void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+			      struct edp_vsc_psr *vsc)
+{
+	unsigned int val;
+
+	/* don't send info frame */
+	val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+	val &= ~IF_EN;
+	writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+
+	/* configure single frame update mode */
+	writel(PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE,
+	       dp->reg_base + ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL);
+
+	/* configure VSC HB0~HB3 */
+	writel(vsc->sdp_header.HB0, dp->reg_base + ANALOGIX_DP_SPD_HB0);
+	writel(vsc->sdp_header.HB1, dp->reg_base + ANALOGIX_DP_SPD_HB1);
+	writel(vsc->sdp_header.HB2, dp->reg_base + ANALOGIX_DP_SPD_HB2);
+	writel(vsc->sdp_header.HB3, dp->reg_base + ANALOGIX_DP_SPD_HB3);
+
+	/* configure reused VSC PB0~PB3, magic number from vendor */
+	writel(0x00, dp->reg_base + ANALOGIX_DP_SPD_PB0);
+	writel(0x16, dp->reg_base + ANALOGIX_DP_SPD_PB1);
+	writel(0xCE, dp->reg_base + ANALOGIX_DP_SPD_PB2);
+	writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3);
+
+	/* configure DB0 / DB1 values */
+	writel(vsc->DB0, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0);
+	writel(vsc->DB1, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1);
+
+	/* set reuse spd infoframe */
+	val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+	val |= REUSE_SPD_EN;
+	writel(val, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+
+	/* mark info frame update */
+	val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+	val = (val | IF_UP) & ~IF_EN;
+	writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+
+	/* send info frame */
+	val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+	val |= IF_EN;
+	writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index cdcc6c5..40200c6 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -22,6 +22,8 @@
 #define ANALOGIX_DP_VIDEO_CTL_8			0x3C
 #define ANALOGIX_DP_VIDEO_CTL_10		0x44
 
+#define ANALOGIX_DP_SPDIF_AUDIO_CTL_0		0xD8
+
 #define ANALOGIX_DP_PLL_REG_1			0xfc
 #define ANALOGIX_DP_PLL_REG_2			0x9e4
 #define ANALOGIX_DP_PLL_REG_3			0x9e8
@@ -30,6 +32,21 @@
 
 #define ANALOGIX_DP_PD				0x12c
 
+#define ANALOGIX_DP_IF_TYPE			0x244
+#define ANALOGIX_DP_IF_PKT_DB1			0x254
+#define ANALOGIX_DP_IF_PKT_DB2			0x258
+#define ANALOGIX_DP_SPD_HB0			0x2F8
+#define ANALOGIX_DP_SPD_HB1			0x2FC
+#define ANALOGIX_DP_SPD_HB2			0x300
+#define ANALOGIX_DP_SPD_HB3			0x304
+#define ANALOGIX_DP_SPD_PB0			0x308
+#define ANALOGIX_DP_SPD_PB1			0x30C
+#define ANALOGIX_DP_SPD_PB2			0x310
+#define ANALOGIX_DP_SPD_PB3			0x314
+#define ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL	0x318
+#define ANALOGIX_DP_VSC_SHADOW_DB0		0x31C
+#define ANALOGIX_DP_VSC_SHADOW_DB1		0x320
+
 #define ANALOGIX_DP_LANE_MAP			0x35C
 
 #define ANALOGIX_DP_ANALOG_CTL_1		0x370
@@ -103,6 +120,8 @@
 
 #define ANALOGIX_DP_SOC_GENERAL_CTL		0x800
 
+#define ANALOGIX_DP_CRC_CON			0x890
+
 /* ANALOGIX_DP_TX_SW_RESET */
 #define RESET_DP_TX				(0x1 << 0)
 
@@ -151,6 +170,7 @@
 #define VID_CHK_UPDATE_TYPE_SHIFT		(4)
 #define VID_CHK_UPDATE_TYPE_1			(0x1 << 4)
 #define VID_CHK_UPDATE_TYPE_0			(0x0 << 4)
+#define REUSE_SPD_EN				(0x1 << 3)
 
 /* ANALOGIX_DP_VIDEO_CTL_8 */
 #define VID_HRES_TH(x)				(((x) & 0xf) << 4)
@@ -167,6 +187,12 @@
 #define REF_CLK_27M				(0x0 << 0)
 #define REF_CLK_MASK				(0x1 << 0)
 
+/* ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL */
+#define PSR_FRAME_UP_TYPE_BURST			(0x1 << 0)
+#define PSR_FRAME_UP_TYPE_SINGLE		(0x0 << 0)
+#define PSR_CRC_SEL_HARDWARE			(0x1 << 1)
+#define PSR_CRC_SEL_MANUALLY			(0x0 << 1)
+
 /* ANALOGIX_DP_LANE_MAP */
 #define LANE3_MAP_LOGIC_LANE_0			(0x0 << 6)
 #define LANE3_MAP_LOGIC_LANE_1			(0x1 << 6)
@@ -376,4 +402,12 @@
 #define VIDEO_MODE_SLAVE_MODE			(0x1 << 0)
 #define VIDEO_MODE_MASTER_MODE			(0x0 << 0)
 
+/* ANALOGIX_DP_PKT_SEND_CTL */
+#define IF_UP					(0x1 << 4)
+#define IF_EN					(0x1 << 0)
+
+/* ANALOGIX_DP_CRC_CON */
+#define PSR_VID_CRC_FLUSH			(0x1 << 2)
+#define PSR_VID_CRC_ENABLE			(0x1 << 0)
+
 #endif /* _ANALOGIX_DP_REG_H */
diff --git a/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
index 122bb01..8f2d137 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
@@ -640,7 +640,6 @@
 	.remove	= snd_dw_hdmi_remove,
 	.driver	= {
 		.name = DRIVER_NAME,
-		.owner = THIS_MODULE,
 		.pm = PM_OPS,
 	},
 };
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index 77ab473..cdf39aa 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -940,10 +940,11 @@
 	 */
 
 	/*
-	 * AVI data byte 1 differences: Colorspace in bits 4,5 rather than 5,6,
-	 * active aspect present in bit 6 rather than 4.
+	 * AVI data byte 1 differences: Colorspace in bits 0,1 rather than 5,6,
+	 * scan info in bits 4,5 rather than 0,1 and active aspect present in
+	 * bit 6 rather than 4.
 	 */
-	val = (frame.colorspace & 3) << 4 | (frame.scan_mode & 0x3);
+	val = (frame.scan_mode & 3) << 4 | (frame.colorspace & 3);
 	if (frame.active_aspect & 15)
 		val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT;
 	if (frame.top_bar || frame.bottom_bar)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index fa39307..904d29c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -837,8 +837,9 @@
 	/* Check whether this plane supports the fb pixel format. */
 	ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
 	if (ret) {
-		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
-				 drm_get_format_name(state->fb->pixel_format));
+		char *format_name = drm_get_format_name(state->fb->pixel_format);
+		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
+		kfree(format_name);
 		return ret;
 	}
 
@@ -1690,7 +1691,7 @@
 				goto out;
 			}
 
-			prop = drm_property_find(dev, prop_id);
+			prop = drm_mode_obj_find_prop_id(obj, prop_id);
 			if (!prop) {
 				drm_mode_object_unreference(obj);
 				ret = -ENOENT;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index e1f5de2..ea78d70 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -594,7 +594,7 @@
 	struct drm_plane_state *plane_state;
 	int i, ret = 0;
 
-	ret = drm_atomic_helper_normalize_zpos(dev, state);
+	ret = drm_atomic_normalize_zpos(dev, state);
 	if (ret)
 		return ret;
 
@@ -749,6 +749,8 @@
 		/* Right function depends upon target state. */
 		if (crtc->state->enable && funcs->prepare)
 			funcs->prepare(crtc);
+		else if (funcs->atomic_disable)
+			funcs->atomic_disable(crtc, old_crtc_state);
 		else if (funcs->disable)
 			funcs->disable(crtc);
 		else
@@ -1007,29 +1009,46 @@
  * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
  * @dev: DRM device
  * @state: atomic state object with old state structures
+ * @pre_swap: if true, do an interruptible wait
  *
  * For implicit sync, driver should fish the exclusive fence out from the
  * incoming fb's and stash it in the drm_plane_state.  This is called after
  * drm_atomic_helper_swap_state() so it uses the current plane state (and
  * just uses the atomic state to find the changed planes)
+ *
+ * Returns zero on success or < 0 if fence_wait() fails.
  */
-void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
-			    struct drm_atomic_state *state)
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+				      struct drm_atomic_state *state,
+				      bool pre_swap)
 {
 	struct drm_plane *plane;
 	struct drm_plane_state *plane_state;
-	int i;
+	int i, ret;
 
 	for_each_plane_in_state(state, plane, plane_state, i) {
-		if (!plane->state->fence)
+		if (!pre_swap)
+			plane_state = plane->state;
+
+		if (!plane_state->fence)
 			continue;
 
-		WARN_ON(!plane->state->fb);
+		WARN_ON(!plane_state->fb);
 
-		fence_wait(plane->state->fence, false);
-		fence_put(plane->state->fence);
-		plane->state->fence = NULL;
+		/*
+		 * If waiting for fences pre-swap (i.e. in the nonblocking
+		 * path), userspace can still interrupt the operation, so make
+		 * the wait interruptible instead of blocking indefinitely.
+		 */
+		ret = fence_wait(plane_state->fence, pre_swap);
+		if (ret)
+			return ret;
+
+		fence_put(plane_state->fence);
+		plane_state->fence = NULL;
 	}
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
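
The split between the two call sites is visible in the hunks below: the blocking path waits interruptibly before the state swap, while the commit tail waits uninterruptibly on the swapped plane state. Condensed from this patch:

	/* in drm_atomic_helper_commit(), before swap_state() */
	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			return ret;
	}

	/* in the commit tail, after the swap (waits on plane->state->fence) */
	drm_atomic_helper_wait_for_fences(dev, state, false);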
 
@@ -1146,7 +1165,8 @@
  *
  *     drm_atomic_helper_commit_modeset_enables(dev, state);
  *
- *     drm_atomic_helper_commit_planes(dev, state, true);
+ *     drm_atomic_helper_commit_planes(dev, state,
+ *                                     DRM_PLANE_COMMIT_ACTIVE_ONLY);
  *
  * for committing the atomic update to hardware.  See the kerneldoc entries for
  * these three functions for more details.
@@ -1157,7 +1177,7 @@
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, false);
+	drm_atomic_helper_commit_planes(dev, state, 0);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
@@ -1176,7 +1196,7 @@
 
 	funcs = dev->mode_config.helper_private;
 
-	drm_atomic_helper_wait_for_fences(dev, state);
+	drm_atomic_helper_wait_for_fences(dev, state, false);
 
 	drm_atomic_helper_wait_for_dependencies(state);
 
@@ -1235,6 +1255,12 @@
 	if (ret)
 		return ret;
 
+	if (!nonblock) {
+		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * This is the point of no return - everything below never fails except
 	 * when the hw goes bonghits. Which means we can commit the new state on
@@ -1676,7 +1702,7 @@
  * drm_atomic_helper_commit_planes - commit plane state
  * @dev: DRM device
  * @old_state: atomic state object with old state structures
- * @active_only: Only commit on active CRTC if set
+ * @flags: flags for committing plane state
  *
  * This function commits the new plane state using the plane and atomic helper
  * functions for planes and crtcs. It assumes that the atomic state has already
@@ -1696,25 +1722,34 @@
  * most drivers don't need to be immediately notified of plane updates for a
  * disabled CRTC.
  *
- * Unless otherwise needed, drivers are advised to set the @active_only
- * parameters to true in order not to receive plane update notifications related
- * to a disabled CRTC. This avoids the need to manually ignore plane updates in
+ * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
+ * @flags in order not to receive plane update notifications related to a
+ * disabled CRTC. This avoids the need to manually ignore plane updates in
  * driver code when the driver and/or hardware can't or just don't need to deal
  * with updates on disabled CRTCs, for example when supporting runtime PM.
  *
- * The drm_atomic_helper_commit() default implementation only sets @active_only
- * to false to most closely match the behaviour of the legacy helpers. This should
- * not be copied blindly by drivers.
+ * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
+ * display controllers require that a CRTC's planes are disabled together with
+ * the CRTC itself. This function then skips the ->atomic_disable call for a
+ * plane if the CRTC of the old plane state needs a modesetting operation. Of
+ * course, the drivers then need to disable the planes in their CRTC disable
+ * callbacks, since nobody else will do that for them.
+ *
+ * The drm_atomic_helper_commit() default implementation doesn't set the
+ * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
+ * This should not be copied blindly by drivers.
  */
 void drm_atomic_helper_commit_planes(struct drm_device *dev,
 				     struct drm_atomic_state *old_state,
-				     bool active_only)
+				     uint32_t flags)
 {
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state;
 	int i;
+	bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
+	bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
 
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
 		const struct drm_crtc_helper_funcs *funcs;
@@ -1758,10 +1793,19 @@
 		/*
 		 * Special-case disabling the plane if drivers support it.
 		 */
-		if (disabling && funcs->atomic_disable)
+		if (disabling && funcs->atomic_disable) {
+			struct drm_crtc_state *crtc_state;
+
+			crtc_state = old_plane_state->crtc->state;
+
+			if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+			    no_disable)
+				continue;
+
 			funcs->atomic_disable(plane, old_plane_state);
-		else if (plane->state->crtc || disabling)
+		} else if (plane->state->crtc || disabling) {
 			funcs->atomic_update(plane, old_plane_state);
+		}
 	}
 
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
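
For hardware that tears planes down in the CRTC ->atomic_disable hook, a driver's commit tail would combine both flags introduced here; a hedged sketch:

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY |
					DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
	drm_atomic_helper_commit_modeset_enables(dev, state);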
@@ -1840,12 +1884,12 @@
 
 /**
  * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
- * @crtc: CRTC
+ * @old_crtc_state: atomic state object with the old CRTC state
  * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
  *
  * Disables all planes associated with the given CRTC. This can be
- * used for instance in the CRTC helper disable callback to disable
- * all planes before shutting down the display pipeline.
+ * used for instance in the CRTC helper atomic_disable callback to disable
+ * all planes.
  *
  * If the atomic-parameter is set the function calls the CRTC's
  * atomic_begin hook before and atomic_flush hook after disabling the
@@ -1854,9 +1898,11 @@
  * It is a bug to call this function without having implemented the
  * ->atomic_disable() plane hook.
  */
-void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
-					      bool atomic)
+void
+drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
+					 bool atomic)
 {
+	struct drm_crtc *crtc = old_crtc_state->crtc;
 	const struct drm_crtc_helper_funcs *crtc_funcs =
 		crtc->helper_private;
 	struct drm_plane *plane;
@@ -1864,11 +1910,11 @@
 	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
 		crtc_funcs->atomic_begin(crtc, NULL);
 
-	drm_for_each_plane(plane, crtc->dev) {
+	drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
 		const struct drm_plane_helper_funcs *plane_funcs =
 			plane->helper_private;
 
-		if (plane->state->crtc != crtc || !plane_funcs)
+		if (!plane_funcs)
 			continue;
 
 		WARN_ON(!plane_funcs->atomic_disable);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index f3c0942..0813b7e 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -193,8 +193,7 @@
 }
 
 /**
- * drm_atomic_helper_normalize_zpos - calculate normalized zpos values for all
- *				      crtcs
+ * drm_atomic_normalize_zpos - calculate normalized zpos values for all crtcs
  * @dev: DRM device
  * @state: atomic state of DRM device
  *
@@ -205,8 +204,8 @@
  * RETURNS
  * Zero for success or -errno
  */
-int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
-				     struct drm_atomic_state *state)
+int drm_atomic_normalize_zpos(struct drm_device *dev,
+			      struct drm_atomic_state *state)
 {
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
@@ -236,3 +235,4 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_atomic_normalize_zpos);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 2555430..4840466 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -98,11 +98,11 @@
  * @dev: DRM device
  * @bridge: bridge control structure
  *
- * called by a kms driver to link one of our encoder/bridge to the given
+ * Called by a kms driver to link one of our encoder/bridge to the given
  * bridge.
  *
  * Note that setting up links between the bridge and our encoder/bridge
- * objects needs to be handled by the kms driver itself
+ * objects needs to be handled by the kms driver itself.
  *
  * RETURNS:
  * Zero on success, error code on failure
@@ -125,6 +125,31 @@
 EXPORT_SYMBOL(drm_bridge_attach);
 
 /**
+ * drm_bridge_detach - deassociate given bridge from its DRM device
+ *
+ * @bridge: bridge control structure
+ *
+ * Called by a kms driver to unlink the given bridge from its DRM device.
+ *
+ * Note that tearing down links between the bridge and our encoder/bridge
+ * objects needs to be handled by the kms driver itself.
+ */
+void drm_bridge_detach(struct drm_bridge *bridge)
+{
+	if (WARN_ON(!bridge))
+		return;
+
+	if (WARN_ON(!bridge->dev))
+		return;
+
+	if (bridge->funcs->detach)
+		bridge->funcs->detach(bridge);
+
+	bridge->dev = NULL;
+}
+EXPORT_SYMBOL(drm_bridge_detach);
+
+/**
  * DOC: bridge callbacks
  *
  * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
new file mode 100644
index 0000000..26bb78c7
--- /dev/null
+++ b/drivers/gpu/drm/drm_connector.c
@@ -0,0 +1,1123 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * In DRM, connectors are the general abstraction for display sinks, and also
+ * include fixed panels or anything else that can display pixels in some form.
+ * As opposed to all other KMS objects representing hardware (like CRTC,
+ * encoder or plane abstractions), connectors can be hotplugged and unplugged
+ * at runtime. Hence they are reference-counted using drm_connector_reference()
+ * and drm_connector_unreference().
+ *
+ * KMS drivers must create, initialize, register and attach a struct
+ * &drm_connector for each such sink. The instance is created and initialized
+ * like other KMS objects.
+ *
+ * The connector is then registered with a call to drm_connector_init() with a
+ * pointer to the connector functions and a connector type, and exposed through
+ * sysfs with a call to drm_connector_register().
+ *
+ * Connectors must be attached to an encoder to be used. For devices that map
+ * connectors to encoders 1:1, the connector should be attached at
+ * initialization time with a call to drm_mode_connector_attach_encoder(). The
+ * driver must also set the struct &drm_connector encoder field to point to the
+ * attached encoder.
+ *
+ * For connectors which are not fixed (unlike built-in panels) the driver needs to
+ * support hotplug notifications. The simplest way to do that is by using the
+ * probe helpers, see drm_kms_helper_poll_init() for connectors which don't have
+ * hardware support for hotplug interrupts. Connectors with hardware hotplug
+ * support can instead use e.g. drm_helper_hpd_irq_event().
+ */
+
+struct drm_conn_prop_enum_list {
+	int type;
+	const char *name;
+	struct ida ida;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
+	{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+	{ DRM_MODE_CONNECTOR_VGA, "VGA" },
+	{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+	{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+	{ DRM_MODE_CONNECTOR_Composite, "Composite" },
+	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
+	{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+	{ DRM_MODE_CONNECTOR_Component, "Component" },
+	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
+	{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+	{ DRM_MODE_CONNECTOR_TV, "TV" },
+	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
+	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
+	{ DRM_MODE_CONNECTOR_DPI, "DPI" },
+};
+
+void drm_connector_ida_init(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+		ida_init(&drm_connector_enum_list[i].ida);
+}
+
+void drm_connector_ida_destroy(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+		ida_destroy(&drm_connector_enum_list[i].ida);
+}
+
+/**
+ * drm_connector_get_cmdline_mode - reads the user's cmdline mode
+ * @connector: connector to query
+ *
+ * The kernel supports per-connector configuration of its consoles through
+ * use of the video= parameter. This function parses that option and
+ * extracts the user's specified mode (or enable/disable status) for a
+ * particular connector. This is typically only used during the early fbdev
+ * setup.
+ */
+static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
+{
+	struct drm_cmdline_mode *mode = &connector->cmdline_mode;
+	char *option = NULL;
+
+	if (fb_get_options(connector->name, &option))
+		return;
+
+	if (!drm_mode_parse_command_line_for_connector(option,
+						       connector,
+						       mode))
+		return;
+
+	if (mode->force) {
+		const char *s;
+
+		switch (mode->force) {
+		case DRM_FORCE_OFF:
+			s = "OFF";
+			break;
+		case DRM_FORCE_ON_DIGITAL:
+			s = "ON - dig";
+			break;
+		default:
+		case DRM_FORCE_ON:
+			s = "ON";
+			break;
+		}
+
+		DRM_INFO("forcing %s connector %s\n", connector->name, s);
+		connector->force = mode->force;
+	}
+
+	DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+		      connector->name,
+		      mode->xres, mode->yres,
+		      mode->refresh_specified ? mode->refresh : 60,
+		      mode->rb ? " reduced blanking" : "",
+		      mode->margins ? " with margins" : "",
+		      mode->interlace ?  " interlaced" : "");
+}
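
The option strings parsed here follow the usual video= syntax documented in Documentation/fb/modedb.txt; illustrative values:

	video=HDMI-A-1:1920x1080@60	(pick a specific mode)
	video=LVDS-1:d			(force the connector off)
	video=DP-1:e			(force the connector on)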
+
+static void drm_connector_free(struct kref *kref)
+{
+	struct drm_connector *connector =
+		container_of(kref, struct drm_connector, base.refcount);
+	struct drm_device *dev = connector->dev;
+
+	drm_mode_object_unregister(dev, &connector->base);
+	connector->funcs->destroy(connector);
+}
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init(struct drm_device *dev,
+		       struct drm_connector *connector,
+		       const struct drm_connector_funcs *funcs,
+		       int connector_type)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	int ret;
+	struct ida *connector_ida =
+		&drm_connector_enum_list[connector_type].ida;
+
+	drm_modeset_lock_all(dev);
+
+	ret = drm_mode_object_get_reg(dev, &connector->base,
+				      DRM_MODE_OBJECT_CONNECTOR,
+				      false, drm_connector_free);
+	if (ret)
+		goto out_unlock;
+
+	connector->base.properties = &connector->properties;
+	connector->dev = dev;
+	connector->funcs = funcs;
+
+	ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+	if (ret < 0)
+		goto out_put;
+	connector->index = ret;
+	ret = 0;
+
+	connector->connector_type = connector_type;
+	connector->connector_type_id =
+		ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+	if (connector->connector_type_id < 0) {
+		ret = connector->connector_type_id;
+		goto out_put_id;
+	}
+	connector->name =
+		kasprintf(GFP_KERNEL, "%s-%d",
+			  drm_connector_enum_list[connector_type].name,
+			  connector->connector_type_id);
+	if (!connector->name) {
+		ret = -ENOMEM;
+		goto out_put_type_id;
+	}
+
+	INIT_LIST_HEAD(&connector->probed_modes);
+	INIT_LIST_HEAD(&connector->modes);
+	connector->edid_blob_ptr = NULL;
+	connector->status = connector_status_unknown;
+
+	drm_connector_get_cmdline_mode(connector);
+
+	/* We should add connectors at the end to avoid upsetting the connector
+	 * index too much. */
+	list_add_tail(&connector->head, &config->connector_list);
+	config->num_connector++;
+
+	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+		drm_object_attach_property(&connector->base,
+					      config->edid_property,
+					      0);
+
+	drm_object_attach_property(&connector->base,
+				      config->dpms_property, 0);
+
+	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+		drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
+	}
+
+	connector->debugfs_entry = NULL;
+out_put_type_id:
+	if (ret)
+		ida_remove(connector_ida, connector->connector_type_id);
+out_put_id:
+	if (ret)
+		ida_remove(&config->connector_ida, connector->index);
+out_put:
+	if (ret)
+		drm_mode_object_unregister(dev, &connector->base);
+
+out_unlock:
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_connector_init);
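
A sketch of the subclassing pattern the kerneldoc above describes (all foo_*
names hypothetical): the driver embeds the connector in its own state object
and passes its vtable at init time, here using the stock probe/dpms helpers.

	struct foo_connector {
		struct drm_connector base;
		/* driver-private state follows */
	};

	static const struct drm_connector_funcs foo_connector_funcs = {
		.dpms = drm_helper_connector_dpms,
		.fill_modes = drm_helper_probe_single_connector_modes,
		.destroy = foo_connector_destroy,
	};

	/* at driver load time */
	ret = drm_connector_init(dev, &foo->base, &foo_connector_funcs,
				 DRM_MODE_CONNECTOR_HDMIA);
	if (ret)
		return ret;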
+
+/**
+ * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * @connector: connector to attach
+ * @encoder: encoder to attach @connector to
+ *
+ * This function links up a connector to an encoder. Note that the routing
+ * restrictions between encoders and crtcs are exposed to userspace through the
+ * possible_clones and possible_crtcs bitmasks.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+				      struct drm_encoder *encoder)
+{
+	int i;
+
+	/*
+	 * In the past, drivers have attempted to model the static association
+	 * of connector to encoder in simple connector/encoder devices using a
+	 * direct assignment of connector->encoder = encoder. This connection
+	 * is a logical one and the responsibility of the core, so drivers are
+	 * expected not to mess with this.
+	 *
+	 * Note that the error return should've been enough here, but a large
+	 * majority of drivers ignore the return value, so add in a big WARN
+	 * to get people's attention.
+	 */
+	if (WARN_ON(connector->encoder))
+		return -EINVAL;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0) {
+			connector->encoder_ids[i] = encoder->base.id;
+			return 0;
+		}
+	}
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
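
A minimal sketch of the usual init-time pairing (the encoder fields and funcs
are hypothetical): the encoder is initialised first and then linked to the
connector, checking both return values.

	ret = drm_encoder_init(dev, &foo->encoder, &foo_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret)
		return ret;

	ret = drm_mode_connector_attach_encoder(&foo->base, &foo->encoder);
	if (ret)
		return ret;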
+
+static void drm_mode_remove(struct drm_connector *connector,
+			    struct drm_display_mode *mode)
+{
+	list_del(&mode->head);
+	drm_mode_destroy(connector->dev, mode);
+}
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode, *t;
+
+	/* The connector should have been removed from userspace long before
+	 * it is finally destroyed.
+	 */
+	if (WARN_ON(connector->registered))
+		drm_connector_unregister(connector);
+
+	if (connector->tile_group) {
+		drm_mode_put_tile_group(dev, connector->tile_group);
+		connector->tile_group = NULL;
+	}
+
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+		drm_mode_remove(connector, mode);
+
+	list_for_each_entry_safe(mode, t, &connector->modes, head)
+		drm_mode_remove(connector, mode);
+
+	ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
+		   connector->connector_type_id);
+
+	ida_remove(&dev->mode_config.connector_ida,
+		   connector->index);
+
+	kfree(connector->display_info.bus_formats);
+	drm_mode_object_unregister(dev, &connector->base);
+	kfree(connector->name);
+	connector->name = NULL;
+	list_del(&connector->head);
+	dev->mode_config.num_connector--;
+
+	WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
+	if (connector->state && connector->funcs->atomic_destroy_state)
+		connector->funcs->atomic_destroy_state(connector,
+						       connector->state);
+
+	memset(connector, 0, sizeof(*connector));
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
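
The ->destroy callback invoked through drm_connector_free() above normally
just pairs up with the init; a minimal sketch for a kmalloc'ed connector
(foo_connector as in the earlier sketch):

	static void foo_connector_destroy(struct drm_connector *connector)
	{
		struct foo_connector *foo =
			container_of(connector, struct foo_connector, base);

		drm_connector_cleanup(connector);
		kfree(foo);
	}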
+
+/**
+ * drm_connector_register - register a connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_register(struct drm_connector *connector)
+{
+	int ret;
+
+	if (connector->registered)
+		return 0;
+
+	ret = drm_sysfs_connector_add(connector);
+	if (ret)
+		return ret;
+
+	ret = drm_debugfs_connector_add(connector);
+	if (ret)
+		goto err_sysfs;
+
+	if (connector->funcs->late_register) {
+		ret = connector->funcs->late_register(connector);
+		if (ret)
+			goto err_debugfs;
+	}
+
+	drm_mode_object_register(connector->dev, &connector->base);
+
+	connector->registered = true;
+	return 0;
+
+err_debugfs:
+	drm_debugfs_connector_remove(connector);
+err_sysfs:
+	drm_sysfs_connector_remove(connector);
+	return ret;
+}
+EXPORT_SYMBOL(drm_connector_register);
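
Drivers that expose additional userspace interfaces tie them to the connector
lifetime through the ->late_register/->early_unregister hooks called above; a
hypothetical debugfs file as an example (to_foo_connector and foo_state_fops
are assumed driver helpers):

	static int foo_connector_late_register(struct drm_connector *connector)
	{
		struct foo_connector *foo = to_foo_connector(connector);

		foo->debugfs_file = debugfs_create_file("foo_state", 0444,
							connector->debugfs_entry,
							foo, &foo_state_fops);
		return 0;
	}

	static void foo_connector_early_unregister(struct drm_connector *connector)
	{
		struct foo_connector *foo = to_foo_connector(connector);

		debugfs_remove(foo->debugfs_file);
	}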
+
+/**
+ * drm_connector_unregister - unregister a connector
+ * @connector: the connector to unregister
+ *
+ * Unregister userspace interfaces for a connector
+ */
+void drm_connector_unregister(struct drm_connector *connector)
+{
+	if (!connector->registered)
+		return;
+
+	if (connector->funcs->early_unregister)
+		connector->funcs->early_unregister(connector);
+
+	drm_sysfs_connector_remove(connector);
+	drm_debugfs_connector_remove(connector);
+
+	connector->registered = false;
+}
+EXPORT_SYMBOL(drm_connector_unregister);
+
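+/**
+ * drm_connector_unregister_all - unregister all connectors of a device
+ * @dev: drm device
+ *
+ * Unregisters the userspace interfaces of all connectors on @dev by calling
+ * drm_connector_unregister() for each of them.
+ */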
+void drm_connector_unregister_all(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+
+	/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		drm_connector_unregister(connector);
+}
+
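+/**
+ * drm_connector_register_all - register all connectors of a device
+ * @dev: drm device
+ *
+ * Registers the userspace interfaces of all connectors on @dev. On failure,
+ * any connectors registered so far are unregistered again.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */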
+int drm_connector_register_all(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	int ret;
+
+	/* FIXME: taking the mode config mutex ends up in a clash with
+	 * fbcon/backlight registration */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		ret = drm_connector_register(connector);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	drm_connector_unregister_all(dev);
+	return ret;
+}
+
+/**
+ * drm_get_connector_status_name - return a string for connector status
+ * @status: connector status to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions, this one returns a
+ * const pointer and is hence threadsafe.
+ */
+const char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+	if (status == connector_status_connected)
+		return "connected";
+	else if (status == connector_status_disconnected)
+		return "disconnected";
+	else
+		return "unknown";
+}
+EXPORT_SYMBOL(drm_get_connector_status_name);
+
+static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
+	{ SubPixelUnknown, "Unknown" },
+	{ SubPixelHorizontalRGB, "Horizontal RGB" },
+	{ SubPixelHorizontalBGR, "Horizontal BGR" },
+	{ SubPixelVerticalRGB, "Vertical RGB" },
+	{ SubPixelVerticalBGR, "Vertical BGR" },
+	{ SubPixelNone, "None" },
+};
+
+/**
+ * drm_get_subpixel_order_name - return a string for a given subpixel enum
+ * @order: enum of subpixel_order
+ *
+ * Note you could abuse this and return something out of bounds, but that
+ * would be a caller error.  No unscrubbed user data should make it here.
+ */
+const char *drm_get_subpixel_order_name(enum subpixel_order order)
+{
+	return drm_subpixel_enum_list[order].name;
+}
+EXPORT_SYMBOL(drm_get_subpixel_order_name);
+
+static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
+	{ DRM_MODE_DPMS_ON, "On" },
+	{ DRM_MODE_DPMS_STANDBY, "Standby" },
+	{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
+	{ DRM_MODE_DPMS_OFF, "Off" }
+};
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/**
+ * drm_display_info_set_bus_formats - set the supported bus formats
+ * @info: display info to store bus formats in
+ * @formats: array containing the supported bus formats
+ * @num_formats: the number of entries in the @formats array
+ *
+ * Store the supported bus formats in the display info structure.
+ * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
+ * a full list of available formats.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_display_info_set_bus_formats(struct drm_display_info *info,
+				     const u32 *formats,
+				     unsigned int num_formats)
+{
+	u32 *fmts = NULL;
+
+	if (!formats && num_formats)
+		return -EINVAL;
+
+	if (formats && num_formats) {
+		fmts = kmemdup(formats, sizeof(*formats) * num_formats,
+			       GFP_KERNEL);
+		if (!fmts)
+			return -ENOMEM;
+	}
+
+	kfree(info->bus_formats);
+	info->bus_formats = fmts;
+	info->num_bus_formats = num_formats;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_display_info_set_bus_formats);
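
For example, a panel or bridge driver advertising a single 24-bit RGB bus
could fill this in at probe time (the format choice is illustrative):

	static const u32 bus_formats[] = { MEDIA_BUS_FMT_RGB888_1X24 };

	ret = drm_display_info_set_bus_formats(&connector->display_info,
					       bus_formats,
					       ARRAY_SIZE(bus_formats));
	if (ret)
		return ret;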
+
+/* Optional connector properties. */
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
+	{ DRM_MODE_SCALE_NONE, "None" },
+	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
+	{ DRM_MODE_SCALE_CENTER, "Center" },
+	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
+	{ DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
+	{ DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
+	{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
+};
+
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
+	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
+	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+		 drm_dvi_i_subconnector_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
+	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
+	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+		 drm_tv_subconnector_enum_list)
+
+int drm_connector_create_standard_properties(struct drm_device *dev)
+{
+	struct drm_property *prop;
+
+	prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+				   DRM_MODE_PROP_IMMUTABLE,
+				   "EDID", 0);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.edid_property = prop;
+
+	prop = drm_property_create_enum(dev, 0,
+				   "DPMS", drm_dpms_enum_list,
+				   ARRAY_SIZE(drm_dpms_enum_list));
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.dpms_property = prop;
+
+	prop = drm_property_create(dev,
+				   DRM_MODE_PROP_BLOB |
+				   DRM_MODE_PROP_IMMUTABLE,
+				   "PATH", 0);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.path_property = prop;
+
+	prop = drm_property_create(dev,
+				   DRM_MODE_PROP_BLOB |
+				   DRM_MODE_PROP_IMMUTABLE,
+				   "TILE", 0);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.tile_property = prop;
+
+	return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+	struct drm_property *dvi_i_selector;
+	struct drm_property *dvi_i_subconnector;
+
+	if (dev->mode_config.dvi_i_select_subconnector_property)
+		return 0;
+
+	dvi_i_selector =
+		drm_property_create_enum(dev, 0,
+				    "select subconnector",
+				    drm_dvi_i_select_enum_list,
+				    ARRAY_SIZE(drm_dvi_i_select_enum_list));
+	dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+	dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+				    "subconnector",
+				    drm_dvi_i_subconnector_enum_list,
+				    ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+	dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
+
+/**
+ * drm_mode_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing the name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.  The caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev,
+				  unsigned int num_modes,
+				  const char * const modes[])
+{
+	struct drm_property *tv_selector;
+	struct drm_property *tv_subconnector;
+	unsigned int i;
+
+	if (dev->mode_config.tv_select_subconnector_property)
+		return 0;
+
+	/*
+	 * Basic connector properties
+	 */
+	tv_selector = drm_property_create_enum(dev, 0,
+					  "select subconnector",
+					  drm_tv_select_enum_list,
+					  ARRAY_SIZE(drm_tv_select_enum_list));
+	if (!tv_selector)
+		goto nomem;
+
+	dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+	tv_subconnector =
+		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+				    "subconnector",
+				    drm_tv_subconnector_enum_list,
+				    ARRAY_SIZE(drm_tv_subconnector_enum_list));
+	if (!tv_subconnector)
+		goto nomem;
+	dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+	/*
+	 * Other, TV specific properties: margins & TV modes.
+	 */
+	dev->mode_config.tv_left_margin_property =
+		drm_property_create_range(dev, 0, "left margin", 0, 100);
+	if (!dev->mode_config.tv_left_margin_property)
+		goto nomem;
+
+	dev->mode_config.tv_right_margin_property =
+		drm_property_create_range(dev, 0, "right margin", 0, 100);
+	if (!dev->mode_config.tv_right_margin_property)
+		goto nomem;
+
+	dev->mode_config.tv_top_margin_property =
+		drm_property_create_range(dev, 0, "top margin", 0, 100);
+	if (!dev->mode_config.tv_top_margin_property)
+		goto nomem;
+
+	dev->mode_config.tv_bottom_margin_property =
+		drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+	if (!dev->mode_config.tv_bottom_margin_property)
+		goto nomem;
+
+	dev->mode_config.tv_mode_property =
+		drm_property_create(dev, DRM_MODE_PROP_ENUM,
+				    "mode", num_modes);
+	if (!dev->mode_config.tv_mode_property)
+		goto nomem;
+
+	for (i = 0; i < num_modes; i++)
+		drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+				      i, modes[i]);
+
+	dev->mode_config.tv_brightness_property =
+		drm_property_create_range(dev, 0, "brightness", 0, 100);
+	if (!dev->mode_config.tv_brightness_property)
+		goto nomem;
+
+	dev->mode_config.tv_contrast_property =
+		drm_property_create_range(dev, 0, "contrast", 0, 100);
+	if (!dev->mode_config.tv_contrast_property)
+		goto nomem;
+
+	dev->mode_config.tv_flicker_reduction_property =
+		drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+	if (!dev->mode_config.tv_flicker_reduction_property)
+		goto nomem;
+
+	dev->mode_config.tv_overscan_property =
+		drm_property_create_range(dev, 0, "overscan", 0, 100);
+	if (!dev->mode_config.tv_overscan_property)
+		goto nomem;
+
+	dev->mode_config.tv_saturation_property =
+		drm_property_create_range(dev, 0, "saturation", 0, 100);
+	if (!dev->mode_config.tv_saturation_property)
+		goto nomem;
+
+	dev->mode_config.tv_hue_property =
+		drm_property_create_range(dev, 0, "hue", 0, 100);
+	if (!dev->mode_config.tv_hue_property)
+		goto nomem;
+
+	return 0;
+nomem:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
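
A sketch of a TV-out driver's init sequence built on this helper (the mode
names are illustrative; each driver supplies its own list):

	static const char * const tv_modes[] = { "NTSC", "PAL" };

	ret = drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes),
					    tv_modes);
	if (ret)
		return ret;

	drm_object_attach_property(&connector->base,
				   dev->mode_config.tv_mode_property, 0);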
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed; the property must then be
+ * attached to the desired connectors.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+	struct drm_property *scaling_mode;
+
+	if (dev->mode_config.scaling_mode_property)
+		return 0;
+
+	scaling_mode =
+		drm_property_create_enum(dev, 0, "scaling mode",
+				drm_scaling_mode_enum_list,
+				    ARRAY_SIZE(drm_scaling_mode_enum_list));
+
+	dev->mode_config.scaling_mode_property = scaling_mode;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+/**
+ * drm_mode_create_aspect_ratio_property - create aspect ratio property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed; the property must then be
+ * attached to the desired connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
+{
+	if (dev->mode_config.aspect_ratio_property)
+		return 0;
+
+	dev->mode_config.aspect_ratio_property =
+		drm_property_create_enum(dev, 0, "aspect ratio",
+				drm_aspect_ratio_enum_list,
+				ARRAY_SIZE(drm_aspect_ratio_enum_list));
+
+	if (dev->mode_config.aspect_ratio_property == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
+
+/**
+ * drm_mode_create_suggested_offset_properties - create suggested offset properties
+ * @dev: DRM device
+ *
+ * Create the suggested x/y offset properties for connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
+{
+	if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
+		return 0;
+
+	dev->mode_config.suggested_x_property =
+		drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
+
+	dev->mode_config.suggested_y_property =
+		drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
+
+	if (dev->mode_config.suggested_x_property == NULL ||
+	    dev->mode_config.suggested_y_property == NULL)
+		return -ENOMEM;
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
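
The created properties only take effect once attached to a connector; a
minimal sketch:

	ret = drm_mode_create_suggested_offset_properties(dev);
	if (ret)
		return ret;

	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);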
+
+/**
+ * drm_mode_connector_set_path_property - set path property on connector
+ * @connector: connector to set property on.
+ * @path: path to use for property; must not be NULL.
+ *
+ * This creates a property to expose the connector path to userspace. This is
+ * mainly used for DisplayPort MST where connectors have a topology and we
+ * want to allow userspace to give them more meaningful names.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+					 const char *path)
+{
+	struct drm_device *dev = connector->dev;
+	int ret;
+
+	ret = drm_property_replace_global_blob(dev,
+	                                       &connector->path_blob_ptr,
+	                                       strlen(path) + 1,
+	                                       path,
+	                                       &connector->base,
+	                                       dev->mode_config.path_property);
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_path_property);
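
A hedged sketch of a caller building such a path for an MST port (the string
format and the branch_id/port_num variables are illustrative, not the exact
layout the MST code emits):

	char *path;
	int ret;

	path = kasprintf(GFP_KERNEL, "mst:%d-%d", branch_id, port_num);
	if (!path)
		return -ENOMEM;

	ret = drm_mode_connector_set_path_property(connector, path);
	kfree(path);
	return ret;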
+
+/**
+ * drm_mode_connector_set_tile_property - set tile property on connector
+ * @connector: connector to set property on.
+ *
+ * This looks up the tile information for a connector, and creates a
+ * property for userspace to parse if it exists. The property is a string
+ * of 8 integers using ':' as a separator.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	char tile[256];
+	int ret;
+
+	if (!connector->has_tile) {
+		ret = drm_property_replace_global_blob(dev,
+		                                       &connector->tile_blob_ptr,
+		                                       0,
+		                                       NULL,
+		                                       &connector->base,
+		                                       dev->mode_config.tile_property);
+		return ret;
+	}
+
+	snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
+		 connector->tile_group->id, connector->tile_is_single_monitor,
+		 connector->num_h_tile, connector->num_v_tile,
+		 connector->tile_h_loc, connector->tile_v_loc,
+		 connector->tile_h_size, connector->tile_v_size);
+
+	ret = drm_property_replace_global_blob(dev,
+	                                       &connector->tile_blob_ptr,
+	                                       strlen(tile) + 1,
+	                                       tile,
+	                                       &connector->base,
+	                                       dev->mode_config.tile_property);
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+
+/**
+ * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * @connector: drm connector
+ * @edid: new value of the edid property
+ *
+ * This function creates a new blob modeset object and assigns its id to the
+ * connector's edid property.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+					    const struct edid *edid)
+{
+	struct drm_device *dev = connector->dev;
+	size_t size = 0;
+	int ret;
+
+	/* ignore requests to set edid when overridden */
+	if (connector->override_edid)
+		return 0;
+
+	if (edid)
+		size = EDID_LENGTH * (1 + edid->extensions);
+
+	ret = drm_property_replace_global_blob(dev,
+					       &connector->edid_blob_ptr,
+	                                       size,
+	                                       edid,
+	                                       &connector->base,
+	                                       dev->mode_config.edid_property);
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
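
The canonical caller is a connector's ->get_modes() implementation, roughly
as below (foo_connector_ddc() standing in for however the driver finds its
DDC i2c adapter):

	static int foo_connector_get_modes(struct drm_connector *connector)
	{
		struct edid *edid;
		int count = 0;

		edid = drm_get_edid(connector, foo_connector_ddc(connector));
		drm_mode_connector_update_edid_property(connector, edid);
		if (edid) {
			count = drm_add_edid_modes(connector, edid);
			kfree(edid);
		}

		return count;
	}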
+
+int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+				    struct drm_property *property,
+				    uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_connector *connector = obj_to_connector(obj);
+
+	/* Do DPMS ourselves */
+	if (property == connector->dev->mode_config.dpms_property)
+		ret = (*connector->funcs->dpms)(connector, (int)value);
+	else if (connector->funcs->set_property)
+		ret = connector->funcs->set_property(connector, property, value);
+
+	/* store the property value if successful */
+	if (!ret)
+		drm_object_property_set_value(&connector->base, property, value);
+	return ret;
+}
+
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+				       void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_connector_set_property *conn_set_prop = data;
+	struct drm_mode_obj_set_property obj_set_prop = {
+		.value = conn_set_prop->value,
+		.prop_id = conn_set_prop->prop_id,
+		.obj_id = conn_set_prop->connector_id,
+		.obj_type = DRM_MODE_OBJECT_CONNECTOR
+	};
+
+	/* It does all the locking and checking we need */
+	return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
+{
+	/* For atomic drivers only state objects are synchronously updated and
+	 * protected by modeset locks, so check those first. */
+	if (connector->state)
+		return connector->state->best_encoder;
+	return connector->encoder;
+}
+
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+					 const struct drm_file *file_priv)
+{
+	/*
+	 * If user-space hasn't configured the driver to expose the stereo 3D
+	 * modes, don't expose them.
+	 */
+	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+		return false;
+
+	return true;
+}
+
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_mode_get_connector *out_resp = data;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_display_mode *mode;
+	int mode_count = 0;
+	int encoders_count = 0;
+	int ret = 0;
+	int copied = 0;
+	int i;
+	struct drm_mode_modeinfo u_mode;
+	struct drm_mode_modeinfo __user *mode_ptr;
+	uint32_t __user *encoder_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	connector = drm_connector_lookup(dev, out_resp->connector_id);
+	if (!connector) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+		if (connector->encoder_ids[i] != 0)
+			encoders_count++;
+
+	if (out_resp->count_modes == 0) {
+		connector->funcs->fill_modes(connector,
+					     dev->mode_config.max_width,
+					     dev->mode_config.max_height);
+	}
+
+	/* delayed so we get modes regardless of pre-fill_modes state */
+	list_for_each_entry(mode, &connector->modes, head)
+		if (drm_mode_expose_to_userspace(mode, file_priv))
+			mode_count++;
+
+	out_resp->connector_id = connector->base.id;
+	out_resp->connector_type = connector->connector_type;
+	out_resp->connector_type_id = connector->connector_type_id;
+	out_resp->mm_width = connector->display_info.width_mm;
+	out_resp->mm_height = connector->display_info.height_mm;
+	out_resp->subpixel = connector->display_info.subpixel_order;
+	out_resp->connection = connector->status;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	encoder = drm_connector_get_encoder(connector);
+	if (encoder)
+		out_resp->encoder_id = encoder->base.id;
+	else
+		out_resp->encoder_id = 0;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the second time to fill it.
+	 */
+	if ((out_resp->count_modes >= mode_count) && mode_count) {
+		copied = 0;
+		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
+		list_for_each_entry(mode, &connector->modes, head) {
+			if (!drm_mode_expose_to_userspace(mode, file_priv))
+				continue;
+
+			drm_mode_convert_to_umode(&u_mode, mode);
+			if (copy_to_user(mode_ptr + copied,
+					 &u_mode, sizeof(u_mode))) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
+		}
+	}
+	out_resp->count_modes = mode_count;
+
+	ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+			(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+			&out_resp->count_props);
+	if (ret)
+		goto out;
+
+	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+		copied = 0;
+		encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (connector->encoder_ids[i] != 0) {
+				if (put_user(connector->encoder_ids[i],
+					     encoder_ptr + copied)) {
+					ret = -EFAULT;
+					goto out;
+				}
+				copied++;
+			}
+		}
+	}
+	out_resp->count_encoders = encoders_count;
+
+out:
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+	drm_connector_unreference(connector);
+out_unlock:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
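From userspace the two-pass allocation dance of this ioctl is normally hidden
behind libdrm; a minimal consumer looks roughly like this (error handling
trimmed):

	#include <stdio.h>
	#include <xf86drmMode.h>

	static void dump_connectors(int fd)
	{
		drmModeRes *res = drmModeGetResources(fd);
		int i;

		for (i = 0; res && i < res->count_connectors; i++) {
			drmModeConnector *conn =
				drmModeGetConnector(fd, res->connectors[i]);

			if (conn && conn->connection == DRM_MODE_CONNECTED)
				printf("connector %u: %d modes\n",
				       conn->connector_id, conn->count_modes);
			drmModeFreeConnector(conn);
		}
		drmModeFreeResources(res);
	}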
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e92bb9d..631691b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -40,39 +40,14 @@
 #include <drm/drm_modeset_lock.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_auth.h>
+#include <drm/drm_framebuffer.h>
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
 
-static struct drm_framebuffer *
-internal_framebuffer_create(struct drm_device *dev,
-			    const struct drm_mode_fb_cmd2 *r,
-			    struct drm_file *file_priv);
-
-/* Avoid boilerplate.  I'm tired of typing. */
-#define DRM_ENUM_NAME_FN(fnname, list)				\
-	const char *fnname(int val)				\
-	{							\
-		int i;						\
-		for (i = 0; i < ARRAY_SIZE(list); i++) {	\
-			if (list[i].type == val)		\
-				return list[i].name;		\
-		}						\
-		return "(unknown)";				\
-	}
-
 /*
  * Global properties
  */
-static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
-	{ DRM_MODE_DPMS_ON, "On" },
-	{ DRM_MODE_DPMS_STANDBY, "Standby" },
-	{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
-	{ DRM_MODE_DPMS_OFF, "Off" }
-};
-
-DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
-
 static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
 	{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
 	{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
@@ -82,320 +57,6 @@
 /*
  * Optional properties
  */
-static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
-	{ DRM_MODE_SCALE_NONE, "None" },
-	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
-	{ DRM_MODE_SCALE_CENTER, "Center" },
-	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
-};
-
-static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
-	{ DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
-	{ DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
-	{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
-};
-
-/*
- * Non-global properties, but "required" for certain connectors.
- */
-static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
-	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
-	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
-	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
-
-static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
-	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
-	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
-	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
-		 drm_dvi_i_subconnector_enum_list)
-
-static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
-	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
-	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-
-static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
-	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
-	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
-		 drm_tv_subconnector_enum_list)
-
-static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
-	{ DRM_MODE_DIRTY_OFF,      "Off"      },
-	{ DRM_MODE_DIRTY_ON,       "On"       },
-	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
-};
-
-struct drm_conn_prop_enum_list {
-	int type;
-	const char *name;
-	struct ida ida;
-};
-
-/*
- * Connector and encoder types.
- */
-static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
-	{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
-	{ DRM_MODE_CONNECTOR_VGA, "VGA" },
-	{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
-	{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
-	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
-	{ DRM_MODE_CONNECTOR_Composite, "Composite" },
-	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
-	{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
-	{ DRM_MODE_CONNECTOR_Component, "Component" },
-	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
-	{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
-	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
-	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
-	{ DRM_MODE_CONNECTOR_TV, "TV" },
-	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
-	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
-	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
-	{ DRM_MODE_CONNECTOR_DPI, "DPI" },
-};
-
-static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
-	{ DRM_MODE_ENCODER_NONE, "None" },
-	{ DRM_MODE_ENCODER_DAC, "DAC" },
-	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
-	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
-	{ DRM_MODE_ENCODER_TVDAC, "TV" },
-	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
-	{ DRM_MODE_ENCODER_DSI, "DSI" },
-	{ DRM_MODE_ENCODER_DPMST, "DP MST" },
-	{ DRM_MODE_ENCODER_DPI, "DPI" },
-};
-
-static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
-	{ SubPixelUnknown, "Unknown" },
-	{ SubPixelHorizontalRGB, "Horizontal RGB" },
-	{ SubPixelHorizontalBGR, "Horizontal BGR" },
-	{ SubPixelVerticalRGB, "Vertical RGB" },
-	{ SubPixelVerticalBGR, "Vertical BGR" },
-	{ SubPixelNone, "None" },
-};
-
-void drm_connector_ida_init(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
-		ida_init(&drm_connector_enum_list[i].ida);
-}
-
-void drm_connector_ida_destroy(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
-		ida_destroy(&drm_connector_enum_list[i].ida);
-}
-
-/**
- * drm_get_connector_status_name - return a string for connector status
- * @status: connector status to compute name of
- *
- * In contrast to the other drm_get_*_name functions this one here returns a
- * const pointer and hence is threadsafe.
- */
-const char *drm_get_connector_status_name(enum drm_connector_status status)
-{
-	if (status == connector_status_connected)
-		return "connected";
-	else if (status == connector_status_disconnected)
-		return "disconnected";
-	else
-		return "unknown";
-}
-EXPORT_SYMBOL(drm_get_connector_status_name);
-
-/**
- * drm_get_subpixel_order_name - return a string for a given subpixel enum
- * @order: enum of subpixel_order
- *
- * Note you could abuse this and return something out of bounds, but that
- * would be a caller error.  No unscrubbed user data should make it here.
- */
-const char *drm_get_subpixel_order_name(enum subpixel_order order)
-{
-	return drm_subpixel_enum_list[order].name;
-}
-EXPORT_SYMBOL(drm_get_subpixel_order_name);
-
-/*
- * Internal function to assign a slot in the object idr and optionally
- * register the object into the idr.
- */
-static int drm_mode_object_get_reg(struct drm_device *dev,
-				   struct drm_mode_object *obj,
-				   uint32_t obj_type,
-				   bool register_obj,
-				   void (*obj_free_cb)(struct kref *kref))
-{
-	int ret;
-
-	mutex_lock(&dev->mode_config.idr_mutex);
-	ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
-	if (ret >= 0) {
-		/*
-		 * Set up the object linking under the protection of the idr
-		 * lock so that other users can't see inconsistent state.
-		 */
-		obj->id = ret;
-		obj->type = obj_type;
-		if (obj_free_cb) {
-			obj->free_cb = obj_free_cb;
-			kref_init(&obj->refcount);
-		}
-	}
-	mutex_unlock(&dev->mode_config.idr_mutex);
-
-	return ret < 0 ? ret : 0;
-}
-
-/**
- * drm_mode_object_get - allocate a new modeset identifier
- * @dev: DRM device
- * @obj: object pointer, used to generate unique ID
- * @obj_type: object type
- *
- * Create a unique identifier based on @ptr in @dev's identifier space.  Used
- * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
- * modeset identifiers are _not_ reference counted. Hence don't use this for
- * reference counted modeset objects like framebuffers.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_mode_object_get(struct drm_device *dev,
-			struct drm_mode_object *obj, uint32_t obj_type)
-{
-	return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL);
-}
-
-static void drm_mode_object_register(struct drm_device *dev,
-				     struct drm_mode_object *obj)
-{
-	mutex_lock(&dev->mode_config.idr_mutex);
-	idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
-	mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-/**
- * drm_mode_object_unregister - free a modeset identifer
- * @dev: DRM device
- * @object: object to free
- *
- * Free @id from @dev's unique identifier pool.
- * This function can be called multiple times, and guards against
- * multiple removals.
- * These modeset identifiers are _not_ reference counted. Hence don't use this
- * for reference counted modeset objects like framebuffers.
- */
-void drm_mode_object_unregister(struct drm_device *dev,
-			 struct drm_mode_object *object)
-{
-	mutex_lock(&dev->mode_config.idr_mutex);
-	if (object->id) {
-		idr_remove(&dev->mode_config.crtc_idr, object->id);
-		object->id = 0;
-	}
-	mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-static struct drm_mode_object *_object_find(struct drm_device *dev,
-		uint32_t id, uint32_t type)
-{
-	struct drm_mode_object *obj = NULL;
-
-	mutex_lock(&dev->mode_config.idr_mutex);
-	obj = idr_find(&dev->mode_config.crtc_idr, id);
-	if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
-		obj = NULL;
-	if (obj && obj->id != id)
-		obj = NULL;
-
-	if (obj && obj->free_cb) {
-		if (!kref_get_unless_zero(&obj->refcount))
-			obj = NULL;
-	}
-	mutex_unlock(&dev->mode_config.idr_mutex);
-
-	return obj;
-}
-
-/**
- * drm_mode_object_find - look up a drm object with static lifetime
- * @dev: drm device
- * @id: id of the mode object
- * @type: type of the mode object
- *
- * This function is used to look up a modeset object. It will acquire a
- * reference for reference counted objects. This reference must be dropped again
- * by callind drm_mode_object_unreference().
- */
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
-		uint32_t id, uint32_t type)
-{
-	struct drm_mode_object *obj = NULL;
-
-	obj = _object_find(dev, id, type);
-	return obj;
-}
-EXPORT_SYMBOL(drm_mode_object_find);
-
-/**
- * drm_mode_object_unreference - decr the object refcnt
- * @obj: mode_object
- *
- * This functions decrements the object's refcount if it is a refcounted modeset
- * object. It is a no-op on any other object. This is used to drop references
- * acquired with drm_mode_object_reference().
- */
-void drm_mode_object_unreference(struct drm_mode_object *obj)
-{
-	if (obj->free_cb) {
-		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
-		kref_put(&obj->refcount, obj->free_cb);
-	}
-}
-EXPORT_SYMBOL(drm_mode_object_unreference);
-
-/**
- * drm_mode_object_reference - incr the object refcnt
- * @obj: mode_object
- *
- * This functions increments the object's refcount if it is a refcounted modeset
- * object. It is a no-op on any other object. References should be dropped again
- * by calling drm_mode_object_unreference().
- */
-void drm_mode_object_reference(struct drm_mode_object *obj)
-{
-	if (obj->free_cb) {
-		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
-		kref_get(&obj->refcount);
-	}
-}
-EXPORT_SYMBOL(drm_mode_object_reference);
-
 /**
  * drm_crtc_force_disable - Forcibly turn off a CRTC
  * @crtc: CRTC to turn off
@@ -441,199 +102,6 @@
 }
 EXPORT_SYMBOL(drm_crtc_force_disable_all);
 
-static void drm_framebuffer_free(struct kref *kref)
-{
-	struct drm_framebuffer *fb =
-			container_of(kref, struct drm_framebuffer, base.refcount);
-	struct drm_device *dev = fb->dev;
-
-	/*
-	 * The lookup idr holds a weak reference, which has not necessarily been
-	 * removed at this point. Check for that.
-	 */
-	drm_mode_object_unregister(dev, &fb->base);
-
-	fb->funcs->destroy(fb);
-}
-
-/**
- * drm_framebuffer_init - initialize a framebuffer
- * @dev: DRM device
- * @fb: framebuffer to be initialized
- * @funcs: ... with these functions
- *
- * Allocates an ID for the framebuffer's parent mode object, sets its mode
- * functions & device file and adds it to the master fd list.
- *
- * IMPORTANT:
- * This functions publishes the fb and makes it available for concurrent access
- * by other users. Which means by this point the fb _must_ be fully set up -
- * since all the fb attributes are invariant over its lifetime, no further
- * locking but only correct reference counting is required.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
-			 const struct drm_framebuffer_funcs *funcs)
-{
-	int ret;
-
-	INIT_LIST_HEAD(&fb->filp_head);
-	fb->dev = dev;
-	fb->funcs = funcs;
-
-	ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
-				      false, drm_framebuffer_free);
-	if (ret)
-		goto out;
-
-	mutex_lock(&dev->mode_config.fb_lock);
-	dev->mode_config.num_fb++;
-	list_add(&fb->head, &dev->mode_config.fb_list);
-	mutex_unlock(&dev->mode_config.fb_lock);
-
-	drm_mode_object_register(dev, &fb->base);
-out:
-	return ret;
-}
-EXPORT_SYMBOL(drm_framebuffer_init);
-
-/**
- * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
- * @dev: drm device
- * @id: id of the fb object
- *
- * If successful, this grabs an additional reference to the framebuffer -
- * callers need to make sure to eventually unreference the returned framebuffer
- * again, using @drm_framebuffer_unreference.
- */
-struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
-					       uint32_t id)
-{
-	struct drm_mode_object *obj;
-	struct drm_framebuffer *fb = NULL;
-
-	obj = _object_find(dev, id, DRM_MODE_OBJECT_FB);
-	if (obj)
-		fb = obj_to_fb(obj);
-	return fb;
-}
-EXPORT_SYMBOL(drm_framebuffer_lookup);
-
-/**
- * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
- * @fb: fb to unregister
- *
- * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
- * those used for fbdev. Note that the caller must hold a reference of it's own,
- * i.e. the object may not be destroyed through this call (since it'll lead to a
- * locking inversion).
- */
-void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
-{
-	struct drm_device *dev;
-
-	if (!fb)
-		return;
-
-	dev = fb->dev;
-
-	/* Mark fb as reaped and drop idr ref. */
-	drm_mode_object_unregister(dev, &fb->base);
-}
-EXPORT_SYMBOL(drm_framebuffer_unregister_private);
-
-/**
- * drm_framebuffer_cleanup - remove a framebuffer object
- * @fb: framebuffer to remove
- *
- * Cleanup framebuffer. This function is intended to be used from the drivers
- * ->destroy callback. It can also be used to clean up driver private
- * framebuffers embedded into a larger structure.
- *
- * Note that this function does not remove the fb from active usuage - if it is
- * still used anywhere, hilarity can ensue since userspace could call getfb on
- * the id and get back -EINVAL. Obviously no concern at driver unload time.
- *
- * Also, the framebuffer will not be removed from the lookup idr - for
- * user-created framebuffers this will happen in in the rmfb ioctl. For
- * driver-private objects (e.g. for fbdev) drivers need to explicitly call
- * drm_framebuffer_unregister_private.
- */
-void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
-{
-	struct drm_device *dev = fb->dev;
-
-	mutex_lock(&dev->mode_config.fb_lock);
-	list_del(&fb->head);
-	dev->mode_config.num_fb--;
-	mutex_unlock(&dev->mode_config.fb_lock);
-}
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
-
-/**
- * drm_framebuffer_remove - remove and unreference a framebuffer object
- * @fb: framebuffer to remove
- *
- * Scans all the CRTCs and planes in @dev's mode_config.  If they're
- * using @fb, removes it, setting it to NULL. Then drops the reference to the
- * passed-in framebuffer. Might take the modeset locks.
- *
- * Note that this function optimizes the cleanup away if the caller holds the
- * last reference to the framebuffer. It is also guaranteed to not take the
- * modeset locks in this case.
- */
-void drm_framebuffer_remove(struct drm_framebuffer *fb)
-{
-	struct drm_device *dev;
-	struct drm_crtc *crtc;
-	struct drm_plane *plane;
-
-	if (!fb)
-		return;
-
-	dev = fb->dev;
-
-	WARN_ON(!list_empty(&fb->filp_head));
-
-	/*
-	 * drm ABI mandates that we remove any deleted framebuffers from active
-	 * useage. But since most sane clients only remove framebuffers they no
-	 * longer need, try to optimize this away.
-	 *
-	 * Since we're holding a reference ourselves, observing a refcount of 1
-	 * means that we're the last holder and can skip it. Also, the refcount
-	 * can never increase from 1 again, so we don't need any barriers or
-	 * locks.
-	 *
-	 * Note that userspace could try to race with use and instate a new
-	 * usage _after_ we've cleared all current ones. End result will be an
-	 * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
-	 * in this manner.
-	 */
-	if (drm_framebuffer_read_refcount(fb) > 1) {
-		drm_modeset_lock_all(dev);
-		/* remove from any CRTC */
-		drm_for_each_crtc(crtc, dev) {
-			if (crtc->primary->fb == fb) {
-				/* should turn off the crtc */
-				if (drm_crtc_force_disable(crtc))
-					DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
-			}
-		}
-
-		drm_for_each_plane(plane, dev) {
-			if (plane->fb == fb)
-				drm_plane_force_disable(plane);
-		}
-		drm_modeset_unlock_all(dev);
-	}
-
-	drm_framebuffer_unreference(fb);
-}
-EXPORT_SYMBOL(drm_framebuffer_remove);
-
 DEFINE_WW_CLASS(crtc_ww_class);
 
 static unsigned int drm_num_crtcs(struct drm_device *dev)
@@ -783,471 +251,6 @@
 }
 EXPORT_SYMBOL(drm_crtc_cleanup);
 
-/*
- * drm_mode_remove - remove and free a mode
- * @connector: connector list to modify
- * @mode: mode to remove
- *
- * Remove @mode from @connector's mode list, then free it.
- */
-static void drm_mode_remove(struct drm_connector *connector,
-			    struct drm_display_mode *mode)
-{
-	list_del(&mode->head);
-	drm_mode_destroy(connector->dev, mode);
-}
-
-/**
- * drm_display_info_set_bus_formats - set the supported bus formats
- * @info: display info to store bus formats in
- * @formats: array containing the supported bus formats
- * @num_formats: the number of entries in the fmts array
- *
- * Store the supported bus formats in display info structure.
- * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
- * a full list of available formats.
- */
-int drm_display_info_set_bus_formats(struct drm_display_info *info,
-				     const u32 *formats,
-				     unsigned int num_formats)
-{
-	u32 *fmts = NULL;
-
-	if (!formats && num_formats)
-		return -EINVAL;
-
-	if (formats && num_formats) {
-		fmts = kmemdup(formats, sizeof(*formats) * num_formats,
-			       GFP_KERNEL);
-		if (!fmts)
-			return -ENOMEM;
-	}
-
-	kfree(info->bus_formats);
-	info->bus_formats = fmts;
-	info->num_bus_formats = num_formats;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_display_info_set_bus_formats);
-
-/**
- * drm_connector_get_cmdline_mode - reads the user's cmdline mode
- * @connector: connector to quwery
- *
- * The kernel supports per-connector configration of its consoles through
- * use of the video= parameter. This function parses that option and
- * extracts the user's specified mode (or enable/disable status) for a
- * particular connector. This is typically only used during the early fbdev
- * setup.
- */
-static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
-{
-	struct drm_cmdline_mode *mode = &connector->cmdline_mode;
-	char *option = NULL;
-
-	if (fb_get_options(connector->name, &option))
-		return;
-
-	if (!drm_mode_parse_command_line_for_connector(option,
-						       connector,
-						       mode))
-		return;
-
-	if (mode->force) {
-		const char *s;
-
-		switch (mode->force) {
-		case DRM_FORCE_OFF:
-			s = "OFF";
-			break;
-		case DRM_FORCE_ON_DIGITAL:
-			s = "ON - dig";
-			break;
-		default:
-		case DRM_FORCE_ON:
-			s = "ON";
-			break;
-		}
-
-		DRM_INFO("forcing %s connector %s\n", connector->name, s);
-		connector->force = mode->force;
-	}
-
-	DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
-		      connector->name,
-		      mode->xres, mode->yres,
-		      mode->refresh_specified ? mode->refresh : 60,
-		      mode->rb ? " reduced blanking" : "",
-		      mode->margins ? " with margins" : "",
-		      mode->interlace ?  " interlaced" : "");
-}
-
-static void drm_connector_free(struct kref *kref)
-{
-	struct drm_connector *connector =
-		container_of(kref, struct drm_connector, base.refcount);
-	struct drm_device *dev = connector->dev;
-
-	drm_mode_object_unregister(dev, &connector->base);
-	connector->funcs->destroy(connector);
-}
-
-/**
- * drm_connector_init - Init a preallocated connector
- * @dev: DRM device
- * @connector: the connector to init
- * @funcs: callbacks for this connector
- * @connector_type: user visible type of the connector
- *
- * Initialises a preallocated connector. Connectors should be
- * subclassed as part of driver connector objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_init(struct drm_device *dev,
-		       struct drm_connector *connector,
-		       const struct drm_connector_funcs *funcs,
-		       int connector_type)
-{
-	struct drm_mode_config *config = &dev->mode_config;
-	int ret;
-	struct ida *connector_ida =
-		&drm_connector_enum_list[connector_type].ida;
-
-	drm_modeset_lock_all(dev);
-
-	ret = drm_mode_object_get_reg(dev, &connector->base,
-				      DRM_MODE_OBJECT_CONNECTOR,
-				      false, drm_connector_free);
-	if (ret)
-		goto out_unlock;
-
-	connector->base.properties = &connector->properties;
-	connector->dev = dev;
-	connector->funcs = funcs;
-
-	ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
-	if (ret < 0)
-		goto out_put;
-	connector->index = ret;
-	ret = 0;
-
-	connector->connector_type = connector_type;
-	connector->connector_type_id =
-		ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
-	if (connector->connector_type_id < 0) {
-		ret = connector->connector_type_id;
-		goto out_put_id;
-	}
-	connector->name =
-		kasprintf(GFP_KERNEL, "%s-%d",
-			  drm_connector_enum_list[connector_type].name,
-			  connector->connector_type_id);
-	if (!connector->name) {
-		ret = -ENOMEM;
-		goto out_put_type_id;
-	}
-
-	INIT_LIST_HEAD(&connector->probed_modes);
-	INIT_LIST_HEAD(&connector->modes);
-	connector->edid_blob_ptr = NULL;
-	connector->status = connector_status_unknown;
-
-	drm_connector_get_cmdline_mode(connector);
-
-	/* We should add connectors at the end to avoid upsetting the connector
-	 * index too much. */
-	list_add_tail(&connector->head, &config->connector_list);
-	config->num_connector++;
-
-	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
-		drm_object_attach_property(&connector->base,
-					      config->edid_property,
-					      0);
-
-	drm_object_attach_property(&connector->base,
-				      config->dpms_property, 0);
-
-	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
-		drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
-	}
-
-	connector->debugfs_entry = NULL;
-out_put_type_id:
-	if (ret)
-		ida_remove(connector_ida, connector->connector_type_id);
-out_put_id:
-	if (ret)
-		ida_remove(&config->connector_ida, connector->index);
-out_put:
-	if (ret)
-		drm_mode_object_unregister(dev, &connector->base);
-
-out_unlock:
-	drm_modeset_unlock_all(dev);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_connector_init);
-
-/**
- * drm_connector_cleanup - cleans up an initialised connector
- * @connector: connector to cleanup
- *
- * Cleans up the connector but doesn't free the object.
- */
-void drm_connector_cleanup(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_display_mode *mode, *t;
-
-	/* The connector should have been removed from userspace long before
-	 * it is finally destroyed.
-	 */
-	if (WARN_ON(connector->registered))
-		drm_connector_unregister(connector);
-
-	if (connector->tile_group) {
-		drm_mode_put_tile_group(dev, connector->tile_group);
-		connector->tile_group = NULL;
-	}
-
-	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
-		drm_mode_remove(connector, mode);
-
-	list_for_each_entry_safe(mode, t, &connector->modes, head)
-		drm_mode_remove(connector, mode);
-
-	ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
-		   connector->connector_type_id);
-
-	ida_remove(&dev->mode_config.connector_ida,
-		   connector->index);
-
-	kfree(connector->display_info.bus_formats);
-	drm_mode_object_unregister(dev, &connector->base);
-	kfree(connector->name);
-	connector->name = NULL;
-	list_del(&connector->head);
-	dev->mode_config.num_connector--;
-
-	WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
-	if (connector->state && connector->funcs->atomic_destroy_state)
-		connector->funcs->atomic_destroy_state(connector,
-						       connector->state);
-
-	memset(connector, 0, sizeof(*connector));
-}
-EXPORT_SYMBOL(drm_connector_cleanup);
-
-/**
- * drm_connector_register - register a connector
- * @connector: the connector to register
- *
- * Register userspace interfaces for a connector
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_register(struct drm_connector *connector)
-{
-	int ret;
-
-	if (connector->registered)
-		return 0;
-
-	ret = drm_sysfs_connector_add(connector);
-	if (ret)
-		return ret;
-
-	ret = drm_debugfs_connector_add(connector);
-	if (ret) {
-		goto err_sysfs;
-	}
-
-	if (connector->funcs->late_register) {
-		ret = connector->funcs->late_register(connector);
-		if (ret)
-			goto err_debugfs;
-	}
-
-	drm_mode_object_register(connector->dev, &connector->base);
-
-	connector->registered = true;
-	return 0;
-
-err_debugfs:
-	drm_debugfs_connector_remove(connector);
-err_sysfs:
-	drm_sysfs_connector_remove(connector);
-	return ret;
-}
-EXPORT_SYMBOL(drm_connector_register);
-
-/**
- * drm_connector_unregister - unregister a connector
- * @connector: the connector to unregister
- *
- * Unregister userspace interfaces for a connector
- */
-void drm_connector_unregister(struct drm_connector *connector)
-{
-	if (!connector->registered)
-		return;
-
-	if (connector->funcs->early_unregister)
-		connector->funcs->early_unregister(connector);
-
-	drm_sysfs_connector_remove(connector);
-	drm_debugfs_connector_remove(connector);
-
-	connector->registered = false;
-}
-EXPORT_SYMBOL(drm_connector_unregister);
-
-static void drm_connector_unregister_all(struct drm_device *dev)
-{
-	struct drm_connector *connector;
-
-	/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-		drm_connector_unregister(connector);
-}
-
-static int drm_connector_register_all(struct drm_device *dev)
-{
-	struct drm_connector *connector;
-	int ret;
-
-	/* FIXME: taking the mode config mutex ends up in a clash with
-	 * fbcon/backlight registration */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		ret = drm_connector_register(connector);
-		if (ret)
-			goto err;
-	}
-
-	return 0;
-
-err:
-	mutex_unlock(&dev->mode_config.mutex);
-	drm_connector_unregister_all(dev);
-	return ret;
-}
-
-static int drm_encoder_register_all(struct drm_device *dev)
-{
-	struct drm_encoder *encoder;
-	int ret = 0;
-
-	drm_for_each_encoder(encoder, dev) {
-		if (encoder->funcs->late_register)
-			ret = encoder->funcs->late_register(encoder);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static void drm_encoder_unregister_all(struct drm_device *dev)
-{
-	struct drm_encoder *encoder;
-
-	drm_for_each_encoder(encoder, dev) {
-		if (encoder->funcs->early_unregister)
-			encoder->funcs->early_unregister(encoder);
-	}
-}
-
-/**
- * drm_encoder_init - Init a preallocated encoder
- * @dev: drm device
- * @encoder: the encoder to init
- * @funcs: callbacks for this encoder
- * @encoder_type: user visible type of the encoder
- * @name: printf style format string for the encoder name, or NULL for default name
- *
- * Initialises a preallocated encoder. Encoder should be
- * subclassed as part of driver encoder objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_encoder_init(struct drm_device *dev,
-		      struct drm_encoder *encoder,
-		      const struct drm_encoder_funcs *funcs,
-		      int encoder_type, const char *name, ...)
-{
-	int ret;
-
-	drm_modeset_lock_all(dev);
-
-	ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
-	if (ret)
-		goto out_unlock;
-
-	encoder->dev = dev;
-	encoder->encoder_type = encoder_type;
-	encoder->funcs = funcs;
-	if (name) {
-		va_list ap;
-
-		va_start(ap, name);
-		encoder->name = kvasprintf(GFP_KERNEL, name, ap);
-		va_end(ap);
-	} else {
-		encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
-					  drm_encoder_enum_list[encoder_type].name,
-					  encoder->base.id);
-	}
-	if (!encoder->name) {
-		ret = -ENOMEM;
-		goto out_put;
-	}
-
-	list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
-	encoder->index = dev->mode_config.num_encoder++;
-
-out_put:
-	if (ret)
-		drm_mode_object_unregister(dev, &encoder->base);
-
-out_unlock:
-	drm_modeset_unlock_all(dev);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_encoder_init);
-
-/**
- * drm_encoder_cleanup - cleans up an initialised encoder
- * @encoder: encoder to cleanup
- *
- * Cleans up the encoder but doesn't free the object.
- */
-void drm_encoder_cleanup(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-
-	/* Note that the encoder_list is considered to be static; should we
-	 * remove the drm_encoder at runtime we would have to decrement all
-	 * the indices on the drm_encoder after us in the encoder_list.
-	 */
-
-	drm_modeset_lock_all(dev);
-	drm_mode_object_unregister(dev, &encoder->base);
-	kfree(encoder->name);
-	list_del(&encoder->head);
-	dev->mode_config.num_encoder--;
-	drm_modeset_unlock_all(dev);
-
-	memset(encoder, 0, sizeof(*encoder));
-}
-EXPORT_SYMBOL(drm_encoder_cleanup);
-
 static unsigned int drm_num_planes(struct drm_device *dev)
 {
 	unsigned int num = 0;
@@ -1266,7 +269,7 @@
  * @plane: plane object to init
  * @possible_crtcs: bitmask of possible CRTCs
  * @funcs: callbacks for the new plane
- * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
  * @format_count: number of elements in @formats
  * @type: type of plane (overlay, primary, cursor)
  * @name: printf style format string for the plane name, or NULL for default name
@@ -1381,7 +384,7 @@
  * @plane: plane object to init
  * @possible_crtcs: bitmask of possible CRTCs
  * @funcs: callbacks for the new plane
- * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
  * @format_count: number of elements in @formats
  * @is_primary: plane type (primary vs overlay)
  *
@@ -1540,39 +543,11 @@
 static int drm_mode_create_standard_properties(struct drm_device *dev)
 {
 	struct drm_property *prop;
+	int ret;
 
-	/*
-	 * Standard properties (apply to all connectors)
-	 */
-	prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
-				   DRM_MODE_PROP_IMMUTABLE,
-				   "EDID", 0);
-	if (!prop)
-		return -ENOMEM;
-	dev->mode_config.edid_property = prop;
-
-	prop = drm_property_create_enum(dev, 0,
-				   "DPMS", drm_dpms_enum_list,
-				   ARRAY_SIZE(drm_dpms_enum_list));
-	if (!prop)
-		return -ENOMEM;
-	dev->mode_config.dpms_property = prop;
-
-	prop = drm_property_create(dev,
-				   DRM_MODE_PROP_BLOB |
-				   DRM_MODE_PROP_IMMUTABLE,
-				   "PATH", 0);
-	if (!prop)
-		return -ENOMEM;
-	dev->mode_config.path_property = prop;
-
-	prop = drm_property_create(dev,
-				   DRM_MODE_PROP_BLOB |
-				   DRM_MODE_PROP_IMMUTABLE,
-				   "TILE", 0);
-	if (!prop)
-		return -ENOMEM;
-	dev->mode_config.tile_property = prop;
+	ret = drm_connector_create_standard_properties(dev);
+	if (ret)
+		return ret;
 
 	prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
 					"type", drm_plane_type_enum_list,
@@ -1693,250 +668,6 @@
 }
 
 /**
- * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
- * @dev: DRM device
- *
- * Called by a driver the first time a DVI-I connector is made.
- */
-int drm_mode_create_dvi_i_properties(struct drm_device *dev)
-{
-	struct drm_property *dvi_i_selector;
-	struct drm_property *dvi_i_subconnector;
-
-	if (dev->mode_config.dvi_i_select_subconnector_property)
-		return 0;
-
-	dvi_i_selector =
-		drm_property_create_enum(dev, 0,
-				    "select subconnector",
-				    drm_dvi_i_select_enum_list,
-				    ARRAY_SIZE(drm_dvi_i_select_enum_list));
-	dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
-
-	dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
-				    "subconnector",
-				    drm_dvi_i_subconnector_enum_list,
-				    ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
-	dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
-
-/**
- * drm_mode_create_tv_properties - create TV specific connector properties
- * @dev: DRM device
- * @num_modes: number of different TV formats (modes) supported
- * @modes: array of pointers to strings containing the name of each format
- *
- * Called by a driver's TV initialization routine, this function creates
- * the TV specific connector properties for a given device.  Caller is
- * responsible for allocating a list of format names and passing them to
- * this routine.
- */
-int drm_mode_create_tv_properties(struct drm_device *dev,
-				  unsigned int num_modes,
-				  const char * const modes[])
-{
-	struct drm_property *tv_selector;
-	struct drm_property *tv_subconnector;
-	unsigned int i;
-
-	if (dev->mode_config.tv_select_subconnector_property)
-		return 0;
-
-	/*
-	 * Basic connector properties
-	 */
-	tv_selector = drm_property_create_enum(dev, 0,
-					  "select subconnector",
-					  drm_tv_select_enum_list,
-					  ARRAY_SIZE(drm_tv_select_enum_list));
-	if (!tv_selector)
-		goto nomem;
-
-	dev->mode_config.tv_select_subconnector_property = tv_selector;
-
-	tv_subconnector =
-		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
-				    "subconnector",
-				    drm_tv_subconnector_enum_list,
-				    ARRAY_SIZE(drm_tv_subconnector_enum_list));
-	if (!tv_subconnector)
-		goto nomem;
-	dev->mode_config.tv_subconnector_property = tv_subconnector;
-
-	/*
-	 * Other, TV specific properties: margins & TV modes.
-	 */
-	dev->mode_config.tv_left_margin_property =
-		drm_property_create_range(dev, 0, "left margin", 0, 100);
-	if (!dev->mode_config.tv_left_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_right_margin_property =
-		drm_property_create_range(dev, 0, "right margin", 0, 100);
-	if (!dev->mode_config.tv_right_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_top_margin_property =
-		drm_property_create_range(dev, 0, "top margin", 0, 100);
-	if (!dev->mode_config.tv_top_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_bottom_margin_property =
-		drm_property_create_range(dev, 0, "bottom margin", 0, 100);
-	if (!dev->mode_config.tv_bottom_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_mode_property =
-		drm_property_create(dev, DRM_MODE_PROP_ENUM,
-				    "mode", num_modes);
-	if (!dev->mode_config.tv_mode_property)
-		goto nomem;
-
-	for (i = 0; i < num_modes; i++)
-		drm_property_add_enum(dev->mode_config.tv_mode_property, i,
-				      i, modes[i]);
-
-	dev->mode_config.tv_brightness_property =
-		drm_property_create_range(dev, 0, "brightness", 0, 100);
-	if (!dev->mode_config.tv_brightness_property)
-		goto nomem;
-
-	dev->mode_config.tv_contrast_property =
-		drm_property_create_range(dev, 0, "contrast", 0, 100);
-	if (!dev->mode_config.tv_contrast_property)
-		goto nomem;
-
-	dev->mode_config.tv_flicker_reduction_property =
-		drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
-	if (!dev->mode_config.tv_flicker_reduction_property)
-		goto nomem;
-
-	dev->mode_config.tv_overscan_property =
-		drm_property_create_range(dev, 0, "overscan", 0, 100);
-	if (!dev->mode_config.tv_overscan_property)
-		goto nomem;
-
-	dev->mode_config.tv_saturation_property =
-		drm_property_create_range(dev, 0, "saturation", 0, 100);
-	if (!dev->mode_config.tv_saturation_property)
-		goto nomem;
-
-	dev->mode_config.tv_hue_property =
-		drm_property_create_range(dev, 0, "hue", 0, 100);
-	if (!dev->mode_config.tv_hue_property)
-		goto nomem;
-
-	return 0;
-nomem:
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_create_tv_properties);
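
As the kernel-doc notes, the caller owns the list of format names; a short sketch of a driver-side call (the mode names here are illustrative):

static const char * const foo_tv_mode_names[] = {
	"NTSC-M", "PAL-B",	/* whatever the hardware supports */
};

ret = drm_mode_create_tv_properties(dev, ARRAY_SIZE(foo_tv_mode_names),
				    foo_tv_mode_names);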
-
-/**
- * drm_mode_create_scaling_mode_property - create scaling mode property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_scaling_mode_property(struct drm_device *dev)
-{
-	struct drm_property *scaling_mode;
-
-	if (dev->mode_config.scaling_mode_property)
-		return 0;
-
-	scaling_mode =
-		drm_property_create_enum(dev, 0, "scaling mode",
-				drm_scaling_mode_enum_list,
-				    ARRAY_SIZE(drm_scaling_mode_enum_list));
-
-	dev->mode_config.scaling_mode_property = scaling_mode;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
-
-/**
- * drm_mode_create_aspect_ratio_property - create aspect ratio property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
-{
-	if (dev->mode_config.aspect_ratio_property)
-		return 0;
-
-	dev->mode_config.aspect_ratio_property =
-		drm_property_create_enum(dev, 0, "aspect ratio",
-				drm_aspect_ratio_enum_list,
-				ARRAY_SIZE(drm_aspect_ratio_enum_list));
-
-	if (dev->mode_config.aspect_ratio_property == NULL)
-		return -ENOMEM;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
-
-/**
- * drm_mode_create_dirty_info_property - create dirty info property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dirty_info_property(struct drm_device *dev)
-{
-	struct drm_property *dirty_info;
-
-	if (dev->mode_config.dirty_info_property)
-		return 0;
-
-	dirty_info =
-		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
-				    "dirty",
-				    drm_dirty_info_enum_list,
-				    ARRAY_SIZE(drm_dirty_info_enum_list));
-	dev->mode_config.dirty_info_property = dirty_info;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
-
-/**
- * drm_mode_create_suggested_offset_properties - create suggested offset properties
- * @dev: DRM device
- *
- * Create the suggested x/y offset properties for connectors.
- */
-int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
-{
-	if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
-		return 0;
-
-	dev->mode_config.suggested_x_property =
-		drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
-
-	dev->mode_config.suggested_y_property =
-		drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
-
-	if (dev->mode_config.suggested_x_property == NULL ||
-	    dev->mode_config.suggested_y_property == NULL)
-		return -ENOMEM;
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
-
-/**
  * drm_mode_getresources - get graphics configuration
  * @dev: drm device for the ioctl
  * @data: data pointer for the ioctl
@@ -2123,263 +854,6 @@
 	return 0;
 }
 
-static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
-					 const struct drm_file *file_priv)
-{
-	/*
-	 * If user-space hasn't configured the driver to expose the stereo 3D
-	 * modes, don't expose them.
-	 */
-	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
-		return false;
-
-	return true;
-}
-
-static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
-{
-	/* For atomic drivers only state objects are synchronously updated and
-	 * protected by modeset locks, so check those first. */
-	if (connector->state)
-		return connector->state->best_encoder;
-	return connector->encoder;
-}
-
-/* helper for getconnector and getproperties ioctls */
-static int get_properties(struct drm_mode_object *obj, bool atomic,
-		uint32_t __user *prop_ptr, uint64_t __user *prop_values,
-		uint32_t *arg_count_props)
-{
-	int props_count;
-	int i, ret, copied;
-
-	props_count = obj->properties->count;
-	if (!atomic)
-		props_count -= obj->properties->atomic_count;
-
-	if ((*arg_count_props >= props_count) && props_count) {
-		for (i = 0, copied = 0; copied < props_count; i++) {
-			struct drm_property *prop = obj->properties->properties[i];
-			uint64_t val;
-
-			if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
-				continue;
-
-			ret = drm_object_property_get_value(obj, prop, &val);
-			if (ret)
-				return ret;
-
-			if (put_user(prop->base.id, prop_ptr + copied))
-				return -EFAULT;
-
-			if (put_user(val, prop_values + copied))
-				return -EFAULT;
-
-			copied++;
-		}
-	}
-	*arg_count_props = props_count;
-
-	return 0;
-}
-
-/**
- * drm_mode_getconnector - get connector configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a connector configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getconnector(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv)
-{
-	struct drm_mode_get_connector *out_resp = data;
-	struct drm_connector *connector;
-	struct drm_encoder *encoder;
-	struct drm_display_mode *mode;
-	int mode_count = 0;
-	int encoders_count = 0;
-	int ret = 0;
-	int copied = 0;
-	int i;
-	struct drm_mode_modeinfo u_mode;
-	struct drm_mode_modeinfo __user *mode_ptr;
-	uint32_t __user *encoder_ptr;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
-
-	mutex_lock(&dev->mode_config.mutex);
-
-	connector = drm_connector_lookup(dev, out_resp->connector_id);
-	if (!connector) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
-
-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
-		if (connector->encoder_ids[i] != 0)
-			encoders_count++;
-
-	if (out_resp->count_modes == 0) {
-		connector->funcs->fill_modes(connector,
-					     dev->mode_config.max_width,
-					     dev->mode_config.max_height);
-	}
-
-	/* delayed so we get modes regardless of pre-fill_modes state */
-	list_for_each_entry(mode, &connector->modes, head)
-		if (drm_mode_expose_to_userspace(mode, file_priv))
-			mode_count++;
-
-	out_resp->connector_id = connector->base.id;
-	out_resp->connector_type = connector->connector_type;
-	out_resp->connector_type_id = connector->connector_type_id;
-	out_resp->mm_width = connector->display_info.width_mm;
-	out_resp->mm_height = connector->display_info.height_mm;
-	out_resp->subpixel = connector->display_info.subpixel_order;
-	out_resp->connection = connector->status;
-
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	encoder = drm_connector_get_encoder(connector);
-	if (encoder)
-		out_resp->encoder_id = encoder->base.id;
-	else
-		out_resp->encoder_id = 0;
-
-	/*
-	 * This ioctl is called twice, once to determine how much space is
-	 * needed, and the 2nd time to fill it.
-	 */
-	if ((out_resp->count_modes >= mode_count) && mode_count) {
-		copied = 0;
-		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
-		list_for_each_entry(mode, &connector->modes, head) {
-			if (!drm_mode_expose_to_userspace(mode, file_priv))
-				continue;
-
-			drm_mode_convert_to_umode(&u_mode, mode);
-			if (copy_to_user(mode_ptr + copied,
-					 &u_mode, sizeof(u_mode))) {
-				ret = -EFAULT;
-				goto out;
-			}
-			copied++;
-		}
-	}
-	out_resp->count_modes = mode_count;
-
-	ret = get_properties(&connector->base, file_priv->atomic,
-			(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
-			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
-			&out_resp->count_props);
-	if (ret)
-		goto out;
-
-	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
-		copied = 0;
-		encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
-		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-			if (connector->encoder_ids[i] != 0) {
-				if (put_user(connector->encoder_ids[i],
-					     encoder_ptr + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
-		}
-	}
-	out_resp->count_encoders = encoders_count;
-
-out:
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
-
-	drm_connector_unreference(connector);
-out_unlock:
-	mutex_unlock(&dev->mode_config.mutex);
-
-	return ret;
-}
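
Seen from userspace, the two-call pattern noted in the comment above ("called twice, once to determine how much space is needed, and the 2nd time to fill it") looks roughly like this; a hedged sketch against the raw UAPI, where real applications would normally just call libdrm's drmModeGetConnector():

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>	/* drmIoctl(), pulls in the drm_mode UAPI structs */

static int foo_get_modes(int fd, uint32_t connector_id)
{
	struct drm_mode_get_connector arg = { .connector_id = connector_id };
	struct drm_mode_modeinfo *modes;

	/* 1st call: all counts zero, the kernel just reports the sizes */
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &arg))
		return -1;
	if (!arg.count_modes)
		return 0;

	modes = calloc(arg.count_modes, sizeof(*modes));
	if (!modes)
		return -1;
	arg.modes_ptr = (uintptr_t)modes;
	arg.count_props = 0;	/* skip properties and encoders here */
	arg.count_encoders = 0;

	/* 2nd call: the kernel fills modes_ptr (a hotplug in between can
	 * change the counts, so robust code retries in a loop) */
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &arg)) {
		free(modes);
		return -1;
	}
	/* ... use modes[0..arg.count_modes) ... */
	free(modes);
	return 0;
}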
-
-static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
-{
-	struct drm_connector *connector;
-	struct drm_device *dev = encoder->dev;
-	bool uses_atomic = false;
-
-	/* For atomic drivers only state objects are synchronously updated and
-	 * protected by modeset locks, so check those first. */
-	drm_for_each_connector(connector, dev) {
-		if (!connector->state)
-			continue;
-
-		uses_atomic = true;
-
-		if (connector->state->best_encoder != encoder)
-			continue;
-
-		return connector->state->crtc;
-	}
-
-	/* Don't return stale data (e.g. pending async disable). */
-	if (uses_atomic)
-		return NULL;
-
-	return encoder->crtc;
-}
-
-/**
- * drm_mode_getencoder - get encoder configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct an encoder configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getencoder(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_mode_get_encoder *enc_resp = data;
-	struct drm_encoder *encoder;
-	struct drm_crtc *crtc;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	encoder = drm_encoder_find(dev, enc_resp->encoder_id);
-	if (!encoder)
-		return -ENOENT;
-
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	crtc = drm_encoder_get_crtc(encoder);
-	if (crtc)
-		enc_resp->crtc_id = crtc->base.id;
-	else
-		enc_resp->crtc_id = 0;
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
-
-	enc_resp->encoder_type = encoder->encoder_type;
-	enc_resp->encoder_id = encoder->base.id;
-	enc_resp->possible_crtcs = encoder->possible_crtcs;
-	enc_resp->possible_clones = encoder->possible_clones;
-
-	return 0;
-}
-
 /**
  * drm_mode_getplane_res - enumerate all plane resources
  * @dev: DRM device
@@ -2592,8 +1066,9 @@
 	/* Check whether this plane supports the fb pixel format. */
 	ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
 	if (ret) {
-		DRM_DEBUG_KMS("Invalid pixel format %s\n",
-			      drm_get_format_name(fb->pixel_format));
+		char *format_name = drm_get_format_name(fb->pixel_format);
+		DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
+		kfree(format_name);
 		goto out;
 	}
 
@@ -2902,8 +1377,9 @@
 			ret = drm_plane_check_pixel_format(crtc->primary,
 							   fb->pixel_format);
 			if (ret) {
-				DRM_DEBUG_KMS("Invalid pixel format %s\n",
-					drm_get_format_name(fb->pixel_format));
+				char *format_name = drm_get_format_name(fb->pixel_format);
+				DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
+				kfree(format_name);
 				goto out;
 			}
 		}
@@ -3040,7 +1516,7 @@
 	 */
 	if (req->flags & DRM_MODE_CURSOR_BO) {
 		if (req->handle) {
-			fb = internal_framebuffer_create(dev, &fbreq, file_priv);
+			fb = drm_internal_framebuffer_create(dev, &fbreq, file_priv);
 			if (IS_ERR(fb)) {
 				DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
 				return PTR_ERR(fb);
@@ -3195,1802 +1671,9 @@
 	return drm_mode_cursor_common(dev, req, file_priv);
 }
 
-/**
- * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
- * @bpp: bits per pixel
- * @depth: bit depth per pixel
- *
- * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
- * Useful in fbdev emulation code, since that deals in those values.
- */
-uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
-{
-	uint32_t fmt;
-
-	switch (bpp) {
-	case 8:
-		fmt = DRM_FORMAT_C8;
-		break;
-	case 16:
-		if (depth == 15)
-			fmt = DRM_FORMAT_XRGB1555;
-		else
-			fmt = DRM_FORMAT_RGB565;
-		break;
-	case 24:
-		fmt = DRM_FORMAT_RGB888;
-		break;
-	case 32:
-		if (depth == 24)
-			fmt = DRM_FORMAT_XRGB8888;
-		else if (depth == 30)
-			fmt = DRM_FORMAT_XRGB2101010;
-		else
-			fmt = DRM_FORMAT_ARGB8888;
-		break;
-	default:
-		DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
-		fmt = DRM_FORMAT_XRGB8888;
-		break;
-	}
-
-	return fmt;
-}
-EXPORT_SYMBOL(drm_mode_legacy_fb_format);
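
For quick reference, the (bpp, depth) to fourcc mapping the switch above implements, written out as calls with the resulting format in comments:

uint32_t fmt;

fmt = drm_mode_legacy_fb_format(8, 8);    /* DRM_FORMAT_C8 */
fmt = drm_mode_legacy_fb_format(16, 15);  /* DRM_FORMAT_XRGB1555 */
fmt = drm_mode_legacy_fb_format(16, 16);  /* DRM_FORMAT_RGB565 */
fmt = drm_mode_legacy_fb_format(24, 24);  /* DRM_FORMAT_RGB888 */
fmt = drm_mode_legacy_fb_format(32, 24);  /* DRM_FORMAT_XRGB8888 */
fmt = drm_mode_legacy_fb_format(32, 30);  /* DRM_FORMAT_XRGB2101010 */
fmt = drm_mode_legacy_fb_format(32, 32);  /* DRM_FORMAT_ARGB8888 */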
-
-/**
- * drm_mode_addfb - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the specified CRTC, given a user request. This is the
- * original addfb ioctl which only supported RGB formats.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_addfb(struct drm_device *dev,
-		   void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_fb_cmd *or = data;
-	struct drm_mode_fb_cmd2 r = {};
-	int ret;
-
-	/* convert to new format and call new ioctl */
-	r.fb_id = or->fb_id;
-	r.width = or->width;
-	r.height = or->height;
-	r.pitches[0] = or->pitch;
-	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
-	r.handles[0] = or->handle;
-
-	ret = drm_mode_addfb2(dev, &r, file_priv);
-	if (ret)
-		return ret;
-
-	or->fb_id = r.fb_id;
-
-	return 0;
-}
-
-static int format_check(const struct drm_mode_fb_cmd2 *r)
-{
-	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
-
-	switch (format) {
-	case DRM_FORMAT_C8:
-	case DRM_FORMAT_RGB332:
-	case DRM_FORMAT_BGR233:
-	case DRM_FORMAT_XRGB4444:
-	case DRM_FORMAT_XBGR4444:
-	case DRM_FORMAT_RGBX4444:
-	case DRM_FORMAT_BGRX4444:
-	case DRM_FORMAT_ARGB4444:
-	case DRM_FORMAT_ABGR4444:
-	case DRM_FORMAT_RGBA4444:
-	case DRM_FORMAT_BGRA4444:
-	case DRM_FORMAT_XRGB1555:
-	case DRM_FORMAT_XBGR1555:
-	case DRM_FORMAT_RGBX5551:
-	case DRM_FORMAT_BGRX5551:
-	case DRM_FORMAT_ARGB1555:
-	case DRM_FORMAT_ABGR1555:
-	case DRM_FORMAT_RGBA5551:
-	case DRM_FORMAT_BGRA5551:
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_BGR565:
-	case DRM_FORMAT_RGB888:
-	case DRM_FORMAT_BGR888:
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_RGBX8888:
-	case DRM_FORMAT_BGRX8888:
-	case DRM_FORMAT_ARGB8888:
-	case DRM_FORMAT_ABGR8888:
-	case DRM_FORMAT_RGBA8888:
-	case DRM_FORMAT_BGRA8888:
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_RGBX1010102:
-	case DRM_FORMAT_BGRX1010102:
-	case DRM_FORMAT_ARGB2101010:
-	case DRM_FORMAT_ABGR2101010:
-	case DRM_FORMAT_RGBA1010102:
-	case DRM_FORMAT_BGRA1010102:
-	case DRM_FORMAT_YUYV:
-	case DRM_FORMAT_YVYU:
-	case DRM_FORMAT_UYVY:
-	case DRM_FORMAT_VYUY:
-	case DRM_FORMAT_AYUV:
-	case DRM_FORMAT_NV12:
-	case DRM_FORMAT_NV21:
-	case DRM_FORMAT_NV16:
-	case DRM_FORMAT_NV61:
-	case DRM_FORMAT_NV24:
-	case DRM_FORMAT_NV42:
-	case DRM_FORMAT_YUV410:
-	case DRM_FORMAT_YVU410:
-	case DRM_FORMAT_YUV411:
-	case DRM_FORMAT_YVU411:
-	case DRM_FORMAT_YUV420:
-	case DRM_FORMAT_YVU420:
-	case DRM_FORMAT_YUV422:
-	case DRM_FORMAT_YVU422:
-	case DRM_FORMAT_YUV444:
-	case DRM_FORMAT_YVU444:
-		return 0;
-	default:
-		DRM_DEBUG_KMS("invalid pixel format %s\n",
-			      drm_get_format_name(r->pixel_format));
-		return -EINVAL;
-	}
-}
-
-static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
-{
-	int ret, hsub, vsub, num_planes, i;
-
-	ret = format_check(r);
-	if (ret) {
-		DRM_DEBUG_KMS("bad framebuffer format %s\n",
-			      drm_get_format_name(r->pixel_format));
-		return ret;
-	}
-
-	hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
-	vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
-	num_planes = drm_format_num_planes(r->pixel_format);
-
-	if (r->width == 0 || r->width % hsub) {
-		DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
-		return -EINVAL;
-	}
-
-	if (r->height == 0 || r->height % vsub) {
-		DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < num_planes; i++) {
-		unsigned int width = r->width / (i != 0 ? hsub : 1);
-		unsigned int height = r->height / (i != 0 ? vsub : 1);
-		unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
-
-		if (!r->handles[i]) {
-			DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
-			return -EINVAL;
-		}
-
-		if ((uint64_t) width * cpp > UINT_MAX)
-			return -ERANGE;
-
-		if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
-			return -ERANGE;
-
-		if (r->pitches[i] < width * cpp) {
-			DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
-			return -EINVAL;
-		}
-
-		if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
-			DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
-				      r->modifier[i], i);
-			return -EINVAL;
-		}
-
-		/* modifier specific checks: */
-		switch (r->modifier[i]) {
-		case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
-			/* NOTE: the pitch restriction may be lifted later if it turns
-			 * out that no hw has this restriction:
-			 */
-			if (r->pixel_format != DRM_FORMAT_NV12 ||
-					width % 128 || height % 32 ||
-					r->pitches[i] % 128) {
-				DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
-				return -EINVAL;
-			}
-			break;
-
-		default:
-			break;
-		}
-	}
-
-	for (i = num_planes; i < 4; i++) {
-		if (r->modifier[i]) {
-			DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
-			return -EINVAL;
-		}
-
-		/* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */
-		if (!(r->flags & DRM_MODE_FB_MODIFIERS))
-			continue;
-
-		if (r->handles[i]) {
-			DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
-			return -EINVAL;
-		}
-
-		if (r->pitches[i]) {
-			DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
-			return -EINVAL;
-		}
-
-		if (r->offsets[i]) {
-			DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
-static struct drm_framebuffer *
-internal_framebuffer_create(struct drm_device *dev,
-			    const struct drm_mode_fb_cmd2 *r,
-			    struct drm_file *file_priv)
-{
-	struct drm_mode_config *config = &dev->mode_config;
-	struct drm_framebuffer *fb;
-	int ret;
-
-	if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
-		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
-		return ERR_PTR(-EINVAL);
-	}
-
-	if ((config->min_width > r->width) || (r->width > config->max_width)) {
-		DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
-			  r->width, config->min_width, config->max_width);
-		return ERR_PTR(-EINVAL);
-	}
-	if ((config->min_height > r->height) || (r->height > config->max_height)) {
-		DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
-			  r->height, config->min_height, config->max_height);
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (r->flags & DRM_MODE_FB_MODIFIERS &&
-	    !dev->mode_config.allow_fb_modifiers) {
-		DRM_DEBUG_KMS("driver does not support fb modifiers\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	ret = framebuffer_check(r);
-	if (ret)
-		return ERR_PTR(ret);
-
-	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
-	if (IS_ERR(fb)) {
-		DRM_DEBUG_KMS("could not create framebuffer\n");
-		return fb;
-	}
-
-	return fb;
-}
-
-/**
- * drm_mode_addfb2 - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the specified CRTC, given a user request with format. This is
- * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
- * and uses fourcc codes as pixel format specifiers.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_addfb2(struct drm_device *dev,
-		    void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_fb_cmd2 *r = data;
-	struct drm_framebuffer *fb;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	fb = internal_framebuffer_create(dev, r, file_priv);
-	if (IS_ERR(fb))
-		return PTR_ERR(fb);
-
-	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-	r->fb_id = fb->base.id;
-
-	/* Transfer ownership to the filp for reaping on close */
-	mutex_lock(&file_priv->fbs_lock);
-	list_add(&fb->filp_head, &file_priv->fbs);
-	mutex_unlock(&file_priv->fbs_lock);
-
-	return 0;
-}
-
-struct drm_mode_rmfb_work {
-	struct work_struct work;
-	struct list_head fbs;
-};
-
-static void drm_mode_rmfb_work_fn(struct work_struct *w)
-{
-	struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
-
-	while (!list_empty(&arg->fbs)) {
-		struct drm_framebuffer *fb =
-			list_first_entry(&arg->fbs, typeof(*fb), filp_head);
-
-		list_del_init(&fb->filp_head);
-		drm_framebuffer_remove(fb);
-	}
-}
-
-/**
- * drm_mode_rmfb - remove an FB from the configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Remove the FB specified by the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_rmfb(struct drm_device *dev,
-		   void *data, struct drm_file *file_priv)
-{
-	struct drm_framebuffer *fb = NULL;
-	struct drm_framebuffer *fbl = NULL;
-	uint32_t *id = data;
-	int found = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	fb = drm_framebuffer_lookup(dev, *id);
-	if (!fb)
-		return -ENOENT;
-
-	mutex_lock(&file_priv->fbs_lock);
-	list_for_each_entry(fbl, &file_priv->fbs, filp_head)
-		if (fb == fbl)
-			found = 1;
-	if (!found) {
-		mutex_unlock(&file_priv->fbs_lock);
-		goto fail_unref;
-	}
-
-	list_del_init(&fb->filp_head);
-	mutex_unlock(&file_priv->fbs_lock);
-
-	/* drop the reference we picked up in framebuffer lookup */
-	drm_framebuffer_unreference(fb);
-
-	/*
-	 * we now own the reference that was stored in the fbs list
-	 *
-	 * drm_framebuffer_remove may fail with -EINTR on pending signals,
-	 * so run this in a separate stack as there's no way to correctly
-	 * handle this after the fb is already removed from the lookup table.
-	 */
-	if (drm_framebuffer_read_refcount(fb) > 1) {
-		struct drm_mode_rmfb_work arg;
-
-		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
-		INIT_LIST_HEAD(&arg.fbs);
-		list_add_tail(&fb->filp_head, &arg.fbs);
-
-		schedule_work(&arg.work);
-		flush_work(&arg.work);
-		destroy_work_on_stack(&arg.work);
-	} else
-		drm_framebuffer_unreference(fb);
-
-	return 0;
-
-fail_unref:
-	drm_framebuffer_unreference(fb);
-	return -ENOENT;
-}
-
-/**
- * drm_mode_getfb - get FB info
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Lookup the FB given its ID and return info about it.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getfb(struct drm_device *dev,
-		   void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_fb_cmd *r = data;
-	struct drm_framebuffer *fb;
-	int ret;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	fb = drm_framebuffer_lookup(dev, r->fb_id);
-	if (!fb)
-		return -ENOENT;
-
-	r->height = fb->height;
-	r->width = fb->width;
-	r->depth = fb->depth;
-	r->bpp = fb->bits_per_pixel;
-	r->pitch = fb->pitches[0];
-	if (fb->funcs->create_handle) {
-		if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
-		    drm_is_control_client(file_priv)) {
-			ret = fb->funcs->create_handle(fb, file_priv,
-						       &r->handle);
-		} else {
-			/* GET_FB() is an unprivileged ioctl so we must not
-			 * return a buffer-handle to non-master processes! For
-			 * backwards-compatibility reasons, we cannot make
-			 * GET_FB() privileged, so just return an invalid handle
-			 * for non-masters. */
-			r->handle = 0;
-			ret = 0;
-		}
-	} else {
-		ret = -ENODEV;
-	}
-
-	drm_framebuffer_unreference(fb);
-
-	return ret;
-}
-
-/**
- * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Lookup the FB and flush out the damaged area supplied by userspace as a clip
- * rectangle list. Generic userspace which does frontbuffer rendering must call
- * this ioctl to flush out the changes on manual-update display outputs, e.g.
- * usb display-link, mipi manual update panels or edp panel self refresh modes.
- *
- * Modesetting drivers which always update the frontbuffer do not need to
- * implement the corresponding ->dirty framebuffer callback.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv)
-{
-	struct drm_clip_rect __user *clips_ptr;
-	struct drm_clip_rect *clips = NULL;
-	struct drm_mode_fb_dirty_cmd *r = data;
-	struct drm_framebuffer *fb;
-	unsigned flags;
-	int num_clips;
-	int ret;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	fb = drm_framebuffer_lookup(dev, r->fb_id);
-	if (!fb)
-		return -ENOENT;
-
-	num_clips = r->num_clips;
-	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
-
-	if (!num_clips != !clips_ptr) {
-		ret = -EINVAL;
-		goto out_err1;
-	}
-
-	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
-
-	/* If userspace annotates copy, clips must come in pairs */
-	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
-		ret = -EINVAL;
-		goto out_err1;
-	}
-
-	if (num_clips && clips_ptr) {
-		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
-			ret = -EINVAL;
-			goto out_err1;
-		}
-		clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
-		if (!clips) {
-			ret = -ENOMEM;
-			goto out_err1;
-		}
-
-		ret = copy_from_user(clips, clips_ptr,
-				     num_clips * sizeof(*clips));
-		if (ret) {
-			ret = -EFAULT;
-			goto out_err2;
-		}
-	}
-
-	if (fb->funcs->dirty) {
-		ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
-				       clips, num_clips);
-	} else {
-		ret = -ENOSYS;
-	}
-
-out_err2:
-	kfree(clips);
-out_err1:
-	drm_framebuffer_unreference(fb);
-
-	return ret;
-}
-
-/**
- * drm_fb_release - remove and free the FBs on this file
- * @priv: drm file for the ioctl
- *
- * Destroy all the FBs associated with @priv.
- *
- * Called when the file is released; unlike the ioctl helpers above this
- * returns nothing.
- */
-void drm_fb_release(struct drm_file *priv)
-{
-	struct drm_framebuffer *fb, *tfb;
-	struct drm_mode_rmfb_work arg;
-
-	INIT_LIST_HEAD(&arg.fbs);
-
-	/*
-	 * When the file gets released that means no one else can access the fb
-	 * list any more, so no need to grab fpriv->fbs_lock. And we need to
-	 * avoid upsetting lockdep since the universal cursor code adds a
-	 * framebuffer while holding mutex locks.
-	 *
-	 * Note that a real deadlock between fpriv->fbs_lock and the modeset
-	 * locks is impossible here since no one else but this function can get
-	 * at it any more.
-	 */
-	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
-		if (drm_framebuffer_read_refcount(fb) > 1) {
-			list_move_tail(&fb->filp_head, &arg.fbs);
-		} else {
-			list_del_init(&fb->filp_head);
-
-			/* This drops the fpriv->fbs reference. */
-			drm_framebuffer_unreference(fb);
-		}
-	}
-
-	if (!list_empty(&arg.fbs)) {
-		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
-
-		schedule_work(&arg.work);
-		flush_work(&arg.work);
-		destroy_work_on_stack(&arg.work);
-	}
-}
-
-static bool drm_property_type_valid(struct drm_property *property)
-{
-	if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
-		return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
-	return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
-}
-
-/**
- * drm_property_create - create a new property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @num_values: number of pre-defined values
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Note that the DRM core keeps a per-device list of properties and that, if
- * drm_mode_config_cleanup() is called, it will destroy all properties created
- * by the driver.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
-					 const char *name, int num_values)
-{
-	struct drm_property *property = NULL;
-	int ret;
-
-	property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
-	if (!property)
-		return NULL;
-
-	property->dev = dev;
-
-	if (num_values) {
-		property->values = kcalloc(num_values, sizeof(uint64_t),
-					   GFP_KERNEL);
-		if (!property->values)
-			goto fail;
-	}
-
-	ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
-	if (ret)
-		goto fail;
-
-	property->flags = flags;
-	property->num_values = num_values;
-	INIT_LIST_HEAD(&property->enum_list);
-
-	if (name) {
-		strncpy(property->name, name, DRM_PROP_NAME_LEN);
-		property->name[DRM_PROP_NAME_LEN-1] = '\0';
-	}
-
-	list_add_tail(&property->head, &dev->mode_config.property_list);
-
-	WARN_ON(!drm_property_type_valid(property));
-
-	return property;
-fail:
-	kfree(property->values);
-	kfree(property);
-	return NULL;
-}
-EXPORT_SYMBOL(drm_property_create);
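
A minimal driver-side sketch pairing this with the range helper and drm_object_attach_property() (both shown further below); the "zpos" name and 0..255 limits are illustrative only, not from this patch:

static int foo_plane_create_zpos(struct drm_device *dev,
				 struct drm_plane *plane)
{
	struct drm_property *prop;

	/* created once per device, then attached per object */
	prop = drm_property_create_range(dev, 0, "zpos", 0, 255);
	if (!prop)
		return -ENOMEM;

	drm_object_attach_property(&plane->base, prop, 0 /* initial value */);
	return 0;
}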
-
-/**
- * drm_property_create_enum - create a new enumeration property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @props: enumeration lists with property values
- * @num_values: number of pre-defined values
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is only allowed to set one of the predefined values for enumeration
- * properties.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
-					 const char *name,
-					 const struct drm_prop_enum_list *props,
-					 int num_values)
-{
-	struct drm_property *property;
-	int i, ret;
-
-	flags |= DRM_MODE_PROP_ENUM;
-
-	property = drm_property_create(dev, flags, name, num_values);
-	if (!property)
-		return NULL;
-
-	for (i = 0; i < num_values; i++) {
-		ret = drm_property_add_enum(property, i,
-				      props[i].type,
-				      props[i].name);
-		if (ret) {
-			drm_property_destroy(dev, property);
-			return NULL;
-		}
-	}
-
-	return property;
-}
-EXPORT_SYMBOL(drm_property_create_enum);
-
-/**
- * drm_property_create_bitmask - create a new bitmask property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @props: enumeration lists with property bitflags
- * @num_props: size of the @props array
- * @supported_bits: bitmask of all supported enumeration values
- *
- * This creates a new bitmask drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Compared to plain enumeration properties, userspace is allowed to set any
- * or'ed together combination of the predefined property bitflag values.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
-					 int flags, const char *name,
-					 const struct drm_prop_enum_list *props,
-					 int num_props,
-					 uint64_t supported_bits)
-{
-	struct drm_property *property;
-	int i, ret, index = 0;
-	int num_values = hweight64(supported_bits);
-
-	flags |= DRM_MODE_PROP_BITMASK;
-
-	property = drm_property_create(dev, flags, name, num_values);
-	if (!property)
-		return NULL;
-	for (i = 0; i < num_props; i++) {
-		if (!(supported_bits & (1ULL << props[i].type)))
-			continue;
-
-		if (WARN_ON(index >= num_values)) {
-			drm_property_destroy(dev, property);
-			return NULL;
-		}
-
-		ret = drm_property_add_enum(property, index++,
-				      props[i].type,
-				      props[i].name);
-		if (ret) {
-			drm_property_destroy(dev, property);
-			return NULL;
-		}
-	}
-
-	return property;
-}
-EXPORT_SYMBOL(drm_property_create_bitmask);
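
A sketch of how @supported_bits filters the @props list (the names and bit assignments are illustrative; in-tree users such as the rotation property go through dedicated helpers):

static int foo_create_caps_prop(struct drm_device *dev,
				struct drm_plane *plane)
{
	static const struct drm_prop_enum_list foo_caps_list[] = {
		{ 0, "cap-a" },		/* bit 0 */
		{ 1, "cap-b" },		/* bit 1 */
		{ 2, "cap-c" },		/* bit 2 */
	};
	struct drm_property *prop;

	/* Only bits 0 and 2 are advertised: the "cap-b" entry is skipped
	 * because its bit is not in supported_bits, and userspace may set
	 * any OR-combination of the two remaining values. */
	prop = drm_property_create_bitmask(dev, 0, "foo-caps",
					   foo_caps_list,
					   ARRAY_SIZE(foo_caps_list),
					   BIT(0) | BIT(2));
	if (!prop)
		return -ENOMEM;

	drm_object_attach_property(&plane->base, prop, BIT(0));
	return 0;
}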
-
-static struct drm_property *property_create_range(struct drm_device *dev,
-					 int flags, const char *name,
-					 uint64_t min, uint64_t max)
-{
-	struct drm_property *property;
-
-	property = drm_property_create(dev, flags, name, 2);
-	if (!property)
-		return NULL;
-
-	property->values[0] = min;
-	property->values[1] = max;
-
-	return property;
-}
-
-/**
- * drm_property_create_range - create a new unsigned ranged property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @min: minimum value of the property
- * @max: maximum value of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is allowed to set any unsigned integer value in the [min, max]
- * range, inclusive.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
-					 const char *name,
-					 uint64_t min, uint64_t max)
-{
-	return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
-			name, min, max);
-}
-EXPORT_SYMBOL(drm_property_create_range);
-
-/**
- * drm_property_create_signed_range - create a new signed ranged property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @min: minimum value of the property
- * @max: maximum value of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is allowed to set any signed integer value in the [min, max]
- * range, inclusive.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
-					 int flags, const char *name,
-					 int64_t min, int64_t max)
-{
-	return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
-			name, I642U64(min), I642U64(max));
-}
-EXPORT_SYMBOL(drm_property_create_signed_range);
-
-/**
- * drm_property_create_object - create a new object property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @type: object type from DRM_MODE_OBJECT_* defines
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is only allowed to set this property to object IDs of the given
- * @type. Only useful for atomic properties, which is enforced.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_object(struct drm_device *dev,
-					 int flags, const char *name, uint32_t type)
-{
-	struct drm_property *property;
-
-	flags |= DRM_MODE_PROP_OBJECT;
-
-	if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
-		return NULL;
-
-	property = drm_property_create(dev, flags, name, 1);
-	if (!property)
-		return NULL;
-
-	property->values[0] = type;
-
-	return property;
-}
-EXPORT_SYMBOL(drm_property_create_object);
-
-/**
- * drm_property_create_bool - create a new boolean property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * This is implemented as a ranged property with only {0, 1} as valid values.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
-					 const char *name)
-{
-	return drm_property_create_range(dev, flags, name, 0, 1);
-}
-EXPORT_SYMBOL(drm_property_create_bool);
-
-/**
- * drm_property_add_enum - add a possible value to an enumeration property
- * @property: enumeration property to change
- * @index: index of the new enumeration
- * @value: value of the new enumeration
- * @name: symbolic name of the new enumeration
- *
- * This function adds enumerations to a property.
- *
- * Its use is deprecated; drivers should use one of the more specific helpers
- * to directly create the property with all enumerations already attached.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_property_add_enum(struct drm_property *property, int index,
-			  uint64_t value, const char *name)
-{
-	struct drm_property_enum *prop_enum;
-
-	if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
-			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
-		return -EINVAL;
-
-	/*
-	 * Bitmask enum properties have the additional constraint of values
-	 * from 0 to 63
-	 */
-	if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
-			(value > 63))
-		return -EINVAL;
-
-	if (!list_empty(&property->enum_list)) {
-		list_for_each_entry(prop_enum, &property->enum_list, head) {
-			if (prop_enum->value == value) {
-				strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
-				prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
-				return 0;
-			}
-		}
-	}
-
-	prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
-	if (!prop_enum)
-		return -ENOMEM;
-
-	strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
-	prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
-	prop_enum->value = value;
-
-	property->values[index] = value;
-	list_add_tail(&prop_enum->head, &property->enum_list);
-	return 0;
-}
-EXPORT_SYMBOL(drm_property_add_enum);
-
-/**
- * drm_property_destroy - destroy a drm property
- * @dev: drm device
- * @property: property to destroy
- *
- * This function frees a property including any attached resources like
- * enumeration values.
- */
-void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
-{
-	struct drm_property_enum *prop_enum, *pt;
-
-	list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
-		list_del(&prop_enum->head);
-		kfree(prop_enum);
-	}
-
-	if (property->num_values)
-		kfree(property->values);
-	drm_mode_object_unregister(dev, &property->base);
-	list_del(&property->head);
-	kfree(property);
-}
-EXPORT_SYMBOL(drm_property_destroy);
-
-/**
- * drm_object_attach_property - attach a property to a modeset object
- * @obj: drm modeset object
- * @property: property to attach
- * @init_val: initial value of the property
- *
- * This attaches the given property to the modeset object with the given initial
- * value. Currently this function cannot fail since the properties are stored in
- * a statically sized array.
- */
-void drm_object_attach_property(struct drm_mode_object *obj,
-				struct drm_property *property,
-				uint64_t init_val)
-{
-	int count = obj->properties->count;
-
-	if (count == DRM_OBJECT_MAX_PROPERTY) {
-		WARN(1, "Failed to attach object property (type: 0x%x). Please "
-			"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
-			"you see this message on the same object type.\n",
-			obj->type);
-		return;
-	}
-
-	obj->properties->properties[count] = property;
-	obj->properties->values[count] = init_val;
-	obj->properties->count++;
-	if (property->flags & DRM_MODE_PROP_ATOMIC)
-		obj->properties->atomic_count++;
-}
-EXPORT_SYMBOL(drm_object_attach_property);
-
-/**
- * drm_object_property_set_value - set the value of a property
- * @obj: drm mode object to set property value for
- * @property: property to set
- * @val: value the property should be set to
- *
- * This function sets a given property on a given object. It only
- * changes the software state of the property, it does not call into the
- * driver's ->set_property callback.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_object_property_set_value(struct drm_mode_object *obj,
-				  struct drm_property *property, uint64_t val)
-{
-	int i;
-
-	for (i = 0; i < obj->properties->count; i++) {
-		if (obj->properties->properties[i] == property) {
-			obj->properties->values[i] = val;
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL(drm_object_property_set_value);
-
-/**
- * drm_object_property_get_value - retrieve the value of a property
- * @obj: drm mode object to get property value from
- * @property: property to retrieve
- * @val: storage for the property value
- *
- * This function retrieves the software state of the given property for the
- * given object. Since there is no driver callback to retrieve the current
- * property value, this might be out of sync with the hardware, depending
- * upon the driver and property.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_object_property_get_value(struct drm_mode_object *obj,
-				  struct drm_property *property, uint64_t *val)
-{
-	int i;
-
-	/* read-only properties bypass atomic mechanism and still store
-	 * their value in obj->properties->values[], mostly to avoid
-	 * having to deal w/ EDID and similar props in atomic paths:
-	 */
-	if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
-			!(property->flags & DRM_MODE_PROP_IMMUTABLE))
-		return drm_atomic_get_property(obj, property, val);
-
-	for (i = 0; i < obj->properties->count; i++) {
-		if (obj->properties->properties[i] == property) {
-			*val = obj->properties->values[i];
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL(drm_object_property_get_value);
-
-/**
- * drm_mode_getproperty_ioctl - get the property metadata
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the metadata for a given property, like the different
- * possible values for an enum property or the limits for a range property.
- *
- * Blob properties are special: this ioctl returns only their metadata, the
- * actual contents are read with the GETPROPBLOB ioctl.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getproperty_ioctl(struct drm_device *dev,
-			       void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_get_property *out_resp = data;
-	struct drm_property *property;
-	int enum_count = 0;
-	int value_count = 0;
-	int ret = 0, i;
-	int copied;
-	struct drm_property_enum *prop_enum;
-	struct drm_mode_property_enum __user *enum_ptr;
-	uint64_t __user *values_ptr;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-	property = drm_property_find(dev, out_resp->prop_id);
-	if (!property) {
-		ret = -ENOENT;
-		goto done;
-	}
-
-	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
-			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
-		list_for_each_entry(prop_enum, &property->enum_list, head)
-			enum_count++;
-	}
-
-	value_count = property->num_values;
-
-	strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
-	out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
-	out_resp->flags = property->flags;
-
-	if ((out_resp->count_values >= value_count) && value_count) {
-		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
-		for (i = 0; i < value_count; i++) {
-			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
-				ret = -EFAULT;
-				goto done;
-			}
-		}
-	}
-	out_resp->count_values = value_count;
-
-	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
-			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
-		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
-			copied = 0;
-			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
-			list_for_each_entry(prop_enum, &property->enum_list, head) {
-
-				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
-					ret = -EFAULT;
-					goto done;
-				}
-
-				if (copy_to_user(&enum_ptr[copied].name,
-						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
-					ret = -EFAULT;
-					goto done;
-				}
-				copied++;
-			}
-		}
-		out_resp->count_enum_blobs = enum_count;
-	}
-
-	/*
-	 * NOTE: The idea seems to have been to use this to read all the blob
-	 * property values. But nothing ever added them to the corresponding
-	 * list, userspace always used the special-purpose get_blob ioctl to
-	 * read the value for a blob property. It also doesn't make a lot of
-	 * sense to return values here when everything else is just metadata for
-	 * the property itself.
-	 */
-	if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
-		out_resp->count_enum_blobs = 0;
-done:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
-static void drm_property_free_blob(struct kref *kref)
-{
-	struct drm_property_blob *blob =
-		container_of(kref, struct drm_property_blob, base.refcount);
-
-	mutex_lock(&blob->dev->mode_config.blob_lock);
-	list_del(&blob->head_global);
-	mutex_unlock(&blob->dev->mode_config.blob_lock);
-
-	drm_mode_object_unregister(blob->dev, &blob->base);
-
-	kfree(blob);
-}
-
-/**
- * drm_property_create_blob - Create new blob property
- *
- * Creates a new blob property for a specified DRM device, optionally
- * copying data.
- *
- * @dev: DRM device to create property for
- * @length: Length to allocate for blob data
- * @data: If specified, copies data into blob
- *
- * Returns:
- * New blob property with a single reference on success, or an ERR_PTR
- * value on failure.
- */
-struct drm_property_blob *
-drm_property_create_blob(struct drm_device *dev, size_t length,
-			 const void *data)
-{
-	struct drm_property_blob *blob;
-	int ret;
-
-	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
-		return ERR_PTR(-EINVAL);
-
-	blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
-	if (!blob)
-		return ERR_PTR(-ENOMEM);
-
-	/* This must be explicitly initialised, so we can safely call list_del
-	 * on it in the removal handler, even if it isn't in a file list. */
-	INIT_LIST_HEAD(&blob->head_file);
-	blob->length = length;
-	blob->dev = dev;
-
-	if (data)
-		memcpy(blob->data, data, length);
-
-	ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
-				      true, drm_property_free_blob);
-	if (ret) {
-		kfree(blob);
-		return ERR_PTR(-EINVAL);
-	}
-
-	mutex_lock(&dev->mode_config.blob_lock);
-	list_add_tail(&blob->head_global,
-	              &dev->mode_config.property_blob_list);
-	mutex_unlock(&dev->mode_config.blob_lock);
-
-	return blob;
-}
-EXPORT_SYMBOL(drm_property_create_blob);
-
-/**
- * drm_property_unreference_blob - Unreference a blob property
- *
- * Drop a reference on a blob property. May free the object.
- *
- * @blob: Pointer to blob property
- */
-void drm_property_unreference_blob(struct drm_property_blob *blob)
-{
-	if (!blob)
-		return;
-
-	drm_mode_object_unreference(&blob->base);
-}
-EXPORT_SYMBOL(drm_property_unreference_blob);
-
-/**
- * drm_property_destroy_user_blobs - destroy all blobs created by this client
- * @dev:       DRM device
- * @file_priv: destroy all blobs owned by this file handle
- */
-void drm_property_destroy_user_blobs(struct drm_device *dev,
-				     struct drm_file *file_priv)
-{
-	struct drm_property_blob *blob, *bt;
-
-	/*
-	 * When the file gets released that means no one else can access the
-	 * blob list any more, so no need to grab dev->blob_lock.
-	 */
-	list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
-		list_del_init(&blob->head_file);
-		drm_property_unreference_blob(blob);
-	}
-}
-
-/**
- * drm_property_reference_blob - Take a reference on an existing property
- *
- * Take a new reference on an existing blob property.
- *
- * @blob: Pointer to blob property
- */
-struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
-{
-	drm_mode_object_reference(&blob->base);
-	return blob;
-}
-EXPORT_SYMBOL(drm_property_reference_blob);
-
-/**
- * drm_property_lookup_blob - look up a blob property and take a reference
- * @dev: drm device
- * @id: id of the blob property
- *
- * If successful, this takes an additional reference to the blob property.
- * Callers need to make sure to eventually unreference the returned property
- * again, using drm_property_unreference_blob().
- */
-struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
-					           uint32_t id)
-{
-	struct drm_mode_object *obj;
-	struct drm_property_blob *blob = NULL;
-
-	obj = _object_find(dev, id, DRM_MODE_OBJECT_BLOB);
-	if (obj)
-		blob = obj_to_blob(obj);
-	return blob;
-}
-EXPORT_SYMBOL(drm_property_lookup_blob);
-
-/**
- * drm_property_replace_global_blob - atomically replace existing blob property
- * @dev: drm device
- * @replace: location of blob property pointer to be replaced
- * @length: length of data for new blob, or 0 for no data
- * @data: content for new blob, or NULL for no data
- * @obj_holds_id: optional object for property holding blob ID
- * @prop_holds_id: optional property holding blob ID
- * Returns:
- * Zero on success, negative errno on failure.
- *
- * This function will atomically replace a global property in the blob list,
- * optionally updating a property which holds the ID of that property. It is
- * guaranteed to be atomic: no caller will be allowed to see intermediate
- * results, and either the entire operation will succeed and clean up the
- * previous property, or it will fail and the state will be unchanged.
- *
- * If length is 0 or data is NULL, no new blob will be created, and the holding
- * property, if specified, will be set to 0.
- *
- * Access to the replace pointer is assumed to be protected by the caller, e.g.
- * by holding the relevant modesetting object lock for its parent.
- *
- * For example, a drm_connector has a 'PATH' property, which contains the ID
- * of a blob property with the value of the MST path information. Calling this
- * function with replace pointing to the connector's path_blob_ptr, length and
- * data set for the new path information, obj_holds_id set to the connector's
- * base object, and prop_holds_id set to the path property name, will perform
- * a completely atomic update. The access to path_blob_ptr is protected by the
- * caller holding a lock on the connector.
- */
-static int drm_property_replace_global_blob(struct drm_device *dev,
-                                            struct drm_property_blob **replace,
-                                            size_t length,
-                                            const void *data,
-                                            struct drm_mode_object *obj_holds_id,
-                                            struct drm_property *prop_holds_id)
-{
-	struct drm_property_blob *new_blob = NULL;
-	struct drm_property_blob *old_blob = NULL;
-	int ret;
-
-	WARN_ON(replace == NULL);
-
-	old_blob = *replace;
-
-	if (length && data) {
-		new_blob = drm_property_create_blob(dev, length, data);
-		if (IS_ERR(new_blob))
-			return PTR_ERR(new_blob);
-	}
-
-	/* This does not need to be synchronised with blob_lock, as the
-	 * get_properties ioctl locks all modesetting objects, and
-	 * obj_holds_id must be locked before calling here, so we cannot
-	 * have its value out of sync with the list membership modified
-	 * below under blob_lock. */
-	if (obj_holds_id) {
-		ret = drm_object_property_set_value(obj_holds_id,
-						    prop_holds_id,
-						    new_blob ?
-						        new_blob->base.id : 0);
-		if (ret != 0)
-			goto err_created;
-	}
-
-	drm_property_unreference_blob(old_blob);
-	*replace = new_blob;
-
-	return 0;
-
-err_created:
-	drm_property_unreference_blob(new_blob);
-	return ret;
-}
-
-/**
- * drm_mode_getblob_ioctl - get the contents of a blob property value
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the contents of a blob property. The value stored in
- * an object's blob property is just a normal modeset object id.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getblob_ioctl(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_get_blob *out_resp = data;
-	struct drm_property_blob *blob;
-	int ret = 0;
-	void __user *blob_ptr;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	blob = drm_property_lookup_blob(dev, out_resp->blob_id);
-	if (!blob)
-		return -ENOENT;
-
-	if (out_resp->length == blob->length) {
-		blob_ptr = (void __user *)(unsigned long)out_resp->data;
-		if (copy_to_user(blob_ptr, blob->data, blob->length)) {
-			ret = -EFAULT;
-			goto unref;
-		}
-	}
-	out_resp->length = blob->length;
-unref:
-	drm_property_unreference_blob(blob);
-
-	return ret;
-}
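
The length handshake above implies a two-pass pattern for userspace: call once
with length 0 to learn the size, then again with a matching buffer. A hedged
sketch follows (read_blob is hypothetical; libdrm's drmModeGetPropertyBlob()
wraps the same dance):

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>

static void *read_blob(int fd, uint32_t blob_id, uint32_t *length)
{
	struct drm_mode_get_blob get = { .blob_id = blob_id };
	void *data;

	/* Pass 1: length is 0, so the kernel only reports blob->length. */
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETBLOB, &get))
		return NULL;

	data = malloc(get.length);
	if (!data)
		return NULL;

	/* Pass 2: lengths now match, so the kernel copies the contents. */
	get.data = (uintptr_t)data;
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETBLOB, &get)) {
		free(data);
		return NULL;
	}

	*length = get.length;
	return data;
}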
-
-/**
- * drm_mode_createblob_ioctl - create a new blob property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function creates a new blob property with user-defined values. In order
- * to give us sensible validation and checking when creating, rather than at
- * every potential use, we also require a type to be provided upfront.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_createblob_ioctl(struct drm_device *dev,
-			      void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_create_blob *out_resp = data;
-	struct drm_property_blob *blob;
-	void __user *blob_ptr;
-	int ret = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	blob = drm_property_create_blob(dev, out_resp->length, NULL);
-	if (IS_ERR(blob))
-		return PTR_ERR(blob);
-
-	blob_ptr = (void __user *)(unsigned long)out_resp->data;
-	if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
-		ret = -EFAULT;
-		goto out_blob;
-	}
-
-	/* Dropping the lock between create_blob and our access here is safe
-	 * as only the same file_priv can remove the blob; at this point, it is
-	 * not associated with any file_priv. */
-	mutex_lock(&dev->mode_config.blob_lock);
-	out_resp->blob_id = blob->base.id;
-	list_add_tail(&blob->head_file, &file_priv->blobs);
-	mutex_unlock(&dev->mode_config.blob_lock);
-
-	return 0;
-
-out_blob:
-	drm_property_unreference_blob(blob);
-	return ret;
-}
-
-/**
- * drm_mode_destroyblob_ioctl - destroy a user blob property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Destroy an existing user-defined blob property.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_destroyblob_ioctl(struct drm_device *dev,
-			       void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_destroy_blob *out_resp = data;
-	struct drm_property_blob *blob = NULL, *bt;
-	bool found = false;
-	int ret = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	blob = drm_property_lookup_blob(dev, out_resp->blob_id);
-	if (!blob)
-		return -ENOENT;
-
-	mutex_lock(&dev->mode_config.blob_lock);
-	/* Ensure the property was actually created by this user. */
-	list_for_each_entry(bt, &file_priv->blobs, head_file) {
-		if (bt == blob) {
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		ret = -EPERM;
-		goto err;
-	}
-
-	/* We must drop head_file here, because we may not be the last
-	 * reference on the blob. */
-	list_del_init(&blob->head_file);
-	mutex_unlock(&dev->mode_config.blob_lock);
-
-	/* One reference from lookup, and one from the filp. */
-	drm_property_unreference_blob(blob);
-	drm_property_unreference_blob(blob);
-
-	return 0;
-
-err:
-	mutex_unlock(&dev->mode_config.blob_lock);
-	drm_property_unreference_blob(blob);
-
-	return ret;
-}
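
Taken together, the create and destroy ioctls above give userspace a blob
lifecycle; a minimal hypothetical sketch, assuming the usual libdrm headers:

#include <stdint.h>
#include <xf86drm.h>

static uint32_t create_blob(int fd, const void *data, uint32_t length)
{
	struct drm_mode_create_blob create = {
		.data = (uintptr_t)data,
		.length = length,
	};

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATEPROPBLOB, &create))
		return 0; /* 0 is never a valid object ID */

	return create.blob_id;
}

static void destroy_blob(int fd, uint32_t blob_id)
{
	struct drm_mode_destroy_blob destroy = { .blob_id = blob_id };

	/* Only the file that created the blob may destroy it (-EPERM
	 * otherwise); the object lives on until its last reference drops. */
	drmIoctl(fd, DRM_IOCTL_MODE_DESTROYPROPBLOB, &destroy);
}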
-
-/**
- * drm_mode_connector_set_path_property - set tile property on connector
- * @connector: connector to set property on.
- * @path: path to use for property; must not be NULL.
- *
- * This creates a property that exposes the connector path to userspace.
- * This is mainly used for DisplayPort MST where
- * connectors have a topology and we want to allow userspace to give
- * them more meaningful names.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
-					 const char *path)
-{
-	struct drm_device *dev = connector->dev;
-	int ret;
-
-	ret = drm_property_replace_global_blob(dev,
-	                                       &connector->path_blob_ptr,
-	                                       strlen(path) + 1,
-	                                       path,
-	                                       &connector->base,
-	                                       dev->mode_config.path_property);
-	return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_set_path_property);
-
-/**
- * drm_mode_connector_set_tile_property - set tile property on connector
- * @connector: connector to set property on.
- *
- * This looks up the tile information for a connector, and creates a
- * property for userspace to parse if tile information exists. The property
- * is a string of 8 integers using ':' as a separator.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_set_tile_property(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	char tile[256];
-	int ret;
-
-	if (!connector->has_tile) {
-		ret  = drm_property_replace_global_blob(dev,
-		                                        &connector->tile_blob_ptr,
-		                                        0,
-		                                        NULL,
-		                                        &connector->base,
-		                                        dev->mode_config.tile_property);
-		return ret;
-	}
-
-	snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
-		 connector->tile_group->id, connector->tile_is_single_monitor,
-		 connector->num_h_tile, connector->num_v_tile,
-		 connector->tile_h_loc, connector->tile_v_loc,
-		 connector->tile_h_size, connector->tile_v_size);
-
-	ret = drm_property_replace_global_blob(dev,
-	                                       &connector->tile_blob_ptr,
-	                                       strlen(tile) + 1,
-	                                       tile,
-	                                       &connector->base,
-	                                       dev->mode_config.tile_property);
-	return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
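
For reference, a hypothetical userspace sketch decoding the 8-integer TILE
string documented above:

#include <stdio.h>

static int parse_tile_blob(const char *tile, int out[8])
{
	/* group_id : single_monitor : num_h : num_v :
	 * h_loc : v_loc : h_size : v_size */
	return sscanf(tile, "%d:%d:%d:%d:%d:%d:%d:%d",
		      &out[0], &out[1], &out[2], &out[3],
		      &out[4], &out[5], &out[6], &out[7]) == 8 ? 0 : -1;
}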
-
-/**
- * drm_mode_connector_update_edid_property - update the edid property of a connector
- * @connector: drm connector
- * @edid: new value of the edid property
- *
- * This function creates a new blob modeset object and assigns its id to the
- * connector's edid property.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
-					    const struct edid *edid)
-{
-	struct drm_device *dev = connector->dev;
-	size_t size = 0;
-	int ret;
-
-	/* ignore requests to set edid when overridden */
-	if (connector->override_edid)
-		return 0;
-
-	if (edid)
-		size = EDID_LENGTH * (1 + edid->extensions);
-
-	ret = drm_property_replace_global_blob(dev,
-					       &connector->edid_blob_ptr,
-	                                       size,
-	                                       edid,
-	                                       &connector->base,
-	                                       dev->mode_config.edid_property);
-	return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
-
-/* Some properties could refer to dynamic refcnt'd objects, or things that
- * need special locking to handle lifetime issues (i.e. to ensure the prop
- * value doesn't become invalid part way through the property update due to
- * a race).  The value returned by reference via 'obj' should be passed back
- * to drm_property_change_valid_put() after the property is set (and the
- * object to which the property is attached has a chance to take its own
- * reference).
- */
-bool drm_property_change_valid_get(struct drm_property *property,
-					 uint64_t value, struct drm_mode_object **ref)
-{
-	int i;
-
-	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
-		return false;
-
-	*ref = NULL;
-
-	if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
-		if (value < property->values[0] || value > property->values[1])
-			return false;
-		return true;
-	} else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
-		int64_t svalue = U642I64(value);
-
-		if (svalue < U642I64(property->values[0]) ||
-				svalue > U642I64(property->values[1]))
-			return false;
-		return true;
-	} else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
-		uint64_t valid_mask = 0;
-
-		for (i = 0; i < property->num_values; i++)
-			valid_mask |= (1ULL << property->values[i]);
-		return !(value & ~valid_mask);
-	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
-		struct drm_property_blob *blob;
-
-		if (value == 0)
-			return true;
-
-		blob = drm_property_lookup_blob(property->dev, value);
-		if (blob) {
-			*ref = &blob->base;
-			return true;
-		} else {
-			return false;
-		}
-	} else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
-		/* a zero value for an object property translates to null: */
-		if (value == 0)
-			return true;
-
-		*ref = _object_find(property->dev, value, property->values[0]);
-		return *ref != NULL;
-	}
-
-	for (i = 0; i < property->num_values; i++)
-		if (property->values[i] == value)
-			return true;
-	return false;
-}
-
-void drm_property_change_valid_put(struct drm_property *property,
-		struct drm_mode_object *ref)
-{
-	if (!ref)
-		return;
-
-	if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
-		drm_mode_object_unreference(ref);
-	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
-		drm_property_unreference_blob(obj_to_blob(ref));
-}
-
-/**
- * drm_mode_connector_property_set_ioctl - set the current value of a connector property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function sets the current value for a connector's property. It also
- * calls into a driver's ->set_property callback to update the hardware state.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
-				       void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_connector_set_property *conn_set_prop = data;
-	struct drm_mode_obj_set_property obj_set_prop = {
-		.value = conn_set_prop->value,
-		.prop_id = conn_set_prop->prop_id,
-		.obj_id = conn_set_prop->connector_id,
-		.obj_type = DRM_MODE_OBJECT_CONNECTOR
-	};
-
-	/* It does all the locking and checking we need */
-	return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
-}
-
-static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
-					   struct drm_property *property,
-					   uint64_t value)
-{
-	int ret = -EINVAL;
-	struct drm_connector *connector = obj_to_connector(obj);
-
-	/* Do DPMS ourselves */
-	if (property == connector->dev->mode_config.dpms_property) {
-		ret = (*connector->funcs->dpms)(connector, (int)value);
-	} else if (connector->funcs->set_property)
-		ret = connector->funcs->set_property(connector, property, value);
-
-	/* store the property value if successful */
-	if (!ret)
-		drm_object_property_set_value(&connector->base, property, value);
-	return ret;
-}
-
-static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
-				      struct drm_property *property,
-				      uint64_t value)
+int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+			       struct drm_property *property,
+			       uint64_t value)
 {
 	int ret = -EINVAL;
 	struct drm_crtc *crtc = obj_to_crtc(obj);
@@ -5033,176 +1716,6 @@
 EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
 
 /**
- * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the current value for an object's property. Compared
- * to the connector specific ioctl this one is extended to also work on crtc and
- * plane objects.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
-				      struct drm_file *file_priv)
-{
-	struct drm_mode_obj_get_properties *arg = data;
-	struct drm_mode_object *obj;
-	int ret = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-
-	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
-	if (!obj) {
-		ret = -ENOENT;
-		goto out;
-	}
-	if (!obj->properties) {
-		ret = -EINVAL;
-		goto out_unref;
-	}
-
-	ret = get_properties(obj, file_priv->atomic,
-			(uint32_t __user *)(unsigned long)(arg->props_ptr),
-			(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
-			&arg->count_props);
-
-out_unref:
-	drm_mode_object_unreference(obj);
-out:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
-/**
- * drm_mode_obj_set_property_ioctl - set the current value of an object's property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function sets the current value for an object's property. It also calls
- * into a driver's ->set_property callback to update the hardware state.
- * Compared to the connector specific ioctl this one is extended to also work on
- * crtc and plane objects.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file_priv)
-{
-	struct drm_mode_obj_set_property *arg = data;
-	struct drm_mode_object *arg_obj;
-	struct drm_mode_object *prop_obj;
-	struct drm_property *property;
-	int i, ret = -EINVAL;
-	struct drm_mode_object *ref;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-
-	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
-	if (!arg_obj) {
-		ret = -ENOENT;
-		goto out;
-	}
-	if (!arg_obj->properties)
-		goto out_unref;
-
-	for (i = 0; i < arg_obj->properties->count; i++)
-		if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
-			break;
-
-	if (i == arg_obj->properties->count)
-		goto out_unref;
-
-	prop_obj = drm_mode_object_find(dev, arg->prop_id,
-					DRM_MODE_OBJECT_PROPERTY);
-	if (!prop_obj) {
-		ret = -ENOENT;
-		goto out_unref;
-	}
-	property = obj_to_property(prop_obj);
-
-	if (!drm_property_change_valid_get(property, arg->value, &ref))
-		goto out_unref;
-
-	switch (arg_obj->type) {
-	case DRM_MODE_OBJECT_CONNECTOR:
-		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
-						      arg->value);
-		break;
-	case DRM_MODE_OBJECT_CRTC:
-		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
-		break;
-	case DRM_MODE_OBJECT_PLANE:
-		ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
-						  property, arg->value);
-		break;
-	}
-
-	drm_property_change_valid_put(property, ref);
-
-out_unref:
-	drm_mode_object_unreference(arg_obj);
-out:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
-/**
- * drm_mode_connector_attach_encoder - attach a connector to an encoder
- * @connector: connector to attach
- * @encoder: encoder to attach @connector to
- *
- * This function links up a connector to an encoder. Note that the routing
- * restrictions between encoders and crtcs are exposed to userspace through the
- * possible_clones and possible_crtcs bitmasks.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
-				      struct drm_encoder *encoder)
-{
-	int i;
-
-	/*
-	 * In the past, drivers have attempted to model the static association
-	 * of connector to encoder in simple connector/encoder devices using a
-	 * direct assignment of connector->encoder = encoder. This connection
-	 * is a logical one and the responsibility of the core, so drivers are
-	 * expected not to mess with this.
-	 *
-	 * Note that the error return should've been enough here, but a large
-	 * majority of drivers ignores the return value, so add in a big WARN
-	 * to get people's attention.
-	 */
-	if (WARN_ON(connector->encoder))
-		return -EINVAL;
-
-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-		if (connector->encoder_ids[i] == 0) {
-			connector->encoder_ids[i] = encoder->base.id;
-			return 0;
-		}
-	}
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
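
A minimal sketch of the intended call site, assuming a hypothetical
driver-private structure with embedded connector and encoder:

#include <drm/drmP.h>

struct example_output {
	struct drm_connector connector;
	struct drm_encoder encoder;
};

static int example_output_init(struct example_output *out)
{
	/* Both objects are assumed to be initialised already via
	 * drm_connector_init()/drm_encoder_init(). */
	return drm_mode_connector_attach_encoder(&out->connector,
						 &out->encoder);
}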
-
-/**
  * drm_mode_crtc_set_gamma_size - set the gamma table size
  * @crtc: CRTC to set the gamma table size for
  * @gamma_size: size of the gamma table
@@ -5398,14 +1911,23 @@
 int drm_mode_page_flip_ioctl(struct drm_device *dev,
 			     void *data, struct drm_file *file_priv)
 {
-	struct drm_mode_crtc_page_flip *page_flip = data;
+	struct drm_mode_crtc_page_flip_target *page_flip = data;
 	struct drm_crtc *crtc;
 	struct drm_framebuffer *fb = NULL;
 	struct drm_pending_vblank_event *e = NULL;
+	u32 target_vblank = page_flip->sequence;
 	int ret = -EINVAL;
 
-	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
-	    page_flip->reserved != 0)
+	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS)
+		return -EINVAL;
+
+	if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET))
+		return -EINVAL;
+
+	/* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags
+	 * can be specified
+	 */
+	if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET)
 		return -EINVAL;
 
 	if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
@@ -5415,6 +1937,45 @@
 	if (!crtc)
 		return -ENOENT;
 
+	if (crtc->funcs->page_flip_target) {
+		u32 current_vblank;
+		int r;
+
+		r = drm_crtc_vblank_get(crtc);
+		if (r)
+			return r;
+
+		current_vblank = drm_crtc_vblank_count(crtc);
+
+		switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) {
+		case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE:
+			if ((int)(target_vblank - current_vblank) > 1) {
+				DRM_DEBUG("Invalid absolute flip target %u, "
+					  "must be <= %u\n", target_vblank,
+					  current_vblank + 1);
+				drm_crtc_vblank_put(crtc);
+				return -EINVAL;
+			}
+			break;
+		case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE:
+			if (target_vblank != 0 && target_vblank != 1) {
+				DRM_DEBUG("Invalid relative flip target %u, "
+					  "must be 0 or 1\n", target_vblank);
+				drm_crtc_vblank_put(crtc);
+				return -EINVAL;
+			}
+			target_vblank += current_vblank;
+			break;
+		default:
+			target_vblank = current_vblank +
+				!(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC);
+			break;
+		}
+	} else if (crtc->funcs->page_flip == NULL ||
+		   (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) {
+		return -EINVAL;
+	}
+
 	drm_modeset_lock_crtc(crtc, crtc->primary);
 	if (crtc->primary->fb == NULL) {
 		/* The framebuffer is currently unbound, presumably
@@ -5425,9 +1986,6 @@
 		goto out;
 	}
 
-	if (crtc->funcs->page_flip == NULL)
-		goto out;
-
 	fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
 	if (!fb) {
 		ret = -ENOENT;
@@ -5468,7 +2026,12 @@
 	}
 
 	crtc->primary->old_fb = crtc->primary->fb;
-	ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
+	if (crtc->funcs->page_flip_target)
+		ret = crtc->funcs->page_flip_target(crtc, fb, e,
+						    page_flip->flags,
+						    target_vblank);
+	else
+		ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
 	if (ret) {
 		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
 			drm_event_cancel_free(dev, &e->base);
@@ -5481,6 +2044,8 @@
 	}
 
 out:
+	if (ret && crtc->funcs->page_flip_target)
+		drm_crtc_vblank_put(crtc);
 	if (fb)
 		drm_framebuffer_unreference(fb);
 	if (crtc->primary->old_fb)
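
To illustrate the new uapi, a hedged userspace sketch queuing a flip with the
relative-target flavour (flip_next is hypothetical; note a relative target may
only be 0 or 1, as enforced above):

#include <stdint.h>
#include <xf86drm.h>

static int flip_next(int fd, uint32_t crtc_id, uint32_t fb_id,
		     void *user_data)
{
	struct drm_mode_crtc_page_flip_target flip = {
		.crtc_id = crtc_id,
		.fb_id = fb_id,
		.flags = DRM_MODE_PAGE_FLIP_EVENT |
			 DRM_MODE_PAGE_FLIP_TARGET_RELATIVE,
		.sequence = 1,	/* one vblank from now */
		.user_data = (uintptr_t)user_data,
	};

	/* Fails with -EINVAL on drivers lacking a page_flip_target hook. */
	return drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
}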
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 604d3ef..5d2cb13 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -75,35 +75,6 @@
  */
 
 /**
- * drm_helper_move_panel_connectors_to_head() - move panels to the front of
- * 						the connector list
- * @dev: drm device to operate on
- *
- * Some userspace presumes that the first connected connector is the main
- * display, where it's supposed to display e.g. the login screen. For
- * laptops, this should be the main panel. Use this function to sort all
- * (eDP/LVDS) panels to the front of the connector list, instead of
- * painstakingly trying to initialize them in the right order.
- */
-void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
-{
-	struct drm_connector *connector, *tmp;
-	struct list_head panel_list;
-
-	INIT_LIST_HEAD(&panel_list);
-
-	list_for_each_entry_safe(connector, tmp,
-				 &dev->mode_config.connector_list, head) {
-		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-			list_move_tail(&connector->head, &panel_list);
-	}
-
-	list_splice(&panel_list, &dev->mode_config.connector_list);
-}
-EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
-
-/**
  * drm_helper_encoder_in_use - check if a given encoder is in use
  * @encoder: encoder to check
  *
@@ -913,33 +884,6 @@
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
 /**
- * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
- * @fb: drm_framebuffer object to fill out
- * @mode_cmd: metadata from the userspace fb creation request
- *
- * This helper can be used in a driver's fb_create callback to pre-fill the fb's
- * metadata fields.
- */
-void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-				    const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-	int i;
-
-	fb->width = mode_cmd->width;
-	fb->height = mode_cmd->height;
-	for (i = 0; i < 4; i++) {
-		fb->pitches[i] = mode_cmd->pitches[i];
-		fb->offsets[i] = mode_cmd->offsets[i];
-		fb->modifier[i] = mode_cmd->modifier[i];
-	}
-	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
-				    &fb->bits_per_pixel);
-	fb->pixel_format = mode_cmd->pixel_format;
-	fb->flags = mode_cmd->flags;
-}
-EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
-
-/**
  * drm_helper_resume_force_mode - force-restore mode setting configuration
  * @dev: drm_device which should be restored
  *
diff --git a/include/drm/drm_dp_aux_dev.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
similarity index 62%
rename from include/drm/drm_dp_aux_dev.h
rename to drivers/gpu/drm/drm_crtc_helper_internal.h
index 1b76d99..4e6b57a 100644
--- a/include/drm/drm_dp_aux_dev.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2015 Intel Corporation
+ * Copyright © 2016 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -8,37 +8,36 @@
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Rafael Antognolli <rafael.antognolli@intel.com>
- *
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#ifndef DRM_DP_AUX_DEV
-#define DRM_DP_AUX_DEV
+/*
+ * This header file contains mode setting related functions and definitions
+ * which are only used within the drm kms helper module as internal
+ * implementation details and are not exported to drivers.
+ */
 
 #include <drm/drm_dp_helper.h>
 
-#ifdef CONFIG_DRM_DP_AUX_CHARDEV
+/* drm_fb_helper.c */
+int drm_fb_helper_modinit(void);
 
+/* drm_dp_aux_dev.c */
+#ifdef CONFIG_DRM_DP_AUX_CHARDEV
 int drm_dp_aux_dev_init(void);
 void drm_dp_aux_dev_exit(void);
 int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
 void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
-
 #else
-
 static inline int drm_dp_aux_dev_init(void)
 {
 	return 0;
@@ -56,7 +55,4 @@
 static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
 {
 }
-
-#endif
-
 #endif
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 0c34e6d..444e609 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -33,18 +33,9 @@
 
 
 /* drm_crtc.c */
-void drm_connector_ida_init(void);
-void drm_connector_ida_destroy(void);
-int drm_mode_object_get(struct drm_device *dev,
-			struct drm_mode_object *obj, uint32_t obj_type);
-void drm_mode_object_unregister(struct drm_device *dev,
-				struct drm_mode_object *object);
-bool drm_property_change_valid_get(struct drm_property *property,
-				   uint64_t value,
-				   struct drm_mode_object **ref);
-void drm_property_change_valid_put(struct drm_property *property,
-				   struct drm_mode_object *ref);
-
+int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+			       struct drm_property *property,
+			       uint64_t value);
 int drm_plane_check_pixel_format(const struct drm_plane *plane,
 				 u32 format);
 int drm_crtc_check_viewport(const struct drm_crtc *crtc,
@@ -53,8 +44,6 @@
 			    const struct drm_framebuffer *fb);
 
 void drm_fb_release(struct drm_file *file_priv);
-void drm_property_destroy_user_blobs(struct drm_device *dev,
-				     struct drm_file *file_priv);
 
 /* dumb buffer support IOCTLs */
 int drm_mode_create_dumb_ioctl(struct drm_device *dev,
@@ -64,32 +53,13 @@
 int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
 				void *data, struct drm_file *file_priv);
 
-/* framebuffer IOCTLs */
-extern int drm_mode_addfb(struct drm_device *dev,
-			  void *data, struct drm_file *file_priv);
-extern int drm_mode_addfb2(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv);
-int drm_mode_rmfb(struct drm_device *dev,
-			 void *data, struct drm_file *file_priv);
-int drm_mode_getfb(struct drm_device *dev,
-		   void *data, struct drm_file *file_priv);
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv);
-
 /* IOCTLs */
-int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
-				      struct drm_file *file_priv);
-int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file_priv);
-
 int drm_mode_getresources(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv);
 int drm_mode_getplane_res(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
 int drm_mode_getcrtc(struct drm_device *dev,
 		     void *data, struct drm_file *file_priv);
-int drm_mode_getconnector(struct drm_device *dev,
-			  void *data, struct drm_file *file_priv);
 int drm_mode_setcrtc(struct drm_device *dev,
 		     void *data, struct drm_file *file_priv);
 int drm_mode_getplane(struct drm_device *dev,
@@ -100,6 +70,24 @@
 			  void *data, struct drm_file *file_priv);
 int drm_mode_cursor2_ioctl(struct drm_device *dev,
 			   void *data, struct drm_file *file_priv);
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv);
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv);
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv);
+
+/* drm_property.c */
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+				     struct drm_file *file_priv);
+bool drm_property_change_valid_get(struct drm_property *property,
+				   uint64_t value,
+				   struct drm_mode_object **ref);
+void drm_property_change_valid_put(struct drm_property *property,
+				   struct drm_mode_object *ref);
+
+/* IOCTL */
 int drm_mode_getproperty_ioctl(struct drm_device *dev,
 			       void *data, struct drm_file *file_priv);
 int drm_mode_getblob_ioctl(struct drm_device *dev,
@@ -108,17 +96,77 @@
 			      void *data, struct drm_file *file_priv);
 int drm_mode_destroyblob_ioctl(struct drm_device *dev,
 			       void *data, struct drm_file *file_priv);
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
-					  void *data, struct drm_file *file_priv);
+
+/* drm_mode_object.c */
+int drm_mode_object_get_reg(struct drm_device *dev,
+			    struct drm_mode_object *obj,
+			    uint32_t obj_type,
+			    bool register_obj,
+			    void (*obj_free_cb)(struct kref *kref));
+void drm_mode_object_register(struct drm_device *dev,
+			      struct drm_mode_object *obj);
+int drm_mode_object_get(struct drm_device *dev,
+			struct drm_mode_object *obj, uint32_t obj_type);
+struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+					       uint32_t id, uint32_t type);
+void drm_mode_object_unregister(struct drm_device *dev,
+				struct drm_mode_object *object);
+int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+				   uint32_t __user *prop_ptr,
+				   uint64_t __user *prop_values,
+				   uint32_t *arg_count_props);
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+					       uint32_t prop_id);
+
+/* IOCTL */
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv);
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+
+/* drm_encoder.c */
+int drm_encoder_register_all(struct drm_device *dev);
+void drm_encoder_unregister_all(struct drm_device *dev);
+
+/* IOCTL */
 int drm_mode_getencoder(struct drm_device *dev,
 			void *data, struct drm_file *file_priv);
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
-			     void *data, struct drm_file *file_priv);
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
-			     void *data, struct drm_file *file_priv);
 
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
-			     void *data, struct drm_file *file_priv);
+/* drm_connector.c */
+void drm_connector_ida_init(void);
+void drm_connector_ida_destroy(void);
+void drm_connector_unregister_all(struct drm_device *dev);
+int drm_connector_register_all(struct drm_device *dev);
+int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+				    struct drm_property *property,
+				    uint64_t value);
+int drm_connector_create_standard_properties(struct drm_device *dev);
+
+/* IOCTL */
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+					  void *data, struct drm_file *file_priv);
+int drm_mode_getconnector(struct drm_device *dev,
+			  void *data, struct drm_file *file_priv);
+
+/* drm_framebuffer.c */
+struct drm_framebuffer *
+drm_internal_framebuffer_create(struct drm_device *dev,
+				const struct drm_mode_fb_cmd2 *r,
+				struct drm_file *file_priv);
+void drm_framebuffer_free(struct kref *kref);
+
+/* IOCTL */
+int drm_mode_addfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv);
+int drm_mode_addfb2(struct drm_device *dev,
+		    void *data, struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev,
+		  void *data, struct drm_file *file_priv);
+int drm_mode_getfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv);
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv);
 
 /* drm_atomic.c */
 int drm_atomic_get_property(struct drm_mode_object *obj,
@@ -130,5 +178,5 @@
 void drm_modeset_unregister_all(struct drm_device *dev);
 
 /* drm_blend.c */
-int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
-				     struct drm_atomic_state *state);
+int drm_atomic_normalize_zpos(struct drm_device *dev,
+			      struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 734f86a..ec1ed94 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -36,6 +36,8 @@
 #include <drm/drm_crtc.h>
 #include <drm/drmP.h>
 
+#include "drm_crtc_helper_internal.h"
+
 struct drm_dp_aux_dev {
 	unsigned index;
 	struct drm_dp_aux *aux;
@@ -283,12 +285,7 @@
 	schedule();
 	return 0;
 }
-/**
- * drm_dp_aux_unregister_devnode() - unregister a devnode for this aux channel
- * @aux: DisplayPort AUX channel
- *
- * Returns 0 on success or a negative error code on failure.
- */
+
 void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
 {
 	struct drm_dp_aux_dev *aux_dev;
@@ -314,14 +311,7 @@
 	DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name);
 	kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
 }
-EXPORT_SYMBOL(drm_dp_aux_unregister_devnode);
 
-/**
- * drm_dp_aux_register_devnode() - register a devnode for this aux channel
- * @aux: DisplayPort AUX channel
- *
- * Returns 0 on success or a negative error code on failure.
- */
 int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
 {
 	struct drm_dp_aux_dev *aux_dev;
@@ -347,7 +337,6 @@
 	drm_dp_aux_unregister_devnode(aux);
 	return res;
 }
-EXPORT_SYMBOL(drm_dp_aux_register_devnode);
 
 int drm_dp_aux_dev_init(void)
 {
@@ -369,11 +358,9 @@
 	class_destroy(drm_dp_aux_dev_class);
 	return res;
 }
-EXPORT_SYMBOL(drm_dp_aux_dev_init);
 
 void drm_dp_aux_dev_exit(void)
 {
 	unregister_chrdev(drm_dev_major, "aux");
 	class_destroy(drm_dp_aux_dev_class);
 }
-EXPORT_SYMBOL(drm_dp_aux_dev_exit);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 559db96..0ad20f1 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -28,9 +28,10 @@
 #include <linux/sched.h>
 #include <linux/i2c.h>
 #include <drm/drm_dp_helper.h>
-#include <drm/drm_dp_aux_dev.h>
 #include <drm/drmP.h>
 
+#include "drm_crtc_helper_internal.h"
+
 /**
  * DOC: dp helpers
  *
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 57ce973..acf6a5f 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -63,37 +63,51 @@
 
 static struct dentry *drm_debugfs_root;
 
-void drm_err(const char *format, ...)
+#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"
+
+void drm_dev_printk(const struct device *dev, const char *level,
+		    unsigned int category, const char *function_name,
+		    const char *prefix, const char *format, ...)
 {
 	struct va_format vaf;
 	va_list args;
 
-	va_start(args, format);
-
-	vaf.fmt = format;
-	vaf.va = &args;
-
-	printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
-	       __builtin_return_address(0), &vaf);
-
-	va_end(args);
-}
-EXPORT_SYMBOL(drm_err);
-
-void drm_ut_debug_printk(const char *function_name, const char *format, ...)
-{
-	struct va_format vaf;
-	va_list args;
+	if (category != DRM_UT_NONE && !(drm_debug & category))
+		return;
 
 	va_start(args, format);
 	vaf.fmt = format;
 	vaf.va = &args;
 
-	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
+	if (dev)
+		dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
+			   &vaf);
+	else
+		printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);
 
 	va_end(args);
 }
-EXPORT_SYMBOL(drm_ut_debug_printk);
+EXPORT_SYMBOL(drm_dev_printk);
+
+void drm_printk(const char *level, unsigned int category,
+		const char *function_name, const char *prefix,
+		const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	if (category != DRM_UT_NONE && !(drm_debug & category))
+		return;
+
+	va_start(args, format);
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(drm_printk);
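
For orientation, hypothetical wrapper macros showing how the two new entry
points centralise level and category handling (the real DRM_DEBUG/DRM_ERROR
definitions live in drmP.h and may differ):

#define EXAMPLE_DEV_ERROR(dev, fmt, ...)				\
	drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__,		\
		       " *ERROR*", fmt, ##__VA_ARGS__)

#define EXAMPLE_DEBUG_KMS(fmt, ...)					\
	drm_printk(KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt,		\
		   ##__VA_ARGS__)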
 
 /*
  * DRM Minors
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 637a0aa..5054132 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -991,7 +991,7 @@
 	 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 64 - 1920x1080@100Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
-		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 	 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 };
@@ -3721,14 +3721,7 @@
 }
 EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
 
-/**
- * drm_assign_hdmi_deep_color_info - detect whether monitor supports
- * hdmi deep color modes and update drm_display_info if so.
- * @edid: monitor EDID information
- * @info: Updated with maximum supported deep color bpc and color format
- *        if deep color supported.
- * @connector: DRM connector, used only for debug output
- *
+/*
  * Parse the CEA extension according to CEA-861-B.
  * Return true if HDMI deep color supported, false if not or unknown.
  */
@@ -3822,16 +3815,6 @@
 	return false;
 }
 
-/**
- * drm_add_display_info - pull display info out if present
- * @edid: EDID data
- * @info: display info (attached to connector)
- * @connector: connector whose edid is used to build display info
- *
- * Grab any available display info and stuff it into the drm_display_info
- * structure that's part of the connector.  Useful for tracking bpp and
- * color spaces.
- */
 static void drm_add_display_info(struct edid *edid,
                                  struct drm_display_info *info,
                                  struct drm_connector *connector)
@@ -4052,7 +4035,9 @@
  * @connector: connector we're probing
  * @edid: EDID data
  *
- * Add the specified modes to the connector's mode list.
+ * Add the specified modes to the connector's mode list. Also fills out the
+ * &drm_display_info structure in @connector with any information which can be
+ * derived from the edid.
  *
  * Return: The number of modes added or 0 if we couldn't find any.
  */
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
new file mode 100644
index 0000000..998a674
--- /dev/null
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Encoders represent the connecting element between the CRTC (as the overall
+ * pixel pipeline, represented by struct &drm_crtc) and the connectors (as the
+ * generic sink entity, represented by struct &drm_connector). Encoders are
+ * objects exposed to userspace, originally to allow userspace to infer cloning
+ * and connector/CRTC restrictions. Unfortunately almost all drivers get this
+ * wrong, making the uabi pretty much useless. On top of that the exposed
+ * restrictions are too simple for today's hardware, and the recommended way to
+ * infer restrictions is by using the DRM_MODE_ATOMIC_TEST_ONLY flag for the
+ * atomic IOCTL.
+ *
+ * Otherwise encoders aren't used in the uapi at all (any modeset request from
+ * userspace directly connects a connector with a CRTC), drivers are therefore
+ * free to use them however they wish. Modeset helper libraries make strong use
+ * of encoders to facilitate code sharing. But for more complex settings it is
+ * usually better to move shared code into a separate &drm_bridge. Compared to
+ * encoders bridges also have the benefit of not being purely an internal
+ * abstraction since they are not exposed to userspace at all.
+ *
+ * Encoders are initialized with drm_encoder_init() and cleaned up using
+ * drm_encoder_cleanup().
+ */
+static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
+	{ DRM_MODE_ENCODER_NONE, "None" },
+	{ DRM_MODE_ENCODER_DAC, "DAC" },
+	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
+	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
+	{ DRM_MODE_ENCODER_TVDAC, "TV" },
+	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+	{ DRM_MODE_ENCODER_DSI, "DSI" },
+	{ DRM_MODE_ENCODER_DPMST, "DP MST" },
+	{ DRM_MODE_ENCODER_DPI, "DPI" },
+};
+
+int drm_encoder_register_all(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	int ret = 0;
+
+	drm_for_each_encoder(encoder, dev) {
+		if (encoder->funcs->late_register)
+			ret = encoder->funcs->late_register(encoder);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void drm_encoder_unregister_all(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	drm_for_each_encoder(encoder, dev) {
+		if (encoder->funcs->early_unregister)
+			encoder->funcs->early_unregister(encoder);
+	}
+}
+
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Initialises a preallocated encoder. Encoder should be subclassed as part of
+ * driver encoder objects. At driver unload time drm_encoder_cleanup() should be
+ * called from the driver's destroy hook in &drm_encoder_funcs.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_encoder_init(struct drm_device *dev,
+		     struct drm_encoder *encoder,
+		     const struct drm_encoder_funcs *funcs,
+		     int encoder_type, const char *name, ...)
+{
+	int ret;
+
+	drm_modeset_lock_all(dev);
+
+	ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+	if (ret)
+		goto out_unlock;
+
+	encoder->dev = dev;
+	encoder->encoder_type = encoder_type;
+	encoder->funcs = funcs;
+	if (name) {
+		va_list ap;
+
+		va_start(ap, name);
+		encoder->name = kvasprintf(GFP_KERNEL, name, ap);
+		va_end(ap);
+	} else {
+		encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
+					  drm_encoder_enum_list[encoder_type].name,
+					  encoder->base.id);
+	}
+	if (!encoder->name) {
+		ret = -ENOMEM;
+		goto out_put;
+	}
+
+	list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+	encoder->index = dev->mode_config.num_encoder++;
+
+out_put:
+	if (ret)
+		drm_mode_object_unregister(dev, &encoder->base);
+
+out_unlock:
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_encoder_init);
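+
+A minimal driver-side sketch, with hypothetical names, of initialising an
+embedded encoder and reusing drm_encoder_cleanup() as the destroy hook:
+
+#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
+
+static const struct drm_encoder_funcs example_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static int example_encoder_create(struct drm_device *dev,
+				  struct drm_encoder *encoder)
+{
+	/* NULL name: the core generates "TMDS-<id>" automatically. */
+	return drm_encoder_init(dev, encoder, &example_encoder_funcs,
+				DRM_MODE_ENCODER_TMDS, NULL);
+}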
+
+/**
+ * drm_encoder_cleanup - cleans up an initialised encoder
+ * @encoder: encoder to cleanup
+ *
+ * Cleans up the encoder but doesn't free the object.
+ */
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+
+	/* Note that the encoder_list is considered to be static; should we
+	 * remove the drm_encoder at runtime we would have to decrement all
+	 * the indices on the drm_encoder after us in the encoder_list.
+	 */
+
+	drm_modeset_lock_all(dev);
+	drm_mode_object_unregister(dev, &encoder->base);
+	kfree(encoder->name);
+	list_del(&encoder->head);
+	dev->mode_config.num_encoder--;
+	drm_modeset_unlock_all(dev);
+
+	memset(encoder, 0, sizeof(*encoder));
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
+
+static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
+{
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+	bool uses_atomic = false;
+
+	/* For atomic drivers only state objects are synchronously updated and
+	 * protected by modeset locks, so check those first. */
+	drm_for_each_connector(connector, dev) {
+		if (!connector->state)
+			continue;
+
+		uses_atomic = true;
+
+		if (connector->state->best_encoder != encoder)
+			continue;
+
+		return connector->state->crtc;
+	}
+
+	/* Don't return stale data (e.g. pending async disable). */
+	if (uses_atomic)
+		return NULL;
+
+	return encoder->crtc;
+}
+
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_mode_get_encoder *enc_resp = data;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	encoder = drm_encoder_find(dev, enc_resp->encoder_id);
+	if (!encoder)
+		return -ENOENT;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	crtc = drm_encoder_get_crtc(encoder);
+	if (crtc)
+		enc_resp->crtc_id = crtc->base.id;
+	else
+		enc_resp->crtc_id = 0;
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+	enc_resp->encoder_type = encoder->encoder_type;
+	enc_resp->encoder_id = encoder->base.id;
+	enc_resp->possible_crtcs = encoder->possible_crtcs;
+	enc_resp->possible_clones = encoder->possible_clones;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index aed79d3..c4d3500 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -29,6 +29,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/console.h>
 #include <linux/kernel.h>
 #include <linux/sysrq.h>
 #include <linux/slab.h>
@@ -617,6 +618,16 @@
 	kfree(helper->crtc_info);
 }
 
+static void drm_fb_helper_resume_worker(struct work_struct *work)
+{
+	struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
+						    resume_work);
+
+	console_lock();
+	fb_set_suspend(helper->fbdev, 0);
+	console_unlock();
+}
+
 static void drm_fb_helper_dirty_work(struct work_struct *work)
 {
 	struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
@@ -648,6 +659,7 @@
 {
 	INIT_LIST_HEAD(&helper->kernel_fb_list);
 	spin_lock_init(&helper->dirty_lock);
+	INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
 	INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
 	helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
 	helper->funcs = funcs;
@@ -1023,17 +1035,65 @@
 /**
  * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
  * @fb_helper: driver-allocated fbdev helper
- * @state: desired state, zero to resume, non-zero to suspend
+ * @suspend: whether to suspend or resume
  *
- * A wrapper around fb_set_suspend implemented by fbdev core
+ * A wrapper around fb_set_suspend implemented by fbdev core.
+ * Use drm_fb_helper_set_suspend_unlocked() if you don't need to take
+ * the lock yourself.
  */
-void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
 {
 	if (fb_helper && fb_helper->fbdev)
-		fb_set_suspend(fb_helper->fbdev, state);
+		fb_set_suspend(fb_helper->fbdev, suspend);
 }
 EXPORT_SYMBOL(drm_fb_helper_set_suspend);
 
+/**
+ * drm_fb_helper_set_suspend_unlocked - wrapper around fb_set_suspend that also
+ *                                      takes the console lock
+ * @fb_helper: driver-allocated fbdev helper
+ * @suspend: whether to suspend or resume
+ *
+ * A wrapper around fb_set_suspend() that takes the console lock. If the lock
+ * isn't available on resume, a worker is tasked with waiting for the lock
+ * to become available. The console lock can be pretty contended on resume
+ * due to all the printk activity.
+ *
+ * This function can be called multiple times with the same state since
+ * &fb_info->state is checked to see if fbdev is running or not before locking.
+ *
+ * Use drm_fb_helper_set_suspend() if you need to take the lock yourself.
+ */
+void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
+					bool suspend)
+{
+	if (!fb_helper || !fb_helper->fbdev)
+		return;
+
+	/* make sure there's no pending/ongoing resume */
+	flush_work(&fb_helper->resume_work);
+
+	if (suspend) {
+		if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING)
+			return;
+
+		console_lock();
+
+	} else {
+		if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING)
+			return;
+
+		if (!console_trylock()) {
+			schedule_work(&fb_helper->resume_work);
+			return;
+		}
+	}
+
+	fb_set_suspend(fb_helper->fbdev, suspend);
+	console_unlock();
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
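
A hedged sketch of the intended use in driver PM hooks (example_drm and both
hooks are hypothetical):

#include <linux/device.h>
#include <drm/drm_fb_helper.h>

struct example_drm {
	struct drm_fb_helper fb_helper;
};

static int example_pm_suspend(struct device *dev)
{
	struct example_drm *priv = dev_get_drvdata(dev);

	drm_fb_helper_set_suspend_unlocked(&priv->fb_helper, true);
	/* ... power down the hardware ... */
	return 0;
}

static int example_pm_resume(struct device *dev)
{
	struct example_drm *priv = dev_get_drvdata(dev);

	/* ... power up the hardware ... */
	/* May defer to the resume worker if the console lock is contended. */
	drm_fb_helper_set_suspend_unlocked(&priv->fb_helper, false);
	return 0;
}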
+
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -2193,7 +2253,7 @@
  * @fb_helper: the drm_fb_helper
  *
  * Scan the connectors attached to the fb_helper and try to put together a
- * setup after *notification of a change in output configuration.
+ * setup after notification of a change in output configuration.
  *
  * Called at runtime, takes the mode config locks to be able to check/change the
  * modeset configuration. Must be run from process context (which usually means
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 0645c85..29c56b4 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -36,19 +36,60 @@
 }
 
 /**
+ * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
+ * @bpp: bits per pixel
+ * @depth: bit depth per pixel
+ *
+ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+ * Useful in fbdev emulation code, since that deals in those values.
+ */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+	uint32_t fmt;
+
+	switch (bpp) {
+	case 8:
+		fmt = DRM_FORMAT_C8;
+		break;
+	case 16:
+		if (depth == 15)
+			fmt = DRM_FORMAT_XRGB1555;
+		else
+			fmt = DRM_FORMAT_RGB565;
+		break;
+	case 24:
+		fmt = DRM_FORMAT_RGB888;
+		break;
+	case 32:
+		if (depth == 24)
+			fmt = DRM_FORMAT_XRGB8888;
+		else if (depth == 30)
+			fmt = DRM_FORMAT_XRGB2101010;
+		else
+			fmt = DRM_FORMAT_ARGB8888;
+		break;
+	default:
+		DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+		fmt = DRM_FORMAT_XRGB8888;
+		break;
+	}
+
+	return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
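+
+A short usage sketch for fbdev emulation code (example_fill_cmd is
+hypothetical):
+
+#include <drm/drmP.h>
+
+static void example_fill_cmd(struct drm_mode_fb_cmd2 *cmd,
+			     uint32_t bpp, uint32_t depth)
+{
+	/* e.g. bpp=32, depth=24 yields DRM_FORMAT_XRGB8888 */
+	cmd->pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+}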
+
+/**
  * drm_get_format_name - return a string for drm fourcc format
  * @format: format to compute name of
  *
- * Note that the buffer used by this function is globally shared and owned by
- * the function itself.
- *
- * FIXME: This isn't really multithreading safe.
+ * Note that the buffer returned by this function is owned by the caller
+ * and will need to be freed using kfree().
  */
-const char *drm_get_format_name(uint32_t format)
+char *drm_get_format_name(uint32_t format)
 {
-	static char buf[32];
+	char *buf = kmalloc(32, GFP_KERNEL);
+
+	/* Bail out on allocation failure; callers already treat the name
+	 * as optional (kfree(NULL) and printing a NULL string are safe). */
+	if (!buf)
+		return NULL;
 
-	snprintf(buf, sizeof(buf),
+	snprintf(buf, 32,
 		 "%c%c%c%c %s-endian (0x%08x)",
 		 printable_char(format & 0xff),
 		 printable_char((format >> 8) & 0xff),
@@ -73,6 +114,8 @@
 void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
 			  int *bpp)
 {
+	char *format_name;
+
 	switch (format) {
 	case DRM_FORMAT_C8:
 	case DRM_FORMAT_RGB332:
@@ -127,8 +170,9 @@
 		*bpp = 32;
 		break;
 	default:
-		DRM_DEBUG_KMS("unsupported pixel format %s\n",
-			      drm_get_format_name(format));
+		format_name = drm_get_format_name(format);
+		DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
+		kfree(format_name);
 		*depth = 0;
 		*bpp = 0;
 		break;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
new file mode 100644
index 0000000..30dc01e
--- /dev/null
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -0,0 +1,831 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_framebuffer.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Frame buffers are abstract memory objects that provide a source of pixels to
+ * scanout to a CRTC. Applications explicitly request the creation of frame
+ * buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and receive an opaque
+ * handle that can be passed to the KMS CRTC control, plane configuration and
+ * page flip functions.
+ *
+ * Frame buffers rely on the underlying memory manager for allocating backing
+ * storage. When creating a frame buffer applications pass a memory handle
+ * (or a list of memory handles for multi-planar formats) through the
+ * struct &drm_mode_fb_cmd2 argument. For drivers using GEM as their userspace
+ * buffer management interface this would be a GEM handle.  Drivers are however
+ * free to use their own backing storage object handles, e.g. vmwgfx directly
+ * exposes special TTM handles to userspace and so expects TTM handles in the
+ * create ioctl and not GEM handles.
+ *
+ * Framebuffers are tracked with struct &drm_framebuffer. They are published
+ * using drm_framebuffer_init() - after calling that function userspace can use
+ * and access the framebuffer object. The helper function
+ * drm_helper_mode_fill_fb_struct() can be used to pre-fill the required
+ * metadata fields.
+ *
+ * The lifetime of a drm framebuffer is controlled with a reference count,
+ * drivers can grab additional references with drm_framebuffer_reference() and
+ * drop them again with drm_framebuffer_unreference(). For driver-private
+ * framebuffers for which the last reference is never dropped (e.g. for the
+ * fbdev framebuffer when the struct &drm_framebuffer is embedded into
+ * the fbdev helper struct) drivers can manually clean up a framebuffer at
+ * module unload time with drm_framebuffer_unregister_private(). But doing this
+ * is not recommended, and it's better to have a normal free-standing struct
+ * &drm_framebuffer.
+ */
+
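+/*
+ * A minimal sketch (not part of this patch) of how the pieces above fit
+ * together in a driver's fb_create callback; "foo_fb" and "foo_fb_funcs"
+ * are hypothetical driver structures and backing-storage lookup is elided:
+ *
+ *	static struct drm_framebuffer *
+ *	foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ *		      const struct drm_mode_fb_cmd2 *mode_cmd)
+ *	{
+ *		struct foo_fb *foo_fb;
+ *		int ret;
+ *
+ *		foo_fb = kzalloc(sizeof(*foo_fb), GFP_KERNEL);
+ *		if (!foo_fb)
+ *			return ERR_PTR(-ENOMEM);
+ *
+ *		drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);
+ *
+ *		ret = drm_framebuffer_init(dev, &foo_fb->base, &foo_fb_funcs);
+ *		if (ret) {
+ *			kfree(foo_fb);
+ *			return ERR_PTR(ret);
+ *		}
+ *
+ *		return &foo_fb->base;
+ *	}
+ */
+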
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the graphics configuration, given a user request. This is
+ * the original addfb ioctl which only supported RGB formats.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd *or = data;
+	struct drm_mode_fb_cmd2 r = {};
+	int ret;
+
+	/* convert to new format and call new ioctl */
+	r.fb_id = or->fb_id;
+	r.width = or->width;
+	r.height = or->height;
+	r.pitches[0] = or->pitch;
+	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+	r.handles[0] = or->handle;
+
+	ret = drm_mode_addfb2(dev, &r, file_priv);
+	if (ret)
+		return ret;
+
+	or->fb_id = r.fb_id;
+
+	return 0;
+}
+
+static int format_check(const struct drm_mode_fb_cmd2 *r)
+{
+	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+	char *format_name;
+
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_XBGR4444:
+	case DRM_FORMAT_RGBX4444:
+	case DRM_FORMAT_BGRX4444:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_ABGR4444:
+	case DRM_FORMAT_RGBA4444:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_AYUV:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 0;
+	default:
+		format_name = drm_get_format_name(r->pixel_format);
+		DRM_DEBUG_KMS("invalid pixel format %s\n", format_name);
+		kfree(format_name);
+		return -EINVAL;
+	}
+}
+
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+	int ret, hsub, vsub, num_planes, i;
+
+	ret = format_check(r);
+	if (ret) {
+		char *format_name = drm_get_format_name(r->pixel_format);
+		DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
+		kfree(format_name);
+		return ret;
+	}
+
+	hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+	vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+	num_planes = drm_format_num_planes(r->pixel_format);
+
+	if (r->width == 0 || r->width % hsub) {
+		DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
+		return -EINVAL;
+	}
+
+	if (r->height == 0 || r->height % vsub) {
+		DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_planes; i++) {
+		unsigned int width = r->width / (i != 0 ? hsub : 1);
+		unsigned int height = r->height / (i != 0 ? vsub : 1);
+		unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+
+		if (!r->handles[i]) {
+			DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+			return -EINVAL;
+		}
+
+		if ((uint64_t) width * cpp > UINT_MAX)
+			return -ERANGE;
+
+		if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+			return -ERANGE;
+
+		if (r->pitches[i] < width * cpp) {
+			DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+			return -EINVAL;
+		}
+
+		if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
+			DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
+				      r->modifier[i], i);
+			return -EINVAL;
+		}
+
+		/* modifier specific checks: */
+		switch (r->modifier[i]) {
+		case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
+			/* NOTE: the pitch restriction may be lifted later if it turns
+			 * out that no hw has this restriction:
+			 */
+			if (r->pixel_format != DRM_FORMAT_NV12 ||
+					width % 128 || height % 32 ||
+					r->pitches[i] % 128) {
+				DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
+				return -EINVAL;
+			}
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	for (i = num_planes; i < 4; i++) {
+		if (r->modifier[i]) {
+			DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
+			return -EINVAL;
+		}
+
+		/* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */
+		if (!(r->flags & DRM_MODE_FB_MODIFIERS))
+			continue;
+
+		if (r->handles[i]) {
+			DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
+			return -EINVAL;
+		}
+
+		if (r->pitches[i]) {
+			DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
+			return -EINVAL;
+		}
+
+		if (r->offsets[i]) {
+			DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+struct drm_framebuffer *
+drm_internal_framebuffer_create(struct drm_device *dev,
+				const struct drm_mode_fb_cmd2 *r,
+				struct drm_file *file_priv)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_framebuffer *fb;
+	int ret;
+
+	if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
+		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if ((config->min_width > r->width) || (r->width > config->max_width)) {
+		DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+			  r->width, config->min_width, config->max_width);
+		return ERR_PTR(-EINVAL);
+	}
+	if ((config->min_height > r->height) || (r->height > config->max_height)) {
+		DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+			  r->height, config->min_height, config->max_height);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (r->flags & DRM_MODE_FB_MODIFIERS &&
+	    !dev->mode_config.allow_fb_modifiers) {
+		DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = framebuffer_check(r);
+	if (ret)
+		return ERR_PTR(ret);
+
+	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+	if (IS_ERR(fb)) {
+		DRM_DEBUG_KMS("could not create framebuffer\n");
+		return fb;
+	}
+
+	return fb;
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the graphics configuration, given a user request with
+ * format. This is the second version of the addfb ioctl, which supports
+ * multi-planar framebuffers and uses fourcc codes as pixel format specifiers.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+		    void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd2 *r = data;
+	struct drm_framebuffer *fb;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_internal_framebuffer_create(dev, r, file_priv);
+	if (IS_ERR(fb))
+		return PTR_ERR(fb);
+
+	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+	r->fb_id = fb->base.id;
+
+	/* Transfer ownership to the filp for reaping on close */
+	mutex_lock(&file_priv->fbs_lock);
+	list_add(&fb->filp_head, &file_priv->fbs);
+	mutex_unlock(&file_priv->fbs_lock);
+
+	return 0;
+}
+
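+/*
+ * For illustration, the userspace side of the addfb2 path is roughly the
+ * following sketch; "fd" and "bo_handle" are assumed to come from opening
+ * the drm device and allocating a buffer beforehand:
+ *
+ *	struct drm_mode_fb_cmd2 f = {
+ *		.width = 1920,
+ *		.height = 1080,
+ *		.pixel_format = DRM_FORMAT_XRGB8888,
+ *		.handles[0] = bo_handle,
+ *		.pitches[0] = 1920 * 4,
+ *	};
+ *	uint32_t fb_id = 0;
+ *
+ *	if (drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) == 0)
+ *		fb_id = f.fb_id;
+ */
+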
+struct drm_mode_rmfb_work {
+	struct work_struct work;
+	struct list_head fbs;
+};
+
+static void drm_mode_rmfb_work_fn(struct work_struct *w)
+{
+	struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
+
+	while (!list_empty(&arg->fbs)) {
+		struct drm_framebuffer *fb =
+			list_first_entry(&arg->fbs, typeof(*fb), filp_head);
+
+		list_del_init(&fb->filp_head);
+		drm_framebuffer_remove(fb);
+	}
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv)
+{
+	struct drm_framebuffer *fb = NULL;
+	struct drm_framebuffer *fbl = NULL;
+	uint32_t *id = data;
+	int found = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_framebuffer_lookup(dev, *id);
+	if (!fb)
+		return -ENOENT;
+
+	mutex_lock(&file_priv->fbs_lock);
+	list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+		if (fb == fbl)
+			found = 1;
+	if (!found) {
+		mutex_unlock(&file_priv->fbs_lock);
+		goto fail_unref;
+	}
+
+	list_del_init(&fb->filp_head);
+	mutex_unlock(&file_priv->fbs_lock);
+
+	/* drop the reference we picked up in framebuffer lookup */
+	drm_framebuffer_unreference(fb);
+
+	/*
+	 * we now own the reference that was stored in the fbs list
+	 *
+	 * drm_framebuffer_remove may fail with -EINTR on pending signals,
+	 * so run this in a separate stack as there's no way to correctly
+	 * handle this after the fb is already removed from the lookup table.
+	 */
+	if (drm_framebuffer_read_refcount(fb) > 1) {
+		struct drm_mode_rmfb_work arg;
+
+		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+		INIT_LIST_HEAD(&arg.fbs);
+		list_add_tail(&fb->filp_head, &arg.fbs);
+
+		schedule_work(&arg.work);
+		flush_work(&arg.work);
+		destroy_work_on_stack(&arg.work);
+	} else {
+		drm_framebuffer_unreference(fb);
+	}
+
+	return 0;
+
+fail_unref:
+	drm_framebuffer_unreference(fb);
+	return -ENOENT;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd *r = data;
+	struct drm_framebuffer *fb;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_framebuffer_lookup(dev, r->fb_id);
+	if (!fb)
+		return -ENOENT;
+
+	r->height = fb->height;
+	r->width = fb->width;
+	r->depth = fb->depth;
+	r->bpp = fb->bits_per_pixel;
+	r->pitch = fb->pitches[0];
+	if (fb->funcs->create_handle) {
+		if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
+		    drm_is_control_client(file_priv)) {
+			ret = fb->funcs->create_handle(fb, file_priv,
+						       &r->handle);
+		} else {
+			/* GET_FB() is an unprivileged ioctl so we must not
+			 * return a buffer-handle to non-master processes! For
+			 * backwards-compatibility reasons, we cannot make
+			 * GET_FB() privileged, so just return an invalid handle
+			 * for non-masters. */
+			r->handle = 0;
+			ret = 0;
+		}
+	} else {
+		ret = -ENODEV;
+	}
+
+	drm_framebuffer_unreference(fb);
+
+	return ret;
+}
+
+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB and flush out the damaged area supplied by userspace as a clip
+ * rectangle list. Generic userspace which does frontbuffer rendering must call
+ * this ioctl to flush out the changes on manual-update display outputs, e.g.
+ * usb display-link, mipi manual update panels or edp panel self refresh modes.
+ *
+ * Modesetting drivers which always update the frontbuffer do not need to
+ * implement the corresponding ->dirty framebuffer callback.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_clip_rect __user *clips_ptr;
+	struct drm_clip_rect *clips = NULL;
+	struct drm_mode_fb_dirty_cmd *r = data;
+	struct drm_framebuffer *fb;
+	unsigned flags;
+	int num_clips;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_framebuffer_lookup(dev, r->fb_id);
+	if (!fb)
+		return -ENOENT;
+
+	num_clips = r->num_clips;
+	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
+	if (!num_clips != !clips_ptr) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+	/* If userspace annotates copy, clips must come in pairs */
+	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	if (num_clips && clips_ptr) {
+		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+			ret = -EINVAL;
+			goto out_err1;
+		}
+		clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
+		if (!clips) {
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+
+		ret = copy_from_user(clips, clips_ptr,
+				     num_clips * sizeof(*clips));
+		if (ret) {
+			ret = -EFAULT;
+			goto out_err2;
+		}
+	}
+
+	if (fb->funcs->dirty) {
+		ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+				       clips, num_clips);
+	} else {
+		ret = -ENOSYS;
+	}
+
+out_err2:
+	kfree(clips);
+out_err1:
+	drm_framebuffer_unreference(fb);
+
+	return ret;
+}
+
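+/*
+ * The userspace side of this, sketched for illustration only ("fd" and
+ * "fb_id" are assumed from earlier setup): flush a 64x64 region after
+ * frontbuffer rendering.
+ *
+ *	struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
+ *	struct drm_mode_fb_dirty_cmd dirty = {
+ *		.fb_id = fb_id,
+ *		.num_clips = 1,
+ *		.clips_ptr = (uint64_t)(uintptr_t)&clip,
+ *	};
+ *
+ *	drmIoctl(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
+ */
+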
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @priv: drm file whose framebuffers are to be released
+ *
+ * Destroy all the FBs associated with @priv. This is called when the drm file
+ * is closed, not via an ioctl, and returns nothing.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+	struct drm_framebuffer *fb, *tfb;
+	struct drm_mode_rmfb_work arg;
+
+	INIT_LIST_HEAD(&arg.fbs);
+
+	/*
+	 * When the file gets released that means no one else can access the fb
+	 * list any more, so no need to grab fpriv->fbs_lock. And we need to
+	 * avoid upsetting lockdep since the universal cursor code adds a
+	 * framebuffer while holding mutex locks.
+	 *
+	 * Note that a real deadlock between fpriv->fbs_lock and the modeset
+	 * locks is impossible here since no one else but this function can get
+	 * at it any more.
+	 */
+	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+		if (drm_framebuffer_read_refcount(fb) > 1) {
+			list_move_tail(&fb->filp_head, &arg.fbs);
+		} else {
+			list_del_init(&fb->filp_head);
+
+			/* This drops the fpriv->fbs reference. */
+			drm_framebuffer_unreference(fb);
+		}
+	}
+
+	if (!list_empty(&arg.fbs)) {
+		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+
+		schedule_work(&arg.work);
+		flush_work(&arg.work);
+		destroy_work_on_stack(&arg.work);
+	}
+}
+
+void drm_framebuffer_free(struct kref *kref)
+{
+	struct drm_framebuffer *fb =
+			container_of(kref, struct drm_framebuffer, base.refcount);
+	struct drm_device *dev = fb->dev;
+
+	/*
+	 * The lookup idr holds a weak reference, which has not necessarily been
+	 * removed at this point. Check for that.
+	 */
+	drm_mode_object_unregister(dev, &fb->base);
+
+	fb->funcs->destroy(fb);
+}
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ * @fb: framebuffer to be initialized
+ * @funcs: ... with these functions
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * IMPORTANT:
+ * This function publishes the fb and makes it available for concurrent access
+ * by other users. Which means by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+			 const struct drm_framebuffer_funcs *funcs)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&fb->filp_head);
+	fb->dev = dev;
+	fb->funcs = funcs;
+
+	ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
+				      false, drm_framebuffer_free);
+	if (ret)
+		goto out;
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	dev->mode_config.num_fb++;
+	list_add(&fb->head, &dev->mode_config.fb_list);
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	drm_mode_object_register(dev, &fb->base);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again, using drm_framebuffer_unreference().
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+					       uint32_t id)
+{
+	struct drm_mode_object *obj;
+	struct drm_framebuffer *fb = NULL;
+
+	obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_FB);
+	if (obj)
+		fb = obj_to_fb(obj);
+	return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_lookup);
+
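+/*
+ * The lookup/unreference pairing in practice - this is the pattern used by
+ * the ioctl handlers in this file:
+ *
+ *	fb = drm_framebuffer_lookup(dev, fb_id);
+ *	if (!fb)
+ *		return -ENOENT;
+ *
+ *	... use fb ...
+ *
+ *	drm_framebuffer_unreference(fb);
+ */
+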
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of its own,
+ * i.e. the object may not be destroyed through this call (since it'll lead to a
+ * locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev;
+
+	if (!fb)
+		return;
+
+	dev = fb->dev;
+
+	/* Mark fb as reaped and drop idr ref. */
+	drm_mode_object_unregister(dev, &fb->base);
+}
+EXPORT_SYMBOL(drm_framebuffer_unregister_private);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Cleanup framebuffer. This function is intended to be used from the driver's
+ * ->destroy callback. It can also be used to clean up driver private
+ * framebuffers embedded into a larger structure.
+ *
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously no concern at driver unload time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private().
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	list_del(&fb->head);
+	dev->mode_config.num_fb--;
+	mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
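+/*
+ * A minimal sketch of a driver ->destroy callback built around
+ * drm_framebuffer_cleanup(); "foo_fb" and foo_fb_release_backing() are
+ * hypothetical:
+ *
+ *	static void foo_fb_destroy(struct drm_framebuffer *fb)
+ *	{
+ *		struct foo_fb *foo_fb = container_of(fb, struct foo_fb, base);
+ *
+ *		drm_framebuffer_cleanup(fb);
+ *		foo_fb_release_backing(foo_fb);
+ *		kfree(foo_fb);
+ *	}
+ */
+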
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Scans all the CRTCs and planes of @fb's drm device.  If they're
+ * using @fb, removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+
+	if (!fb)
+		return;
+
+	dev = fb->dev;
+
+	WARN_ON(!list_empty(&fb->filp_head));
+
+	/*
+	 * drm ABI mandates that we remove any deleted framebuffers from active
+	 * usage. But since most sane clients only remove framebuffers they no
+	 * longer need, try to optimize this away.
+	 *
+	 * Since we're holding a reference ourselves, observing a refcount of 1
+	 * means that we're the last holder and can skip it. Also, the refcount
+	 * can never increase from 1 again, so we don't need any barriers or
+	 * locks.
+	 *
+	 * Note that userspace could try to race with us and instate a new
+	 * usage _after_ we've cleared all current ones. End result will be an
+	 * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
+	 * in this manner.
+	 */
+	if (drm_framebuffer_read_refcount(fb) > 1) {
+		drm_modeset_lock_all(dev);
+		/* remove from any CRTC */
+		drm_for_each_crtc(crtc, dev) {
+			if (crtc->primary->fb == fb) {
+				/* should turn off the crtc */
+				if (drm_crtc_force_disable(crtc))
+					DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+			}
+		}
+
+		drm_for_each_plane(plane, dev) {
+			if (plane->fb == fb)
+				drm_plane_force_disable(plane);
+		}
+		drm_modeset_unlock_all(dev);
+	}
+
+	drm_framebuffer_unreference(fb);
+}
+EXPORT_SYMBOL(drm_framebuffer_remove);
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index 3d2e91c..b404287 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -65,30 +65,34 @@
 
 int drm_global_item_ref(struct drm_global_reference *ref)
 {
-	int ret;
+	int ret = 0;
 	struct drm_global_item *item = &glob[ref->global_type];
 
 	mutex_lock(&item->mutex);
 	if (item->refcount == 0) {
-		item->object = kzalloc(ref->size, GFP_KERNEL);
-		if (unlikely(item->object == NULL)) {
+		ref->object = kzalloc(ref->size, GFP_KERNEL);
+		if (unlikely(ref->object == NULL)) {
 			ret = -ENOMEM;
-			goto out_err;
+			goto error_unlock;
 		}
-
-		ref->object = item->object;
 		ret = ref->init(ref);
 		if (unlikely(ret != 0))
-			goto out_err;
+			goto error_free;
 
+		item->object = ref->object;
+	} else {
+		ref->object = item->object;
 	}
+
 	++item->refcount;
-	ref->object = item->object;
 	mutex_unlock(&item->mutex);
 	return 0;
-out_err:
+
+error_free:
+	kfree(ref->object);
+	ref->object = NULL;
+error_unlock:
 	mutex_unlock(&item->mutex);
-	item->object = NULL;
 	return ret;
 }
 EXPORT_SYMBOL(drm_global_item_ref);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 7b30b30..dae18e5 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -142,7 +142,7 @@
 			      unsigned long add)
 {
 	int ret;
-	unsigned long mask = (1 << bits) - 1;
+	unsigned long mask = (1UL << bits) - 1;
 	unsigned long first, unshifted_key;
 
 	unshifted_key = hash_long(seed, bits);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 57676f8..5f896e7 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -346,6 +346,7 @@
 	struct drm_stats __user *stats;
 	int i, err;
 
+	memset(&s32, 0, sizeof(drm_stats32_t));
 	stats = compat_alloc_user_space(sizeof(*stats));
 	if (!stats)
 		return -EFAULT;
@@ -1015,6 +1016,7 @@
 	return 0;
 }
 
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
 typedef struct drm_mode_fb_cmd232 {
 	u32 fb_id;
 	u32 width;
@@ -1071,6 +1073,7 @@
 
 	return 0;
 }
+#endif
 
 static drm_ioctl_compat_t *drm_compat_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
@@ -1104,7 +1107,9 @@
 	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
 #endif
 	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
 	[DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+#endif
 };
 
 /**
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index bb51ee9..b97c421 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -189,9 +189,8 @@
 	 */
 	if (client->idx == 0) {
 		client->auth = file_priv->authenticated;
-		client->pid = pid_vnr(file_priv->pid);
-		client->uid = from_kuid_munged(current_user_ns(),
-					       file_priv->uid);
+		client->pid = task_pid_vnr(current);
+		client->uid = overflowuid;
 		client->magic = 0;
 		client->iocs = 0;
 
@@ -228,6 +227,7 @@
 static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_get_cap *req = data;
+	struct drm_crtc *crtc;
 
 	req->value = 0;
 	switch (req->capability) {
@@ -254,6 +254,13 @@
 	case DRM_CAP_ASYNC_PAGE_FLIP:
 		req->value = dev->mode_config.async_page_flip;
 		break;
+	case DRM_CAP_PAGE_FLIP_TARGET:
+		req->value = 1;
+		drm_for_each_crtc(crtc, dev) {
+			if (!crtc->funcs->page_flip_target)
+				req->value = 0;
+		}
+		break;
 	case DRM_CAP_CURSOR_WIDTH:
 		if (dev->mode_config.cursor_width)
 			req->value = dev->mode_config.cursor_width;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c0c3f20..404a1ce 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -713,10 +713,10 @@
  * Negative value on error, failure or if not supported in current
  * video mode:
  *
- * -EINVAL   - Invalid CRTC.
- * -EAGAIN   - Temporary unavailable, e.g., called before initial modeset.
- * -ENOTSUPP - Function not supported in current display mode.
- * -EIO      - Failed, e.g., due to failed scanout position query.
+ * -EINVAL    Invalid CRTC.
+ * -EAGAIN    Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP  Function not supported in current display mode.
+ * -EIO       Failed, e.g., due to failed scanout position query.
  *
  * Returns or'ed positive status flags on success:
  *
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 3187c4b..45db36c 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -27,7 +27,8 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_aux_dev.h>
+
+#include "drm_crtc_helper_internal.h"
 
 MODULE_AUTHOR("David Airlie, Jesse Barnes");
 MODULE_DESCRIPTION("DRM KMS helper");
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
new file mode 100644
index 0000000..9f17085
--- /dev/null
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_mode_object.h>
+
+#include "drm_crtc_internal.h"
+
+/*
+ * Internal function to assign a slot in the object idr and optionally
+ * register the object into the idr.
+ */
+int drm_mode_object_get_reg(struct drm_device *dev,
+			    struct drm_mode_object *obj,
+			    uint32_t obj_type,
+			    bool register_obj,
+			    void (*obj_free_cb)(struct kref *kref))
+{
+	int ret;
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+	if (ret >= 0) {
+		/*
+		 * Set up the object linking under the protection of the idr
+		 * lock so that other users can't see inconsistent state.
+		 */
+		obj->id = ret;
+		obj->type = obj_type;
+		if (obj_free_cb) {
+			obj->free_cb = obj_free_cb;
+			kref_init(&obj->refcount);
+		}
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * drm_mode_object_get - allocate a new modeset identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
+ *
+ * Create a unique identifier for @obj in @dev's identifier space.  Used
+ * for tracking modes, CRTCs and connectors. Note that despite the _get postfix,
+ * modeset identifiers are _not_ reference counted. Hence don't use this for
+ * reference counted modeset objects like framebuffers.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_object_get(struct drm_device *dev,
+			struct drm_mode_object *obj, uint32_t obj_type)
+{
+	return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL);
+}
+
+void drm_mode_object_register(struct drm_device *dev,
+			      struct drm_mode_object *obj)
+{
+	mutex_lock(&dev->mode_config.idr_mutex);
+	idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
+	mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+/**
+ * drm_mode_object_unregister - free a modeset identifier
+ * @dev: DRM device
+ * @object: object to free
+ *
+ * Free the identifier of @object from @dev's unique identifier pool.
+ * This function can be called multiple times, and guards against
+ * multiple removals.
+ * These modeset identifiers are _not_ reference counted. Hence don't use this
+ * for reference counted modeset objects like framebuffers.
+ */
+void drm_mode_object_unregister(struct drm_device *dev,
+				struct drm_mode_object *object)
+{
+	mutex_lock(&dev->mode_config.idr_mutex);
+	if (object->id) {
+		idr_remove(&dev->mode_config.crtc_idr, object->id);
+		object->id = 0;
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+					       uint32_t id, uint32_t type)
+{
+	struct drm_mode_object *obj = NULL;
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	obj = idr_find(&dev->mode_config.crtc_idr, id);
+	if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
+		obj = NULL;
+	if (obj && obj->id != id)
+		obj = NULL;
+
+	if (obj && obj->free_cb) {
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	return obj;
+}
+
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * This function is used to look up a modeset object. It will acquire a
+ * reference for reference counted objects. This reference must be dropped again
+ * by calling drm_mode_object_unreference().
+ */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+		uint32_t id, uint32_t type)
+{
+	struct drm_mode_object *obj = NULL;
+
+	obj = __drm_mode_object_find(dev, id, type);
+	return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
+
+/**
+ * drm_mode_object_unreference - decrement the object refcount
+ * @obj: mode_object
+ *
+ * This function decrements the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. This is used to drop references
+ * acquired with drm_mode_object_reference().
+ */
+void drm_mode_object_unreference(struct drm_mode_object *obj)
+{
+	if (obj->free_cb) {
+		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+		kref_put(&obj->refcount, obj->free_cb);
+	}
+}
+EXPORT_SYMBOL(drm_mode_object_unreference);
+
+/**
+ * drm_mode_object_reference - increment the object refcount
+ * @obj: mode_object
+ *
+ * This function increments the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. References should be dropped again
+ * by calling drm_mode_object_unreference().
+ */
+void drm_mode_object_reference(struct drm_mode_object *obj)
+{
+	if (obj->free_cb) {
+		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+		kref_get(&obj->refcount);
+	}
+}
+EXPORT_SYMBOL(drm_mode_object_reference);
+
+/**
+ * drm_object_attach_property - attach a property to a modeset object
+ * @obj: drm modeset object
+ * @property: property to attach
+ * @init_val: initial value of the property
+ *
+ * This attaches the given property to the modeset object with the given initial
+ * value. Currently this function cannot fail since the properties are stored in
+ * a statically sized array.
+ */
+void drm_object_attach_property(struct drm_mode_object *obj,
+				struct drm_property *property,
+				uint64_t init_val)
+{
+	int count = obj->properties->count;
+
+	if (count == DRM_OBJECT_MAX_PROPERTY) {
+		WARN(1, "Failed to attach object property (type: 0x%x). Please "
+			"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+			"you see this message on the same object type.\n",
+			obj->type);
+		return;
+	}
+
+	obj->properties->properties[count] = property;
+	obj->properties->values[count] = init_val;
+	obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
+
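+/*
+ * Typical init-time usage, sketched with a hypothetical driver-private
+ * range property named "foo":
+ *
+ *	struct drm_property *prop;
+ *
+ *	prop = drm_property_create_range(dev, 0, "foo", 0, 255);
+ *	if (prop)
+ *		drm_object_attach_property(&plane->base, prop, 0);
+ */
+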
+/**
+ * drm_object_property_set_value - set the value of a property
+ * @obj: drm mode object to set property value for
+ * @property: property to set
+ * @val: value the property should be set to
+ *
+ * This function sets a given property on a given object. This function only
+ * changes the software state of the property, it does not call into the
+ * driver's ->set_property callback.
+ *
+ * Note that atomic drivers should not have any need to call this, the core will
+ * ensure consistency of values reported back to userspace through the
+ * appropriate ->atomic_get_property callback. Only legacy drivers should call
+ * this function to update the tracked value (after clamping and other
+ * restrictions have been applied).
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_set_value(struct drm_mode_object *obj,
+				  struct drm_property *property, uint64_t val)
+{
+	int i;
+
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->properties[i] == property) {
+			obj->properties->values[i] = val;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_set_value);
+
+/**
+ * drm_object_property_get_value - retrieve the value of a property
+ * @obj: drm mode object to get property value from
+ * @property: property to retrieve
+ * @val: storage for the property value
+ *
+ * This function retrieves the software state of the given property for the
+ * given object. Since there is no driver callback to retrieve the current
+ * property value this might be out of sync with the hardware, depending upon
+ * the driver and property.
+ *
+ * Atomic drivers should never call this function directly, the core will read
+ * out property values through the various ->atomic_get_property callbacks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_get_value(struct drm_mode_object *obj,
+				  struct drm_property *property, uint64_t *val)
+{
+	int i;
+
+	/* read-only properties bypass atomic mechanism and still store
+	 * their value in obj->properties->values[].. mostly to avoid
+	 * having to deal w/ EDID and similar props in atomic paths:
+	 */
+	if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
+			!(property->flags & DRM_MODE_PROP_IMMUTABLE))
+		return drm_atomic_get_property(obj, property, val);
+
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->properties[i] == property) {
+			*val = obj->properties->values[i];
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_get_value);
+
+/* helper for getconnector and getproperties ioctls */
+int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+				   uint32_t __user *prop_ptr,
+				   uint64_t __user *prop_values,
+				   uint32_t *arg_count_props)
+{
+	int i, ret, count;
+
+	for (i = 0, count = 0; i < obj->properties->count; i++) {
+		struct drm_property *prop = obj->properties->properties[i];
+		uint64_t val;
+
+		if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
+			continue;
+
+		if (*arg_count_props > count) {
+			ret = drm_object_property_get_value(obj, prop, &val);
+			if (ret)
+				return ret;
+
+			if (put_user(prop->base.id, prop_ptr + count))
+				return -EFAULT;
+
+			if (put_user(val, prop_values + count))
+				return -EFAULT;
+		}
+
+		count++;
+	}
+	*arg_count_props = count;
+
+	return 0;
+}
+
+/**
+ * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the current value for an object's property. Compared
+ * to the connector specific ioctl this one is extended to also work on crtc and
+ * plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
+{
+	struct drm_mode_obj_get_properties *arg = data;
+	struct drm_mode_object *obj;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+	if (!obj) {
+		ret = -ENOENT;
+		goto out;
+	}
+	if (!obj->properties) {
+		ret = -EINVAL;
+		goto out_unref;
+	}
+
+	ret = drm_mode_object_get_properties(obj, file_priv->atomic,
+			(uint32_t __user *)(unsigned long)(arg->props_ptr),
+			(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
+			&arg->count_props);
+
+out_unref:
+	drm_mode_object_unreference(obj);
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+					       uint32_t prop_id)
+{
+	int i;
+
+	for (i = 0; i < obj->properties->count; i++)
+		if (obj->properties->properties[i]->base.id == prop_id)
+			return obj->properties->properties[i];
+
+	return NULL;
+}
+
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	struct drm_mode_obj_set_property *arg = data;
+	struct drm_mode_object *arg_obj;
+	struct drm_property *property;
+	int ret = -EINVAL;
+	struct drm_mode_object *ref;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+	if (!arg_obj) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	if (!arg_obj->properties)
+		goto out_unref;
+
+	property = drm_mode_obj_find_prop_id(arg_obj, arg->prop_id);
+	if (!property)
+		goto out_unref;
+
+	if (!drm_property_change_valid_get(property, arg->value, &ref))
+		goto out_unref;
+
+	switch (arg_obj->type) {
+	case DRM_MODE_OBJECT_CONNECTOR:
+		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+						      arg->value);
+		break;
+	case DRM_MODE_OBJECT_CRTC:
+		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+		break;
+	case DRM_MODE_OBJECT_PLANE:
+		ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
+						  property, arg->value);
+		break;
+	}
+
+	drm_property_change_valid_put(property, ref);
+
+out_unref:
+	drm_mode_object_unreference(arg_obj);
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 1570487..53f07ac 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -657,6 +657,15 @@
 }
 EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
 
+/**
+ * drm_bus_flags_from_videomode - extract bus flags from a videomode
+ * @vm: videomode structure to use
+ * @bus_flags: information about pixelclk and DE polarity will be stored here
+ *
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH) and DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE
+ * in @bus_flags according to the DISPLAY_FLAGS found in @vm.
+ */
 void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
 {
 	*bus_flags = 0;
@@ -677,6 +686,7 @@
  * of_get_drm_display_mode - get a drm_display_mode from devicetree
  * @np: device_node with the timing specification
  * @dmode: will be set to the return value
+ * @bus_flags: information about pixelclk and DE polarity
  * @index: index into the list of display timings in devicetree
  *
  * This function is expensive and should only be used, if only one mode is to be
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
new file mode 100644
index 0000000..1d45738
--- /dev/null
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_plane_helper.h>
+
+/**
+ * DOC: aux kms helpers
+ *
+ * This helper library contains various one-off functions which don't really fit
+ * anywhere else in the DRM modeset helper library.
+ */
+
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * 						connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+	struct drm_connector *connector, *tmp;
+	struct list_head panel_list;
+
+	INIT_LIST_HEAD(&panel_list);
+
+	list_for_each_entry_safe(connector, tmp,
+				 &dev->mode_config.connector_list, head) {
+		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			list_move_tail(&connector->head, &panel_list);
+	}
+
+	list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
+
+/**
+ * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
+ * @fb: drm_framebuffer object to fill out
+ * @mode_cmd: metadata from the userspace fb creation request
+ *
+ * This helper can be used in a driver's fb_create callback to pre-fill the fb's
+ * metadata fields.
+ */
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+				    const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	int i;
+
+	fb->width = mode_cmd->width;
+	fb->height = mode_cmd->height;
+	for (i = 0; i < 4; i++) {
+		fb->pitches[i] = mode_cmd->pitches[i];
+		fb->offsets[i] = mode_cmd->offsets[i];
+		fb->modifier[i] = mode_cmd->modifier[i];
+	}
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+				    &fb->bits_per_pixel);
+	fb->pixel_format = mode_cmd->pixel_format;
+	fb->flags = mode_cmd->flags;
+}
+EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
+
+/*
+ * This is the minimal list of formats that seem to be safe for modeset use
+ * with all current DRM drivers.  Most hardware can actually support more
+ * formats than this and drivers may specify a more accurate list when
+ * creating the primary plane.  However drivers that still call
+ * drm_plane_init() will use this minimal format list as the default.
+ */
+static const uint32_t safe_modeset_formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
+static struct drm_plane *create_primary_plane(struct drm_device *dev)
+{
+	struct drm_plane *primary;
+	int ret;
+
+	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+	if (primary == NULL) {
+		DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+		return NULL;
+	}
+
+	/*
+	 * Remove the format_default field from drm_plane when dropping
+	 * this helper.
+	 */
+	primary->format_default = true;
+
+	/* possible_crtc's will be filled in later by crtc_init */
+	ret = drm_universal_plane_init(dev, primary, 0,
+				       &drm_primary_helper_funcs,
+				       safe_modeset_formats,
+				       ARRAY_SIZE(safe_modeset_formats),
+				       DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret) {
+		kfree(primary);
+		primary = NULL;
+	}
+
+	return primary;
+}
+
+/**
+ * drm_crtc_init - Legacy CRTC initialization function
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * Initialize a CRTC object with a default helper-provided primary plane and no
+ * cursor plane.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+		  const struct drm_crtc_funcs *funcs)
+{
+	struct drm_plane *primary;
+
+	primary = create_primary_plane(dev);
+	return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
+					 NULL);
+}
+EXPORT_SYMBOL(drm_crtc_init);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index b522aab..7899fc1 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -64,18 +64,6 @@
  */
 
 /*
- * This is the minimal list of formats that seem to be safe for modeset use
- * with all current DRM drivers.  Most hardware can actually support more
- * formats than this and drivers may specify a more accurate list when
- * creating the primary plane.  However drivers that still call
- * drm_plane_init() will use this minimal format list as the default.
- */
-static const uint32_t safe_modeset_formats[] = {
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_ARGB8888,
-};
-
-/*
  * Returns the connectors currently associated with a CRTC.  This function
  * should be called twice:  once with a NULL connector list to retrieve
  * the list size, and once with the properly allocated list to be filled in.
@@ -211,7 +199,7 @@
  * @crtc: owning CRTC of owning plane
  * @fb: framebuffer to flip onto plane
  * @src: source coordinates in 16.16 fixed point
- * @dest: integer destination coordinates
+ * @dst: integer destination coordinates
  * @clip: integer clipping coordinates
  * @rotation: plane rotation
  * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
@@ -438,60 +426,6 @@
 };
 EXPORT_SYMBOL(drm_primary_helper_funcs);
 
-static struct drm_plane *create_primary_plane(struct drm_device *dev)
-{
-	struct drm_plane *primary;
-	int ret;
-
-	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-	if (primary == NULL) {
-		DRM_DEBUG_KMS("Failed to allocate primary plane\n");
-		return NULL;
-	}
-
-	/*
-	 * Remove the format_default field from drm_plane when dropping
-	 * this helper.
-	 */
-	primary->format_default = true;
-
-	/* possible_crtc's will be filled in later by crtc_init */
-	ret = drm_universal_plane_init(dev, primary, 0,
-				       &drm_primary_helper_funcs,
-				       safe_modeset_formats,
-				       ARRAY_SIZE(safe_modeset_formats),
-				       DRM_PLANE_TYPE_PRIMARY, NULL);
-	if (ret) {
-		kfree(primary);
-		primary = NULL;
-	}
-
-	return primary;
-}
-
-/**
- * drm_crtc_init - Legacy CRTC initialization function
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @funcs: callbacks for the new CRTC
- *
- * Initialize a CRTC object with a default helper-provided primary plane and no
- * cursor plane.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
-		  const struct drm_crtc_funcs *funcs)
-{
-	struct drm_plane *primary;
-
-	primary = create_primary_plane(dev);
-	return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
-					 NULL);
-}
-EXPORT_SYMBOL(drm_crtc_init);
-
 int drm_plane_helper_commit(struct drm_plane *plane,
 			    struct drm_plane_state *plane_state,
 			    struct drm_framebuffer *old_fb)
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index a0df377..f6b64d7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -129,6 +129,7 @@
 {
 	bool poll = false;
 	struct drm_connector *connector;
+	unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
 
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
@@ -141,8 +142,13 @@
 			poll = true;
 	}
 
+	if (dev->mode_config.delayed_event) {
+		poll = true;
+		delay = 0;
+	}
+
 	if (poll)
-		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+		schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
 
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
new file mode 100644
index 0000000..a4d81cf
--- /dev/null
+++ b/drivers/gpu/drm/drm_property.c
@@ -0,0 +1,912 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_property.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Properties as represented by &drm_property are used to extend the modeset
+ * interface exposed to userspace. For the atomic modeset IOCTL, properties are
+ * the only way to transport metadata about the desired new modeset
+ * configuration from userspace to the kernel. Properties have a well-defined
+ * value range, which is enforced by the drm core. See the documentation of the
+ * flags member of struct &drm_property for an overview of the different
+ * property types and ranges.
+ *
+ * Properties don't store the current value directly, but need to be
+ * instantiated by attaching them to a &drm_mode_object with
+ * drm_object_attach_property().
+ *
+ * Property values are only 64bit. To support bigger piles of data (like gamma
+ * tables, color correction matrices or large structures) a property can
+ * instead point at a &drm_property_blob with that additional data.
+ *
+ * Properties are defined by their symbolic name, userspace must keep a
+ * per-object mapping from those names to the property ID used in the atomic
+ * IOCTL and in the get/set property IOCTL.
+ */
+
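+/*
+ * An illustrative sketch of the above: creating and attaching an enum
+ * property. The "foo-mode" property and its values are hypothetical:
+ *
+ *	static const struct drm_prop_enum_list foo_modes[] = {
+ *		{ 0, "off" },
+ *		{ 1, "on" },
+ *	};
+ *	struct drm_property *prop;
+ *
+ *	prop = drm_property_create_enum(dev, 0, "foo-mode",
+ *					foo_modes, ARRAY_SIZE(foo_modes));
+ *	if (prop)
+ *		drm_object_attach_property(&connector->base, prop, 0);
+ */
+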
+static bool drm_property_type_valid(struct drm_property *property)
+{
+	if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+		return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+	return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+}
+
+/**
+ * drm_property_create - create a new property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+					 const char *name, int num_values)
+{
+	struct drm_property *property = NULL;
+	int ret;
+
+	property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+	if (!property)
+		return NULL;
+
+	property->dev = dev;
+
+	if (num_values) {
+		property->values = kcalloc(num_values, sizeof(uint64_t),
+					   GFP_KERNEL);
+		if (!property->values)
+			goto fail;
+	}
+
+	ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+	if (ret)
+		goto fail;
+
+	property->flags = flags;
+	property->num_values = num_values;
+	INIT_LIST_HEAD(&property->enum_list);
+
+	if (name) {
+		strncpy(property->name, name, DRM_PROP_NAME_LEN);
+		property->name[DRM_PROP_NAME_LEN-1] = '\0';
+	}
+
+	list_add_tail(&property->head, &dev->mode_config.property_list);
+
+	WARN_ON(!drm_property_type_valid(property));
+
+	return property;
+fail:
+	kfree(property->values);
+	kfree(property);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+/**
+ * drm_property_create_enum - create a new enumeration property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property values
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is only allowed to set one of the predefined values for enumeration
+ * properties.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+					 const char *name,
+					 const struct drm_prop_enum_list *props,
+					 int num_values)
+{
+	struct drm_property *property;
+	int i, ret;
+
+	flags |= DRM_MODE_PROP_ENUM;
+
+	property = drm_property_create(dev, flags, name, num_values);
+	if (!property)
+		return NULL;
+
+	for (i = 0; i < num_values; i++) {
+		ret = drm_property_add_enum(property, i,
+				      props[i].type,
+				      props[i].name);
+		if (ret) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+	}
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_enum);
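+
+/*
+ * Usage sketch (illustrative only): each &drm_prop_enum_list entry maps a
+ * property value to the symbolic name userspace sees. The "scaling mode"
+ * property and its value names are hypothetical.
+ *
+ *	static const struct drm_prop_enum_list scaling_names[] = {
+ *		{ 0, "Full" },
+ *		{ 1, "Center" },
+ *		{ 2, "Full aspect" },
+ *	};
+ *
+ *	prop = drm_property_create_enum(dev, 0, "scaling mode",
+ *					scaling_names,
+ *					ARRAY_SIZE(scaling_names));
+ */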
+
+/**
+ * drm_property_create_bitmask - create a new bitmask property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property bitflags
+ * @num_props: size of the @props array
+ * @supported_bits: bitmask of all supported enumeration values
+ *
+ * This creates a new bitmask drm property which can then be attached to a drm
+ * object with drm_object_attach_property(). The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Compared to plain enumeration properties, userspace is allowed to set any
+ * or'ed together combination of the predefined property bitflag values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+					 int flags, const char *name,
+					 const struct drm_prop_enum_list *props,
+					 int num_props,
+					 uint64_t supported_bits)
+{
+	struct drm_property *property;
+	int i, ret, index = 0;
+	int num_values = hweight64(supported_bits);
+
+	flags |= DRM_MODE_PROP_BITMASK;
+
+	property = drm_property_create(dev, flags, name, num_values);
+	if (!property)
+		return NULL;
+	for (i = 0; i < num_props; i++) {
+		if (!(supported_bits & (1ULL << props[i].type)))
+			continue;
+
+		if (WARN_ON(index >= num_values)) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+
+		ret = drm_property_add_enum(property, index++,
+				      props[i].type,
+				      props[i].name);
+		if (ret) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+	}
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
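+
+/*
+ * Usage sketch (illustrative only): for bitmask properties the .type field
+ * of each entry is a bit position (0-63), and @supported_bits filters which
+ * of the listed entries are actually exposed. The names below are made up.
+ *
+ *	static const struct drm_prop_enum_list feature_bits[] = {
+ *		{ 0, "featureA" },
+ *		{ 1, "featureB" },
+ *	};
+ *
+ *	prop = drm_property_create_bitmask(dev, 0, "features", feature_bits,
+ *					   ARRAY_SIZE(feature_bits),
+ *					   BIT(0) | BIT(1));
+ */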
+
+static struct drm_property *property_create_range(struct drm_device *dev,
+					 int flags, const char *name,
+					 uint64_t min, uint64_t max)
+{
+	struct drm_property *property;
+
+	property = drm_property_create(dev, flags, name, 2);
+	if (!property)
+		return NULL;
+
+	property->values[0] = min;
+	property->values[1] = max;
+
+	return property;
+}
+
+/**
+ * drm_property_create_range - create a new unsigned ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property(). The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is allowed to set any unsigned integer value in the [@min, @max]
+ * range, inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+					 const char *name,
+					 uint64_t min, uint64_t max)
+{
+	return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
+			name, min, max);
+}
+EXPORT_SYMBOL(drm_property_create_range);
+
+/**
+ * drm_property_create_signed_range - create a new signed ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property(). The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is allowed to set any signed integer value in the [@min, @max]
+ * range, inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+					 int flags, const char *name,
+					 int64_t min, int64_t max)
+{
+	return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
+			name, I642U64(min), I642U64(max));
+}
+EXPORT_SYMBOL(drm_property_create_signed_range);
+
+/**
+ * drm_property_create_object - create a new object property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @type: object type from DRM_MODE_OBJECT_* defines
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property(). The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is only allowed to set this to the ID of an object with the given
+ * @type. This is only useful for atomic properties, which is enforced by
+ * requiring the DRM_MODE_PROP_ATOMIC flag.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+						int flags, const char *name,
+						uint32_t type)
+{
+	struct drm_property *property;
+
+	flags |= DRM_MODE_PROP_OBJECT;
+
+	if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
+		return NULL;
+
+	property = drm_property_create(dev, flags, name, 1);
+	if (!property)
+		return NULL;
+
+	property->values[0] = type;
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_object);
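+
+/*
+ * Sketch (illustrative only): since object properties require the atomic
+ * flag, creation looks like this; the "MY_CRTC_ID" name is hypothetical.
+ *
+ *	prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ *					  "MY_CRTC_ID",
+ *					  DRM_MODE_OBJECT_CRTC);
+ */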
+
+/**
+ * drm_property_create_bool - create a new boolean property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property(). The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * This is implemented as a ranged property with only {0, 1} as valid values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+					      const char *name)
+{
+	return drm_property_create_range(dev, flags, name, 0, 1);
+}
+EXPORT_SYMBOL(drm_property_create_bool);
+
+/**
+ * drm_property_add_enum - add a possible value to an enumeration property
+ * @property: enumeration property to change
+ * @index: index of the new enumeration
+ * @value: value of the new enumeration
+ * @name: symbolic name of the new enumeration
+ *
+ * This function adds enumerations to a property.
+ *
+ * Its use is deprecated; drivers should use one of the more specific helpers
+ * to directly create the property with all enumerations already attached.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_property_add_enum(struct drm_property *property, int index,
+			  uint64_t value, const char *name)
+{
+	struct drm_property_enum *prop_enum;
+
+	if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
+		return -EINVAL;
+
+	/*
+	 * Bitmask enum properties have the additional constraint of values
+	 * from 0 to 63
+	 */
+	if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
+			(value > 63))
+		return -EINVAL;
+
+	if (!list_empty(&property->enum_list)) {
+		list_for_each_entry(prop_enum, &property->enum_list, head) {
+			if (prop_enum->value == value) {
+				strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+				prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+				return 0;
+			}
+		}
+	}
+
+	prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+	if (!prop_enum)
+		return -ENOMEM;
+
+	strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+	prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+	prop_enum->value = value;
+
+	property->values[index] = value;
+	list_add_tail(&prop_enum->head, &property->enum_list);
+	return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+/**
+ * drm_property_destroy - destroy a drm property
+ * @dev: drm device
+ * @property: property to destroy
+ *
+ * This function frees a property including any attached resources like
+ * enumeration values.
+ */
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+	struct drm_property_enum *prop_enum, *pt;
+
+	list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
+		list_del(&prop_enum->head);
+		kfree(prop_enum);
+	}
+
+	if (property->num_values)
+		kfree(property->values);
+	drm_mode_object_unregister(dev, &property->base);
+	list_del(&property->head);
+	kfree(property);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+			       void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_get_property *out_resp = data;
+	struct drm_property *property;
+	int enum_count = 0;
+	int value_count = 0;
+	int ret = 0, i;
+	int copied;
+	struct drm_property_enum *prop_enum;
+	struct drm_mode_property_enum __user *enum_ptr;
+	uint64_t __user *values_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	property = drm_property_find(dev, out_resp->prop_id);
+	if (!property) {
+		ret = -ENOENT;
+		goto done;
+	}
+
+	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+		list_for_each_entry(prop_enum, &property->enum_list, head)
+			enum_count++;
+	}
+
+	value_count = property->num_values;
+
+	strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+	out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+	out_resp->flags = property->flags;
+
+	if ((out_resp->count_values >= value_count) && value_count) {
+		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
+		for (i = 0; i < value_count; i++) {
+			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+				ret = -EFAULT;
+				goto done;
+			}
+		}
+	}
+	out_resp->count_values = value_count;
+
+	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+			copied = 0;
+			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+			list_for_each_entry(prop_enum, &property->enum_list, head) {
+
+				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+					ret = -EFAULT;
+					goto done;
+				}
+
+				if (copy_to_user(&enum_ptr[copied].name,
+						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
+					ret = -EFAULT;
+					goto done;
+				}
+				copied++;
+			}
+		}
+		out_resp->count_enum_blobs = enum_count;
+	}
+
+	/*
+	 * NOTE: The idea seems to have been to use this to read all the blob
+	 * property values. But nothing ever added them to the corresponding
+	 * list, userspace always used the special-purpose get_blob ioctl to
+	 * read the value for a blob property. It also doesn't make a lot of
+	 * sense to return values here when everything else is just metadata for
+	 * the property itself.
+	 */
+	if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+		out_resp->count_enum_blobs = 0;
+done:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+static void drm_property_free_blob(struct kref *kref)
+{
+	struct drm_property_blob *blob =
+		container_of(kref, struct drm_property_blob, base.refcount);
+
+	mutex_lock(&blob->dev->mode_config.blob_lock);
+	list_del(&blob->head_global);
+	mutex_unlock(&blob->dev->mode_config.blob_lock);
+
+	drm_mode_object_unregister(blob->dev, &blob->base);
+
+	kfree(blob);
+}
+
+/**
+ * drm_property_create_blob - Create new blob property
+ * @dev: DRM device to create property for
+ * @length: Length to allocate for blob data
+ * @data: If specified, copies data into blob
+ *
+ * Creates a new blob property for a specified DRM device, optionally
+ * copying data. Note that blob properties are meant to be invariant, hence the
+ * data must be filled out before the blob is used as the value of any property.
+ *
+ * Returns:
+ * New blob property with a single reference on success, or an ERR_PTR
+ * value on failure.
+ */
+struct drm_property_blob *
+drm_property_create_blob(struct drm_device *dev, size_t length,
+			 const void *data)
+{
+	struct drm_property_blob *blob;
+	int ret;
+
+	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+		return ERR_PTR(-EINVAL);
+
+	blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+	if (!blob)
+		return ERR_PTR(-ENOMEM);
+
+	/* This must be explicitly initialised, so we can safely call list_del
+	 * on it in the removal handler, even if it isn't in a file list. */
+	INIT_LIST_HEAD(&blob->head_file);
+	blob->length = length;
+	blob->dev = dev;
+
+	if (data)
+		memcpy(blob->data, data, length);
+
+	ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
+				      true, drm_property_free_blob);
+	if (ret) {
+		kfree(blob);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&dev->mode_config.blob_lock);
+	list_add_tail(&blob->head_global,
+	              &dev->mode_config.property_blob_list);
+	mutex_unlock(&dev->mode_config.blob_lock);
+
+	return blob;
+}
+EXPORT_SYMBOL(drm_property_create_blob);
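+
+/*
+ * Sketch (illustrative only): note that unlike the property constructors
+ * above this returns an ERR_PTR() on failure, not NULL. The "lut" buffer is
+ * a made-up example payload.
+ *
+ *	struct drm_property_blob *blob;
+ *
+ *	blob = drm_property_create_blob(dev, sizeof(lut), lut);
+ *	if (IS_ERR(blob))
+ *		return PTR_ERR(blob);
+ *	... use blob->base.id as a property value ...
+ *	drm_property_unreference_blob(blob);
+ */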
+
+/**
+ * drm_property_unreference_blob - Unreference a blob property
+ * @blob: Pointer to blob property
+ *
+ * Drop a reference on a blob property. May free the object.
+ */
+void drm_property_unreference_blob(struct drm_property_blob *blob)
+{
+	if (!blob)
+		return;
+
+	drm_mode_object_unreference(&blob->base);
+}
+EXPORT_SYMBOL(drm_property_unreference_blob);
+
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+				     struct drm_file *file_priv)
+{
+	struct drm_property_blob *blob, *bt;
+
+	/*
+	 * When the file gets released, no one else can access the blob list
+	 * any more, so there is no need to grab dev->blob_lock.
+	 */
+	list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
+		list_del_init(&blob->head_file);
+		drm_property_unreference_blob(blob);
+	}
+}
+
+/**
+ * drm_property_reference_blob - Take a reference on an existing property
+ * @blob: Pointer to blob property
+ *
+ * Take a new reference on an existing blob property. Returns @blob, which
+ * allows this to be used as a shorthand in assignments.
+ */
+struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
+{
+	drm_mode_object_reference(&blob->base);
+	return blob;
+}
+EXPORT_SYMBOL(drm_property_reference_blob);
+
+/**
+ * drm_property_lookup_blob - look up a blob property and take a reference
+ * @dev: drm device
+ * @id: id of the blob property
+ *
+ * If successful, this takes an additional reference to the blob property.
+ * Callers need to make sure to eventually unreference the returned property
+ * again, using drm_property_unreference_blob().
+ *
+ * Returns:
+ * NULL on failure, pointer to the blob on success.
+ */
+struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
+					           uint32_t id)
+{
+	struct drm_mode_object *obj;
+	struct drm_property_blob *blob = NULL;
+
+	obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
+	if (obj)
+		blob = obj_to_blob(obj);
+	return blob;
+}
+EXPORT_SYMBOL(drm_property_lookup_blob);
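+
+/*
+ * Sketch (illustrative only): the lookup returns a new reference that the
+ * caller must drop once done with the blob's data.
+ *
+ *	blob = drm_property_lookup_blob(dev, blob_id);
+ *	if (!blob)
+ *		return -ENOENT;
+ *	... read blob->data and blob->length ...
+ *	drm_property_unreference_blob(blob);
+ */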
+
+/**
+ * drm_property_replace_global_blob - replace existing blob property
+ * @dev: drm device
+ * @replace: location of blob property pointer to be replaced
+ * @length: length of data for new blob, or 0 for no data
+ * @data: content for new blob, or NULL for no data
+ * @obj_holds_id: optional object for property holding blob ID
+ * @prop_holds_id: optional property holding blob ID
+ *
+ * This function will replace a global property in the blob list, optionally
+ * updating a property which holds the ID of that property.
+ *
+ * If length is 0 or data is NULL, no new blob will be created, and the holding
+ * property, if specified, will be set to 0.
+ *
+ * Access to the replace pointer is assumed to be protected by the caller, e.g.
+ * by holding the relevant modesetting object lock for its parent.
+ *
+ * For example, a drm_connector has a 'PATH' property, which contains the ID
+ * of a blob property with the value of the MST path information. Calling this
+ * function with replace pointing to the connector's path_blob_ptr, length and
+ * data set for the new path information, obj_holds_id set to the connector's
+ * base object, and prop_holds_id set to the path property name, will perform
+ * a completely atomic update. The access to path_blob_ptr is protected by the
+ * caller holding a lock on the connector.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int drm_property_replace_global_blob(struct drm_device *dev,
+				     struct drm_property_blob **replace,
+				     size_t length,
+				     const void *data,
+				     struct drm_mode_object *obj_holds_id,
+				     struct drm_property *prop_holds_id)
+{
+	struct drm_property_blob *new_blob = NULL;
+	struct drm_property_blob *old_blob = NULL;
+	int ret;
+
+	WARN_ON(replace == NULL);
+
+	old_blob = *replace;
+
+	if (length && data) {
+		new_blob = drm_property_create_blob(dev, length, data);
+		if (IS_ERR(new_blob))
+			return PTR_ERR(new_blob);
+	}
+
+	if (obj_holds_id) {
+		ret = drm_object_property_set_value(obj_holds_id,
+						    prop_holds_id,
+						    new_blob ?
+						        new_blob->base.id : 0);
+		if (ret != 0)
+			goto err_created;
+	}
+
+	drm_property_unreference_blob(old_blob);
+	*replace = new_blob;
+
+	return 0;
+
+err_created:
+	drm_property_unreference_blob(new_blob);
+	return ret;
+}
+EXPORT_SYMBOL(drm_property_replace_global_blob);
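+
+/*
+ * Sketch of the PATH example from the kerneldoc above (illustrative only;
+ * path_len and path_data stand in for the real MST path payload):
+ *
+ *	ret = drm_property_replace_global_blob(dev,
+ *					       &connector->path_blob_ptr,
+ *					       path_len, path_data,
+ *					       &connector->base,
+ *					       dev->mode_config.path_property);
+ */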
+
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_get_blob *out_resp = data;
+	struct drm_property_blob *blob;
+	int ret = 0;
+	void __user *blob_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+	if (!blob)
+		return -ENOENT;
+
+	if (out_resp->length == blob->length) {
+		blob_ptr = (void __user *)(unsigned long)out_resp->data;
+		if (copy_to_user(blob_ptr, blob->data, blob->length)) {
+			ret = -EFAULT;
+			goto unref;
+		}
+	}
+	out_resp->length = blob->length;
+unref:
+	drm_property_unreference_blob(blob);
+
+	return ret;
+}
+
+int drm_mode_createblob_ioctl(struct drm_device *dev,
+			      void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_create_blob *out_resp = data;
+	struct drm_property_blob *blob;
+	void __user *blob_ptr;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	blob = drm_property_create_blob(dev, out_resp->length, NULL);
+	if (IS_ERR(blob))
+		return PTR_ERR(blob);
+
+	blob_ptr = (void __user *)(unsigned long)out_resp->data;
+	if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
+		ret = -EFAULT;
+		goto out_blob;
+	}
+
+	/* Dropping the lock between create_blob and our access here is safe
+	 * as only the same file_priv can remove the blob; at this point, it is
+	 * not associated with any file_priv. */
+	mutex_lock(&dev->mode_config.blob_lock);
+	out_resp->blob_id = blob->base.id;
+	list_add_tail(&blob->head_file, &file_priv->blobs);
+	mutex_unlock(&dev->mode_config.blob_lock);
+
+	return 0;
+
+out_blob:
+	drm_property_unreference_blob(blob);
+	return ret;
+}
+
+int drm_mode_destroyblob_ioctl(struct drm_device *dev,
+			       void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_destroy_blob *out_resp = data;
+	struct drm_property_blob *blob = NULL, *bt;
+	bool found = false;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+	if (!blob)
+		return -ENOENT;
+
+	mutex_lock(&dev->mode_config.blob_lock);
+	/* Ensure the property was actually created by this user. */
+	list_for_each_entry(bt, &file_priv->blobs, head_file) {
+		if (bt == blob) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		ret = -EPERM;
+		goto err;
+	}
+
+	/* We must drop head_file here, because we may not be the last
+	 * reference on the blob. */
+	list_del_init(&blob->head_file);
+	mutex_unlock(&dev->mode_config.blob_lock);
+
+	/* One reference from lookup, and one from the filp. */
+	drm_property_unreference_blob(blob);
+	drm_property_unreference_blob(blob);
+
+	return 0;
+
+err:
+	mutex_unlock(&dev->mode_config.blob_lock);
+	drm_property_unreference_blob(blob);
+
+	return ret;
+}
+
+/* Some properties could refer to dynamic refcnt'd objects, or things that
+ * need special locking to handle lifetime issues (i.e. to ensure the prop
+ * value doesn't become invalid partway through the property update due to a
+ * race).  The value returned by reference via 'ref' should be passed back to
+ * drm_property_change_valid_put() after the property is set (and the object
+ * to which the property is attached has a chance to take its own reference).
+ */
+bool drm_property_change_valid_get(struct drm_property *property,
+				   uint64_t value, struct drm_mode_object **ref)
+{
+	int i;
+
+	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+		return false;
+
+	*ref = NULL;
+
+	if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
+		if (value < property->values[0] || value > property->values[1])
+			return false;
+		return true;
+	} else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
+		int64_t svalue = U642I64(value);
+
+		if (svalue < U642I64(property->values[0]) ||
+				svalue > U642I64(property->values[1]))
+			return false;
+		return true;
+	} else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+		uint64_t valid_mask = 0;
+
+		for (i = 0; i < property->num_values; i++)
+			valid_mask |= (1ULL << property->values[i]);
+		return !(value & ~valid_mask);
+	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
+		struct drm_property_blob *blob;
+
+		if (value == 0)
+			return true;
+
+		blob = drm_property_lookup_blob(property->dev, value);
+		if (blob) {
+			*ref = &blob->base;
+			return true;
+		} else {
+			return false;
+		}
+	} else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+		/* a zero value for an object property translates to null: */
+		if (value == 0)
+			return true;
+
+		*ref = __drm_mode_object_find(property->dev, value,
+					      property->values[0]);
+		return *ref != NULL;
+	}
+
+	for (i = 0; i < property->num_values; i++)
+		if (property->values[i] == value)
+			return true;
+	return false;
+}
+
+void drm_property_change_valid_put(struct drm_property *property,
+		struct drm_mode_object *ref)
+{
+	if (!ref)
+		return;
+
+	if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+		drm_mode_object_unreference(ref);
+	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+		drm_property_unreference_blob(obj_to_blob(ref));
+}
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 0a02efe..7b6d26e 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -34,6 +34,12 @@
 	.destroy = drm_encoder_cleanup,
 };
 
+static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
+				     struct drm_crtc_state *state)
+{
+	return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
 static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_simple_display_pipe *pipe;
@@ -57,6 +63,7 @@
 }
 
 static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
+	.atomic_check = drm_simple_kms_crtc_check,
 	.disable = drm_simple_kms_crtc_disable,
 	.enable = drm_simple_kms_crtc_enable,
 };
@@ -133,16 +140,61 @@
 };
 
 /**
+ * drm_simple_display_pipe_attach_bridge - Attach a bridge to the display pipe
+ * @pipe: simple display pipe object
+ * @bridge: bridge to attach
+ *
+ * Makes it possible to still use the drm_simple_display_pipe helpers when
+ * a DRM bridge has to be used.
+ *
+ * Note that you probably want to initialize the pipe by passing a NULL
+ * connector to drm_simple_display_pipe_init().
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
+					  struct drm_bridge *bridge)
+{
+	bridge->encoder = &pipe->encoder;
+	pipe->encoder.bridge = bridge;
+	return drm_bridge_attach(pipe->encoder.dev, bridge);
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
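+
+/*
+ * Sketch (illustrative only): a driver using a bridge would initialize the
+ * pipe with a NULL connector and then attach the bridge; the formats array
+ * and funcs are assumed to be defined by the driver.
+ *
+ *	ret = drm_simple_display_pipe_init(dev, pipe, &funcs, formats,
+ *					   ARRAY_SIZE(formats), NULL);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = drm_simple_display_pipe_attach_bridge(pipe, bridge);
+ */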
+
+/**
+ * drm_simple_display_pipe_detach_bridge - Detach the bridge from the display pipe
+ * @pipe: simple display pipe object
+ *
+ * Detaches the drm bridge previously attached with
+ * drm_simple_display_pipe_attach_bridge()
+ */
+void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe)
+{
+	if (WARN_ON(!pipe->encoder.bridge))
+		return;
+
+	drm_bridge_detach(pipe->encoder.bridge);
+	pipe->encoder.bridge = NULL;
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_detach_bridge);
+
+/**
  * drm_simple_display_pipe_init - Initialize a simple display pipeline
  * @dev: DRM device
  * @pipe: simple display pipe object to initialize
  * @funcs: callbacks for the display pipe (optional)
- * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
  * @format_count: number of elements in @formats
- * @connector: connector to attach and register
+ * @connector: connector to attach and register (optional)
  *
 * Sets up a display pipeline which consists of a really simple
- * plane-crtc-encoder pipe coupled with the provided connector.
+ * plane-crtc-encoder pipe.
+ *
+ * If a connector is supplied, the pipe will be coupled with the provided
+ * connector. You may supply a NULL connector when using drm bridges that
+ * handle connectors themselves (see drm_simple_display_pipe_attach_bridge()).
+ *
  * Teardown of a simple display pipe is all handled automatically by the drm
  * core through calling drm_mode_config_cleanup(). Drivers afterwards need to
  * release the memory for the structure themselves.
@@ -181,7 +233,7 @@
 	encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
 	ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
 			       DRM_MODE_ENCODER_NONE, NULL);
-	if (ret)
+	if (ret || !connector)
 		return ret;
 
 	return drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index ffd1b32..e3164d9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -488,8 +488,7 @@
 };
 
 static struct drm_driver etnaviv_drm_driver = {
-	.driver_features    = DRIVER_HAVE_IRQ |
-				DRIVER_GEM |
+	.driver_features    = DRIVER_GEM |
 				DRIVER_PRIME |
 				DRIVER_RENDER,
 	.open               = etnaviv_open,
@@ -533,8 +532,6 @@
 	if (!drm)
 		return -ENOMEM;
 
-	drm->platformdev = to_platform_device(dev);
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		dev_err(dev, "failed to allocate private data\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 877d2ef..486943e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -105,7 +105,7 @@
 		atomic_inc(&exynos_crtc->pending_update);
 	}
 
-	drm_atomic_helper_commit_planes(dev, state, false);
+	drm_atomic_helper_commit_planes(dev, state, 0);
 
 	exynos_atomic_wait_for_commit(state);
 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index e50467a..a7e5486 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -169,25 +169,10 @@
 	return;
 }
 
-static void
-fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
-			     const struct drm_plane_state *new_state)
-{
-}
-
-static int
-fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
-			     const struct drm_plane_state *new_state)
-{
-	return 0;
-}
-
 static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
 	.atomic_check = fsl_dcu_drm_plane_atomic_check,
 	.atomic_disable = fsl_dcu_drm_plane_atomic_disable,
 	.atomic_update = fsl_dcu_drm_plane_atomic_update,
-	.cleanup_fb = fsl_dcu_drm_plane_cleanup_fb,
-	.prepare_fb = fsl_dcu_drm_plane_prepare_fb,
 };
 
 static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 38dc890..ea733ab 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -415,14 +415,6 @@
 	if (ret)
 		return ret;
 
-	/* Didn't get an EDID, so
-	 * Set wide sync ranges so we get all modes
-	 * handed to valid_mode for checking
-	 */
-	connector->display_info.min_vfreq = 0;
-	connector->display_info.max_vfreq = 200;
-	connector->display_info.min_hfreq = 0;
-	connector->display_info.max_hfreq = 200;
 	if (mode_dev->panel_fixed_mode != NULL) {
 		struct drm_display_mode *mode =
 		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 907cb51..acb3848 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -335,11 +335,6 @@
 	struct drm_display_mode *dup_mode = NULL;
 	struct drm_device *dev = connector->dev;
 
-	connector->display_info.min_vfreq = 0;
-	connector->display_info.max_vfreq = 200;
-	connector->display_info.min_hfreq = 0;
-	connector->display_info.max_hfreq = 200;
-
 	if (fixed_mode) {
 		dev_dbg(dev->dev, "fixed_mode %dx%d\n",
 				fixed_mode->hdisplay, fixed_mode->vdisplay);
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index ab696ca..eab6d88 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -163,10 +163,7 @@
 	if (bclp > 255)
 		return ASLE_BACKLIGHT_FAILED;
 
-	if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
-		int max = bd->props.max_brightness;
-		gma_backlight_set(dev, bclp * max / 255);
-	}
+	gma_backlight_set(dev, bclp * bd->props.max_brightness / 255);
 
 	asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
 
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index e55733c..fd7c912 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -530,15 +530,6 @@
 	if (ret)
 		return ret;
 
-	/* Didn't get an EDID, so
-	 * Set wide sync ranges so we get all modes
-	 * handed to valid_mode for checking
-	 */
-	connector->display_info.min_vfreq = 0;
-	connector->display_info.max_vfreq = 200;
-	connector->display_info.min_hfreq = 0;
-	connector->display_info.max_hfreq = 200;
-
 	if (mode_dev->panel_fixed_mode != NULL) {
 		struct drm_display_mode *mode =
 		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index c3707d4..7e7a4d4 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -608,15 +608,17 @@
 			 u32 ch, u32 y, u32 in_h, u32 fmt)
 {
 	struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+	char *format_name;
 	u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
 	u32 stride = fb->pitches[0];
 	u32 addr = (u32)obj->paddr + y * stride;
 
 	DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
 			 ch + 1, y, in_h, stride, (u32)obj->paddr);
+	format_name = drm_get_format_name(fb->pixel_format);
 	DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
-			 addr, fb->width, fb->height, fmt,
-			 drm_get_format_name(fb->pixel_format));
+			 addr, fb->width, fb->height, fmt, format_name);
+	kfree(format_name);
 
 	/* get reg offset */
 	reg_ctrl = RD_CH_CTRL(ch);
@@ -815,19 +817,6 @@
 	ade_compositor_routing_disable(base, ch);
 }
 
-static int ade_plane_prepare_fb(struct drm_plane *plane,
-				const struct drm_plane_state *new_state)
-{
-	/* do nothing */
-	return 0;
-}
-
-static void ade_plane_cleanup_fb(struct drm_plane *plane,
-				 const struct drm_plane_state *old_state)
-{
-	/* do nothing */
-}
-
 static int ade_plane_atomic_check(struct drm_plane *plane,
 				  struct drm_plane_state *state)
 {
@@ -895,8 +884,6 @@
 }
 
 static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
-	.prepare_fb = ade_plane_prepare_fb,
-	.cleanup_fb = ade_plane_cleanup_fb,
 	.atomic_check = ade_plane_atomic_check,
 	.atomic_update = ade_plane_atomic_update,
 	.atomic_disable = ade_plane_atomic_disable,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 1edd9bc..1fc2f50 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -169,7 +169,7 @@
 
 static struct drm_driver kirin_drm_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
-				  DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+				  DRIVER_ATOMIC,
 	.fops			= &kirin_drm_fops,
 
 	.gem_free_object_unlocked = drm_gem_cma_free_object,
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 4d341db..a6c92be 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -22,6 +22,7 @@
 config DRM_I2C_NXP_TDA998X
 	tristate "NXP Semiconductors TDA998X HDMI encoder"
 	default m if DRM_TILCDC
+	select SND_SOC_HDMI_CODEC if SND_SOC
 	help
 	  Support for NXP Semiconductors TDA998X HDMI encoders.
 
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index f4315bc..9798d40 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/irq.h>
 #include <sound/asoundef.h>
+#include <sound/hdmi-codec.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
@@ -30,6 +31,11 @@
 
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
+struct tda998x_audio_port {
+	u8 format;		/* AFMT_xxx */
+	u8 config;		/* AP value */
+};
+
 struct tda998x_priv {
 	struct i2c_client *cec;
 	struct i2c_client *hdmi;
@@ -41,7 +47,10 @@
 	u8 vip_cntrl_0;
 	u8 vip_cntrl_1;
 	u8 vip_cntrl_2;
-	struct tda998x_encoder_params params;
+	struct tda998x_audio_params audio_params;
+
+	struct platform_device *audio_pdev;
+	struct mutex audio_mutex;
 
 	wait_queue_head_t wq_edid;
 	volatile int wq_edid_wait;
@@ -53,6 +62,8 @@
 
 	struct drm_encoder encoder;
 	struct drm_connector connector;
+
+	struct tda998x_audio_port audio_port[2];
 };
 
 #define conn_to_tda998x_priv(x) \
@@ -666,26 +677,16 @@
 	reg_set(priv, REG_DIP_IF_FLAGS, bit);
 }
 
-static void
-tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
+static int tda998x_write_aif(struct tda998x_priv *priv,
+			     struct hdmi_audio_infoframe *cea)
 {
 	union hdmi_infoframe frame;
 
-	hdmi_audio_infoframe_init(&frame.audio);
-
-	frame.audio.channels = p->audio_frame[1] & 0x07;
-	frame.audio.channel_allocation = p->audio_frame[4];
-	frame.audio.level_shift_value = (p->audio_frame[5] & 0x78) >> 3;
-	frame.audio.downmix_inhibit = (p->audio_frame[5] & 0x80) >> 7;
-
-	/*
-	 * L-PCM and IEC61937 compressed audio shall always set sample
-	 * frequency to "refer to stream".  For others, see the HDMI
-	 * specification.
-	 */
-	frame.audio.sample_frequency = (p->audio_frame[2] & 0x1c) >> 2;
+	frame.audio = *cea;
 
 	tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, &frame);
+
+	return 0;
 }
 
 static void
@@ -710,20 +711,21 @@
 	}
 }
 
-static void
+static int
 tda998x_configure_audio(struct tda998x_priv *priv,
-		struct drm_display_mode *mode, struct tda998x_encoder_params *p)
+			struct tda998x_audio_params *params,
+			unsigned mode_clock)
 {
 	u8 buf[6], clksel_aip, clksel_fs, cts_n, adiv;
 	u32 n;
 
 	/* Enable audio ports */
-	reg_write(priv, REG_ENA_AP, p->audio_cfg);
-	reg_write(priv, REG_ENA_ACLK, p->audio_clk_cfg);
+	reg_write(priv, REG_ENA_AP, params->config);
 
 	/* Set audio input source */
-	switch (p->audio_format) {
+	switch (params->format) {
 	case AFMT_SPDIF:
+		reg_write(priv, REG_ENA_ACLK, 0);
 		reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_SPDIF);
 		clksel_aip = AIP_CLKSEL_AIP_SPDIF;
 		clksel_fs = AIP_CLKSEL_FS_FS64SPDIF;
@@ -731,15 +733,29 @@
 		break;
 
 	case AFMT_I2S:
+		reg_write(priv, REG_ENA_ACLK, 1);
 		reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_I2S);
 		clksel_aip = AIP_CLKSEL_AIP_I2S;
 		clksel_fs = AIP_CLKSEL_FS_ACLK;
-		cts_n = CTS_N_M(3) | CTS_N_K(3);
+		switch (params->sample_width) {
+		case 16:
+			cts_n = CTS_N_M(3) | CTS_N_K(1);
+			break;
+		case 18:
+		case 20:
+		case 24:
+			cts_n = CTS_N_M(3) | CTS_N_K(2);
+			break;
+		default:
+		case 32:
+			cts_n = CTS_N_M(3) | CTS_N_K(3);
+			break;
+		}
 		break;
 
 	default:
-		BUG();
-		return;
+		dev_err(&priv->hdmi->dev, "Unsupported audio format\n");
+		return -EINVAL;
 	}
 
 	reg_write(priv, REG_AIP_CLKSEL, clksel_aip);
@@ -755,11 +771,11 @@
 	 * assume 100MHz requires larger divider.
 	 */
 	adiv = AUDIO_DIV_SERCLK_8;
-	if (mode->clock > 100000)
+	if (mode_clock > 100000)
 		adiv++;			/* AUDIO_DIV_SERCLK_16 */
 
 	/* S/PDIF asks for a larger divider */
-	if (p->audio_format == AFMT_SPDIF)
+	if (params->format == AFMT_SPDIF)
 		adiv++;			/* AUDIO_DIV_SERCLK_16 or _32 */
 
 	reg_write(priv, REG_AUDIO_DIV, adiv);
@@ -768,7 +784,7 @@
 	 * This is the approximate value of N, which happens to be
 	 * the recommended values for non-coherent clocks.
 	 */
-	n = 128 * p->audio_sample_rate / 1000;
+	n = 128 * params->sample_rate / 1000;
 
 	/* Write the CTS and N values */
 	buf[0] = 0x44;
@@ -786,20 +802,21 @@
 	reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
 	reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
 
-	/* Write the channel status */
-	buf[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
-	buf[1] = 0x00;
-	buf[2] = IEC958_AES3_CON_FS_NOTID;
-	buf[3] = IEC958_AES4_CON_ORIGFS_NOTID |
-			IEC958_AES4_CON_MAX_WORDLEN_24;
+	/* Write the channel status.
+	 * The REG_CH_STAT_B registers skip the IEC958 AES2 byte, because
+	 * there is a separate register for each I2S wire.
+	 */
+	buf[0] = params->status[0];
+	buf[1] = params->status[1];
+	buf[2] = params->status[3];
+	buf[3] = params->status[4];
 	reg_write_range(priv, REG_CH_STAT_B(0), buf, 4);
 
 	tda998x_audio_mute(priv, true);
 	msleep(20);
 	tda998x_audio_mute(priv, false);
 
-	/* Write the audio information packet */
-	tda998x_write_aif(priv, p);
+	return tda998x_write_aif(priv, &params->cea);
 }
 
 /* DRM encoder functions */
@@ -820,7 +837,7 @@
 			    VIP_CNTRL_2_SWAP_F(p->swap_f) |
 			    (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
 
-	priv->params = *p;
+	priv->audio_params = p->audio_params;
 }
 
 static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
@@ -1057,9 +1074,13 @@
 
 		tda998x_write_avi(priv, adjusted_mode);
 
-		if (priv->params.audio_cfg)
-			tda998x_configure_audio(priv, adjusted_mode,
-						&priv->params);
+		if (priv->audio_params.format != AFMT_UNUSED) {
+			mutex_lock(&priv->audio_mutex);
+			tda998x_configure_audio(priv,
+						&priv->audio_params,
+						adjusted_mode->clock);
+			mutex_unlock(&priv->audio_mutex);
+		}
 	}
 }
 
@@ -1159,6 +1180,8 @@
 	drm_mode_connector_update_edid_property(connector, edid);
 	n = drm_add_edid_modes(connector, edid);
 	priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
+	drm_edid_to_eld(connector, edid);
+
 	kfree(edid);
 
 	return n;
@@ -1180,6 +1203,9 @@
 	cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
 	reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
 
+	if (priv->audio_pdev)
+		platform_device_unregister(priv->audio_pdev);
+
 	if (priv->hdmi->irq)
 		free_irq(priv->hdmi->irq, priv);
 
@@ -1189,8 +1215,189 @@
 	i2c_unregister_device(priv->cec);
 }
 
+static int tda998x_audio_hw_params(struct device *dev, void *data,
+				   struct hdmi_codec_daifmt *daifmt,
+				   struct hdmi_codec_params *params)
+{
+	struct tda998x_priv *priv = dev_get_drvdata(dev);
+	int i, ret;
+	struct tda998x_audio_params audio = {
+		.sample_width = params->sample_width,
+		.sample_rate = params->sample_rate,
+		.cea = params->cea,
+	};
+
+	if (!priv->encoder.crtc)
+		return -ENODEV;
+
+	memcpy(audio.status, params->iec.status,
+	       min(sizeof(audio.status), sizeof(params->iec.status)));
+
+	switch (daifmt->fmt) {
+	case HDMI_I2S:
+		if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
+		    daifmt->bit_clk_master || daifmt->frame_clk_master) {
+			dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+				daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+				daifmt->bit_clk_master,
+				daifmt->frame_clk_master);
+			return -EINVAL;
+		}
+		for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+			if (priv->audio_port[i].format == AFMT_I2S)
+				audio.config = priv->audio_port[i].config;
+		audio.format = AFMT_I2S;
+		break;
+	case HDMI_SPDIF:
+		for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+			if (priv->audio_port[i].format == AFMT_SPDIF)
+				audio.config = priv->audio_port[i].config;
+		audio.format = AFMT_SPDIF;
+		break;
+	default:
+		dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
+		return -EINVAL;
+	}
+
+	if (audio.config == 0) {
+		dev_err(dev, "%s: No audio configuration found\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&priv->audio_mutex);
+	ret = tda998x_configure_audio(priv,
+				      &audio,
+				      priv->encoder.crtc->hwmode.clock);
+
+	if (ret == 0)
+		priv->audio_params = audio;
+	mutex_unlock(&priv->audio_mutex);
+
+	return ret;
+}
+
+static void tda998x_audio_shutdown(struct device *dev, void *data)
+{
+	struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+	mutex_lock(&priv->audio_mutex);
+
+	reg_write(priv, REG_ENA_AP, 0);
+
+	priv->audio_params.format = AFMT_UNUSED;
+
+	mutex_unlock(&priv->audio_mutex);
+}
+
+int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+	struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+	mutex_lock(&priv->audio_mutex);
+
+	tda998x_audio_mute(priv, enable);
+
+	mutex_unlock(&priv->audio_mutex);
+	return 0;
+}
+
+static int tda998x_audio_get_eld(struct device *dev, void *data,
+				 uint8_t *buf, size_t len)
+{
+	struct tda998x_priv *priv = dev_get_drvdata(dev);
+	struct drm_mode_config *config = &priv->encoder.dev->mode_config;
+	struct drm_connector *connector;
+	int ret = -ENODEV;
+
+	mutex_lock(&config->mutex);
+	list_for_each_entry(connector, &config->connector_list, head) {
+		if (&priv->encoder == connector->encoder) {
+			memcpy(buf, connector->eld,
+			       min(sizeof(connector->eld), len));
+			ret = 0;
+		}
+	}
+	mutex_unlock(&config->mutex);
+
+	return ret;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+	.hw_params = tda998x_audio_hw_params,
+	.audio_shutdown = tda998x_audio_shutdown,
+	.digital_mute = tda998x_audio_digital_mute,
+	.get_eld = tda998x_audio_get_eld,
+};
+
+static int tda998x_audio_codec_init(struct tda998x_priv *priv,
+				    struct device *dev)
+{
+	struct hdmi_codec_pdata codec_data = {
+		.ops = &audio_codec_ops,
+		.max_i2s_channels = 2,
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
+		if (priv->audio_port[i].format == AFMT_I2S &&
+		    priv->audio_port[i].config != 0)
+			codec_data.i2s = 1;
+		if (priv->audio_port[i].format == AFMT_SPDIF &&
+		    priv->audio_port[i].config != 0)
+			codec_data.spdif = 1;
+	}
+
+	priv->audio_pdev = platform_device_register_data(
+		dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+		&codec_data, sizeof(codec_data));
+
+	return PTR_ERR_OR_ZERO(priv->audio_pdev);
+}
+
 /* I2C driver functions */
 
+static int tda998x_get_audio_ports(struct tda998x_priv *priv,
+				   struct device_node *np)
+{
+	const u32 *port_data;
+	u32 size;
+	int i;
+
+	port_data = of_get_property(np, "audio-ports", &size);
+	if (!port_data)
+		return 0;
+
+	size /= sizeof(u32);
+	if (size > 2 * ARRAY_SIZE(priv->audio_port) || size % 2 != 0) {
+		dev_err(&priv->hdmi->dev,
+			"Bad number of elements in audio-ports dt-property\n");
+		return -EINVAL;
+	}
+
+	size /= 2;
+
+	for (i = 0; i < size; i++) {
+		u8 afmt = be32_to_cpup(&port_data[2*i]);
+		u8 ena_ap = be32_to_cpup(&port_data[2*i+1]);
+
+		if (afmt != AFMT_SPDIF && afmt != AFMT_I2S) {
+			dev_err(&priv->hdmi->dev,
+				"Bad audio format %u\n", afmt);
+			return -EINVAL;
+		}
+
+		priv->audio_port[i].format = afmt;
+		priv->audio_port[i].config = ena_ap;
+	}
+
+	if (priv->audio_port[0].format == priv->audio_port[1].format) {
+		dev_err(&priv->hdmi->dev,
+			"There can only be one I2S port and one SPDIF port\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 {
 	struct device_node *np = client->dev.of_node;
@@ -1304,7 +1511,7 @@
 	if (!np)
 		return 0;		/* non-DT */
 
-	/* get the optional video properties */
+	/* get the device tree parameters */
 	ret = of_property_read_u32(np, "video-ports", &video);
 	if (ret == 0) {
 		priv->vip_cntrl_0 = video >> 16;
@@ -1312,8 +1519,16 @@
 		priv->vip_cntrl_2 = video;
 	}
 
-	return 0;
+	mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
 
+	ret = tda998x_get_audio_ports(priv, np);
+	if (ret)
+		goto fail;
+
+	if (priv->audio_port[0].format != AFMT_UNUSED)
+		tda998x_audio_codec_init(priv, &client->dev);
+
+	return 0;
 fail:
 	/* if encoder_init fails, the encoder slave is never registered,
 	 * so cleanup here:
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a6d174a..27b0e34 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3004,6 +3004,7 @@
 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
 		struct drm_plane_state *state;
 		struct drm_plane *plane = &intel_plane->base;
+		char *format_name;
 
 		if (!plane->state) {
 			seq_puts(m, "plane->state is NULL!\n");
@@ -3012,6 +3013,12 @@
 
 		state = plane->state;
 
+		if (state->fb) {
+			format_name = drm_get_format_name(state->fb->pixel_format);
+		} else {
+			format_name = kstrdup("N/A", GFP_KERNEL);
+		}
+
 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
 			   plane->base.id,
 			   plane_type(intel_plane->base.type),
@@ -3025,8 +3032,10 @@
 			   ((state->src_w & 0xffff) * 15625) >> 10,
 			   (state->src_h >> 16),
 			   ((state->src_h & 0xffff) * 15625) >> 10,
-			   state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
+			   format_name,
 			   plane_rotation(state->rotation));
+
+		kfree(format_name);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 38f805e..dae340c 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -153,27 +153,27 @@
  * host point of view, the graphic address space is partitioned by multiple
  * vGPUs in different VMs. ::
  *
- *                        vGPU1 view         Host view
- *             0 ------> +-----------+     +-----------+
- *               ^       |###########|     |   vGPU3   |
- *               |       |###########|     +-----------+
- *               |       |###########|     |   vGPU2   |
- *               |       +-----------+     +-----------+
- *        mappable GM    | available | ==> |   vGPU1   |
- *               |       +-----------+     +-----------+
- *               |       |###########|     |           |
- *               v       |###########|     |   Host    |
- *               +=======+===========+     +===========+
- *               ^       |###########|     |   vGPU3   |
- *               |       |###########|     +-----------+
- *               |       |###########|     |   vGPU2   |
- *               |       +-----------+     +-----------+
- *      unmappable GM    | available | ==> |   vGPU1   |
- *               |       +-----------+     +-----------+
- *               |       |###########|     |           |
- *               |       |###########|     |   Host    |
- *               v       |###########|     |           |
- * total GM size ------> +-----------+     +-----------+
+ *                         vGPU1 view         Host view
+ *              0 ------> +-----------+     +-----------+
+ *                ^       |###########|     |   vGPU3   |
+ *                |       |###########|     +-----------+
+ *                |       |###########|     |   vGPU2   |
+ *                |       +-----------+     +-----------+
+ *         mappable GM    | available | ==> |   vGPU1   |
+ *                |       +-----------+     +-----------+
+ *                |       |###########|     |           |
+ *                v       |###########|     |   Host    |
+ *                +=======+===========+     +===========+
+ *                ^       |###########|     |   vGPU3   |
+ *                |       |###########|     +-----------+
+ *                |       |###########|     |   vGPU2   |
+ *                |       +-----------+     +-----------+
+ *       unmappable GM    | available | ==> |   vGPU1   |
+ *                |       +-----------+     +-----------+
+ *                |       |###########|     |           |
+ *                |       |###########|     |   Host    |
+ *                v       |###########|     |           |
+ *  total GM size ------> +-----------+     +-----------+
  *
  * Returns:
  * zero on success, non-zero if configuration invalid or ballooning failed
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index e06d1f5..b82de30 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -143,6 +143,7 @@
 		crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
 
 	if (state->fb && intel_rotation_90_or_270(state->rotation)) {
+		char *format_name;
 		if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 			state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
 			DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
@@ -157,8 +158,9 @@
 		switch (state->fb->pixel_format) {
 		case DRM_FORMAT_C8:
 		case DRM_FORMAT_RGB565:
-			DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
-					drm_get_format_name(state->fb->pixel_format));
+			format_name = drm_get_format_name(state->fb->pixel_format);
+			DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 
 		default:
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index a421171..6c70a5b 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -51,10 +51,10 @@
  * related registers. (The notable exception is the power management, not
  * covered here.)
  *
- * The struct i915_audio_component is used to interact between the graphics
- * and audio drivers. The struct i915_audio_component_ops *ops in it is
+ * The struct &i915_audio_component is used for interaction between the graphics
+ * and audio drivers. The struct &i915_audio_component_ops @ops in it is
  * defined in graphics driver and called in audio driver. The
- * struct i915_audio_component_audio_ops *audio_ops is called from i915 driver.
+ * struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
  */
 
 static const struct {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 69b80d0..497d99b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12821,6 +12821,7 @@
 
 	DRM_DEBUG_KMS("planes on this crtc\n");
 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		char *format_name;
 		intel_plane = to_intel_plane(plane);
 		if (intel_plane->pipe != crtc->pipe)
 			continue;
@@ -12833,11 +12834,12 @@
 			continue;
 		}
 
+		format_name = drm_get_format_name(fb->pixel_format);
+
 		DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
 			      plane->base.id, plane->name);
 		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
-			      fb->base.id, fb->width, fb->height,
-			      drm_get_format_name(fb->pixel_format));
+			      fb->base.id, fb->width, fb->height, format_name);
 		DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
 			      state->scaler_id,
 			      state->base.src.x1 >> 16,
@@ -12847,6 +12849,8 @@
 			      state->base.dst.x1, state->base.dst.y1,
 			      drm_rect_width(&state->base.dst),
 			      drm_rect_height(&state->base.dst));
+
+		kfree(format_name);
 	}
 }
 
@@ -14628,7 +14632,7 @@
  */
 int
 intel_prepare_plane_fb(struct drm_plane *plane,
-		       const struct drm_plane_state *new_state)
+		       struct drm_plane_state *new_state)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_framebuffer *fb = new_state->fb;
@@ -14714,7 +14718,7 @@
  */
 void
 intel_cleanup_plane_fb(struct drm_plane *plane,
-		       const struct drm_plane_state *old_state)
+		       struct drm_plane_state *old_state)
 {
 	struct drm_device *dev = plane->dev;
 	struct intel_plane_state *old_intel_state;
@@ -15630,6 +15634,7 @@
 	unsigned int tiling = i915_gem_object_get_tiling(obj);
 	int ret;
 	u32 pitch_limit, stride_alignment;
+	char *format_name;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
@@ -15720,16 +15725,18 @@
 		break;
 	case DRM_FORMAT_XRGB1555:
 		if (INTEL_INFO(dev)->gen > 3) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-				  drm_get_format_name(mode_cmd->pixel_format));
+			format_name = drm_get_format_name(mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 		}
 		break;
 	case DRM_FORMAT_ABGR8888:
 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
 		    INTEL_INFO(dev)->gen < 9) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-				  drm_get_format_name(mode_cmd->pixel_format));
+			format_name = drm_get_format_name(mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 		}
 		break;
@@ -15737,15 +15744,17 @@
 	case DRM_FORMAT_XRGB2101010:
 	case DRM_FORMAT_XBGR2101010:
 		if (INTEL_INFO(dev)->gen < 4) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-				  drm_get_format_name(mode_cmd->pixel_format));
+			format_name = drm_get_format_name(mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 		}
 		break;
 	case DRM_FORMAT_ABGR2101010:
 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-				  drm_get_format_name(mode_cmd->pixel_format));
+			format_name = drm_get_format_name(mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 		}
 		break;
@@ -15754,14 +15763,16 @@
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_VYUY:
 		if (INTEL_INFO(dev)->gen < 5) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-				  drm_get_format_name(mode_cmd->pixel_format));
+			format_name = drm_get_format_name(mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 		}
 		break;
 	default:
-		DRM_DEBUG("unsupported pixel format: %s\n",
-			  drm_get_format_name(mode_cmd->pixel_format));
+		format_name = drm_get_format_name(mode_cmd->pixel_format);
+		DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 810c89e..8fd16ad 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1267,9 +1267,9 @@
 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
 void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
 int intel_prepare_plane_fb(struct drm_plane *plane,
-			   const struct drm_plane_state *new_state);
+			   struct drm_plane_state *new_state);
 void intel_cleanup_plane_fb(struct drm_plane *plane,
-			    const struct drm_plane_state *old_state);
+			    struct drm_plane_state *old_state);
 int intel_plane_atomic_get_property(struct drm_plane *plane,
 				    const struct drm_plane_state *state,
 				    struct drm_property *property,
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 944786d..e40db2d 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -155,6 +155,7 @@
  *
  *     +-------------------------------+
  *     |        guc_css_header         |
+ *     |                               |
  *     | contains major/minor version  |
  *     +-------------------------------+
  *     |             uCode             |
@@ -176,10 +177,10 @@
  *
  * 1. Header, uCode and RSA are must-have components.
 * 2. All firmware components, if present, are in the sequence illustrated
- * in the layout table above.
+ *    in the layout table above.
  * 3. Length info of each component can be found in header, in dwords.
  * 4. Modulus and exponent key are not required by driver. They may not appear
- * in fw. So driver will load a truncated firmware in this case.
+ *    in fw. So driver will load a truncated firmware in this case.
  */
 
 struct guc_css_header {
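
The layout rules in the comment above reduce to a simple size check. A
sketch under stated assumptions — the parameters below are hypothetical
stand-ins for the header fields, not the real guc_css_header layout:

/* Per rule 3 the header reports component sizes in dwords; per rule 1
 * the header, uCode and RSA signature must all be present. (Rule 4's
 * truncated images only omit the optional modulus/exponent keys.)
 */
static bool guc_fw_layout_ok(size_t image_size, u32 header_dw,
			     u32 ucode_dw, u32 rsa_dw)
{
	size_t required = (size_t)(header_dw + ucode_dw + rsa_dw) * sizeof(u32);

	return image_size >= required;
}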
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4987d97..e1d47d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -1125,17 +1125,6 @@
 	}
 	lvds_connector->base.edid = edid;
 
-	if (IS_ERR_OR_NULL(edid)) {
-		/* Didn't get an EDID, so
-		 * Set wide sync ranges so we get all modes
-		 * handed to valid_mode for checking
-		 */
-		connector->display_info.min_vfreq = 0;
-		connector->display_info.max_vfreq = 200;
-		connector->display_info.min_hfreq = 0;
-		connector->display_info.max_hfreq = 200;
-	}
-
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
 			DRM_DEBUG_KMS("using preferred mode from EDID: ");
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 438bac8..56dfc4cd 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -193,7 +193,8 @@
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, true);
+	drm_atomic_helper_commit_planes(dev, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
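
This hunk and the drm_atomic_helper_commit_planes() hunks that follow
track one core change: the bool active_only parameter became a flags
word, with true mapping to DRM_PLANE_COMMIT_ACTIVE_ONLY and false to 0.
A minimal sketch of a commit path on the new signature (the function
name is illustrative):

/* Commit tail using the flags-based helper; driver plumbing elided. */
static void example_commit_tail(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	drm_atomic_helper_commit_modeset_disables(dev, state);

	/* 0 commits every plane; DRM_PLANE_COMMIT_ACTIVE_ONLY skips
	 * planes on inactive or disabled CRTCs.
	 */
	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, state);
}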
 
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 0e769ab..72c1ae4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -70,13 +70,15 @@
 	 *
 	 *     drm_atomic_helper_commit_modeset_disables(dev, state);
 	 *     drm_atomic_helper_commit_modeset_enables(dev, state);
-	 *     drm_atomic_helper_commit_planes(dev, state, true);
+	 *     drm_atomic_helper_commit_planes(dev, state,
+	 *                                     DRM_PLANE_COMMIT_ACTIVE_ONLY);
 	 *
 	 * See the kerneldoc entries for these three functions for more details.
 	 */
 	drm_atomic_helper_commit_modeset_disables(drm, state);
 	drm_atomic_helper_commit_modeset_enables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state, true);
+	drm_atomic_helper_commit_planes(drm, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	drm_atomic_helper_wait_for_vblanks(drm, state);
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 81325f6..88dd221 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -183,8 +183,10 @@
 	}
 
 	sysram = vmalloc(size);
-	if (!sysram)
+	if (!sysram) {
+		ret = -ENOMEM;
 		goto err_sysram;
+	}
 
 	info = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(info)) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 9f96dfe..7c9626d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -99,7 +99,7 @@
 };
 
 static int mdp4_plane_prepare_fb(struct drm_plane *plane,
-		const struct drm_plane_state *new_state)
+				 struct drm_plane_state *new_state)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 	struct mdp4_kms *mdp4_kms = get_kms(plane);
@@ -113,7 +113,7 @@
 }
 
 static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
-		const struct drm_plane_state *old_state)
+				  struct drm_plane_state *old_state)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 	struct mdp4_kms *mdp4_kms = get_kms(plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index a02a24e..ba8f432 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -250,7 +250,7 @@
 };
 
 static int mdp5_plane_prepare_fb(struct drm_plane *plane,
-		const struct drm_plane_state *new_state)
+				 struct drm_plane_state *new_state)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -264,7 +264,7 @@
 }
 
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
-		const struct drm_plane_state *old_state)
+				  struct drm_plane_state *old_state)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 4a8a6f1..73bae38 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -112,13 +112,13 @@
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 
-	drm_atomic_helper_wait_for_fences(dev, state);
+	drm_atomic_helper_wait_for_fences(dev, state, false);
 
 	kms->funcs->prepare_commit(kms, state);
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, false);
+	drm_atomic_helper_commit_planes(dev, state, 0);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6190035..8ab9ce5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1151,7 +1151,7 @@
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem);
 out:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
@@ -1179,7 +1179,7 @@
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
@@ -1297,7 +1297,7 @@
 	/* Fallback to software copy. */
 	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
 	if (ret == 0)
-		ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
+		ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem);
 
 out:
 	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 3dd78f2..e1cfba5 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -96,7 +96,7 @@
 	dispc_runtime_get();
 
 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
-	drm_atomic_helper_commit_planes(dev, old_state, false);
+	drm_atomic_helper_commit_planes(dev, old_state, 0);
 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
 	omap_atomic_wait_for_completion(dev, old_state);
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 4c7727e..66ac8c4 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -60,7 +60,7 @@
 }
 
 static int omap_plane_prepare_fb(struct drm_plane *plane,
-				 const struct drm_plane_state *new_state)
+				 struct drm_plane_state *new_state)
 {
 	if (!new_state->fb)
 		return 0;
@@ -69,7 +69,7 @@
 }
 
 static void omap_plane_cleanup_fb(struct drm_plane *plane,
-				  const struct drm_plane_state *old_state)
+				  struct drm_plane_state *old_state)
 {
 	if (old_state->fb)
 		omap_framebuffer_unpin(old_state->fb);
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index ffe8853..9b728ed 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -57,11 +57,8 @@
 static int
 alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
 {
-	int ret;
-	ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
-					 QXL_RELEASE_DRAWABLE, release,
-					 NULL);
-	return ret;
+	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+					  QXL_RELEASE_DRAWABLE, release, NULL);
 }
 
 static void
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 5e1d789..fa5440d 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -61,7 +61,7 @@
 	if (domain == QXL_GEM_DOMAIN_VRAM)
 		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
 	if (domain == QXL_GEM_DOMAIN_SURFACE)
-		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
 	if (domain == QXL_GEM_DOMAIN_CPU)
 		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
 	if (!c)
@@ -151,7 +151,7 @@
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
 		map = qdev->vram_mapping;
-	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
 		map = qdev->surface_mapping;
 	else
 		goto fallback;
@@ -191,7 +191,7 @@
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
 		map = qdev->vram_mapping;
-	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
 		map = qdev->surface_mapping;
 	else
 		goto fallback;
@@ -311,7 +311,7 @@
 
 int qxl_surf_evict(struct qxl_device *qdev)
 {
-	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
 }
 
 int qxl_vram_evict(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index f599cd0..cd83f05 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -203,12 +203,9 @@
 static int qxl_release_bo_alloc(struct qxl_device *qdev,
 				struct qxl_bo **bo)
 {
-	int ret;
 	/* pin releases bo's they are too messy to evict */
-	ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
-			    QXL_GEM_DOMAIN_VRAM, NULL,
-			    bo);
-	return ret;
+	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
+			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
 }
 
 int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index d50c967..a257ad2 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -168,7 +168,7 @@
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-	case TTM_PL_PRIV0:
+	case TTM_PL_PRIV:
 		/* "On-card" video ram */
 		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = 0;
@@ -235,7 +235,7 @@
 		mem->bus.base = qdev->vram_base;
 		mem->bus.offset = mem->start << PAGE_SHIFT;
 		break;
-	case TTM_PL_PRIV0:
+	case TTM_PL_PRIV:
 		mem->bus.is_iomem = true;
 		mem->bus.base = qdev->surfaceram_base;
 		mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -361,8 +361,8 @@
 		qxl_move_null(bo, new_mem);
 		return 0;
 	}
-	return ttm_bo_move_memcpy(bo, evict, interruptible,
-				  no_wait_gpu, new_mem);
+	return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu,
+				  new_mem);
 }
 
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
@@ -376,7 +376,7 @@
 	qbo = to_qxl_bo(bo);
 	qdev = qbo->gem_base.dev->dev_private;
 
-	if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+	if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
 		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 }
 
@@ -422,7 +422,7 @@
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
-	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
 			   qdev->surfaceram_size / PAGE_SIZE);
 	if (r) {
 		DRM_ERROR("Failed initializing Surfaces heap.\n");
@@ -445,7 +445,7 @@
 void qxl_ttm_fini(struct qxl_device *qdev)
 {
 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
-	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
 	ttm_bo_device_release(&qdev->mman.bdev);
 	qxl_ttm_global_fini(qdev);
 	DRM_INFO("qxl: ttm finalized\n");
@@ -489,7 +489,7 @@
 		if (i == 0)
 			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
 		else
-			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
 
 	}
 	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a97abc8..4824f70 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1154,6 +1154,7 @@
 	u32 tmp, viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
+	char *format_name;
 
 	/* no fb bound */
 	if (!atomic && !crtc->primary->fb) {
@@ -1257,8 +1258,9 @@
 		bypass_lut = true;
 		break;
 	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			  drm_get_format_name(target_fb->pixel_format));
+		format_name = drm_get_format_name(target_fb->pixel_format);
+		DRM_ERROR("Unsupported screen format %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
@@ -1433,8 +1435,8 @@
 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* set pageflip to happen only at start of vblank interval (front porch) */
-	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
@@ -1469,6 +1471,7 @@
 	u32 viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
+	char *format_name;
 
 	/* no fb bound */
 	if (!atomic && !crtc->primary->fb) {
@@ -1558,8 +1561,9 @@
 		bypass_lut = true;
 		break;
 	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			  drm_get_format_name(target_fb->pixel_format));
+		format_name = drm_get_format_name(target_fb->pixel_format);
+		DRM_ERROR("Unsupported screen format %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
@@ -1632,8 +1636,8 @@
 	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* set pageflip to happen only at start of vblank interval (front porch) */
-	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index cead089a..432cb46 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -389,22 +389,21 @@
 {
 	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
 	u8 msg[DP_DPCD_SIZE];
-	int ret, i;
+	int ret;
 
-	for (i = 0; i < 7; i++) {
-		ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
-				       DP_DPCD_SIZE);
-		if (ret == DP_DPCD_SIZE) {
-			memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+	ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
+			       DP_DPCD_SIZE);
+	if (ret == DP_DPCD_SIZE) {
+		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-			DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
-				      dig_connector->dpcd);
+		DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+			      dig_connector->dpcd);
 
-			radeon_dp_probe_oui(radeon_connector);
+		radeon_dp_probe_oui(radeon_connector);
 
-			return true;
-		}
+		return true;
 	}
+
 	dig_connector->dpcd[0] = 0;
 	return false;
 }
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0c1b9ff..f6ff41a 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1871,7 +1871,7 @@
 {
 	const __be32 *fw_data = NULL;
 	const __le32 *new_fw_data = NULL;
-	u32 running, blackout = 0, tmp;
+	u32 running, tmp;
 	u32 *io_mc_regs = NULL;
 	const __le32 *new_io_mc_regs = NULL;
 	int i, regs_size, ucode_size;
@@ -1912,11 +1912,6 @@
 	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
 
 	if (running == 0) {
-		if (running) {
-			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
-			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
-		}
-
 		/* reset the engine and set to writable */
 		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
 		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -1964,9 +1959,6 @@
 				break;
 			udelay(1);
 		}
-
-		if (running)
-			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
 	}
 
 	return 0;
@@ -4201,11 +4193,7 @@
 u32 cik_gfx_get_wptr(struct radeon_device *rdev,
 		     struct radeon_ring *ring)
 {
-	u32 wptr;
-
-	wptr = RREG32(CP_RB0_WPTR);
-
-	return wptr;
+	return RREG32(CP_RB0_WPTR);
 }
 
 void cik_gfx_set_wptr(struct radeon_device *rdev,
@@ -8215,7 +8203,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index cead228..48db935 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2069,6 +2069,7 @@
 #define UVD_UDEC_ADDR_CONFIG		0xef4c
 #define UVD_UDEC_DB_ADDR_CONFIG		0xef50
 #define UVD_UDEC_DBW_ADDR_CONFIG	0xef54
+#define UVD_NO_OP			0xeffc
 
 #define UVD_LMI_EXT40_ADDR		0xf498
 #define UVD_GP_SCRATCH4			0xf4e0
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index db275b7..0b6b576 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2878,9 +2878,8 @@
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x7) != 3) {
+			if ((tmp & 0x7) != 0) {
 				tmp &= ~0x7;
-				tmp |= 0x3;
 				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -5580,7 +5579,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index c8e3d39..f3d88ca 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1523,6 +1523,7 @@
 #define UVD_UDEC_ADDR_CONFIG				0xef4c
 #define UVD_UDEC_DB_ADDR_CONFIG				0xef50
 #define UVD_UDEC_DBW_ADDR_CONFIG			0xef54
+#define UVD_NO_OP					0xeffc
 #define UVD_RBC_RB_RPTR					0xf690
 #define UVD_RBC_RB_WPTR					0xf694
 #define UVD_STATUS					0xf6bc
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 4a3d7ca..103fc86 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2062,7 +2062,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 47eb49b..3c9fec8 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -1137,6 +1137,7 @@
 #define UVD_UDEC_ADDR_CONFIG				0xEF4C
 #define UVD_UDEC_DB_ADDR_CONFIG				0xEF50
 #define UVD_UDEC_DBW_ADDR_CONFIG			0xEF54
+#define UVD_NO_OP					0xEFFC
 #define UVD_RBC_RB_RPTR					0xF690
 #define UVD_RBC_RB_WPTR					0xF694
 #define UVD_STATUS					0xf6bc
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f25994b..f5e84f4 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1071,11 +1071,7 @@
 u32 r100_gfx_get_wptr(struct radeon_device *rdev,
 		      struct radeon_ring *ring)
 {
-	u32 wptr;
-
-	wptr = RREG32(RADEON_CP_RB_WPTR);
-
-	return wptr;
+	return RREG32(RADEON_CP_RB_WPTR);
 }
 
 void r100_gfx_set_wptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9247e7d..a951881 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2631,11 +2631,7 @@
 u32 r600_gfx_get_wptr(struct radeon_device *rdev,
 		      struct radeon_ring *ring)
 {
-	u32 wptr;
-
-	wptr = RREG32(R600_CP_RB_WPTR);
-
-	return wptr;
+	return RREG32(R600_CP_RB_WPTR);
 }
 
 void r600_gfx_set_wptr(struct radeon_device *rdev,
@@ -3097,7 +3093,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 1e8495c..2e00a52 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1490,6 +1490,7 @@
 #define UVD_GPCOM_VCPU_DATA0				0xef10
 #define UVD_GPCOM_VCPU_DATA1				0xef14
 #define UVD_ENGINE_CNTL					0xef18
+#define UVD_NO_OP					0xeffc
 
 #define UVD_SEMA_CNTL					0xf400
 #define UVD_RB_ARB_CTRL					0xf480
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5633ee3..1b0dcad 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -742,6 +742,7 @@
 	struct work_struct		unpin_work;
 	struct radeon_device		*rdev;
 	int				crtc_id;
+	u32				target_vblank;
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct radeon_bo		*old_rbo;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a00dd2f..b423c01 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -639,7 +639,7 @@
  * Used at driver startup.
  * Returns true if virtual or false if not.
  */
-static bool radeon_device_is_virtual(void)
+bool radeon_device_is_virtual(void)
 {
 #ifdef CONFIG_X86
 	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
@@ -1594,7 +1594,8 @@
 
 	rdev = dev->dev_private;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 		return 0;
 
 	drm_kms_helper_poll_disable(dev);
@@ -1689,7 +1690,8 @@
 	struct drm_crtc *crtc;
 	int r;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 		return 0;
 
 	if (fbcon) {
@@ -1956,14 +1958,3 @@
 	}
 #endif
 }
-
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor)
-{
-	return 0;
-}
-
-void radeon_debugfs_cleanup(struct drm_minor *minor)
-{
-}
-#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c3206fb..890171f 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -400,14 +400,13 @@
 	struct radeon_flip_work *work =
 		container_of(__work, struct radeon_flip_work, flip_work);
 	struct radeon_device *rdev = work->rdev;
+	struct drm_device *dev = rdev->ddev;
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
 
 	struct drm_crtc *crtc = &radeon_crtc->base;
 	unsigned long flags;
 	int r;
-	int vpos, hpos, stat, min_udelay = 0;
-	unsigned repcnt = 4;
-	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+	int vpos, hpos;
 
 	down_read(&rdev->exclusive_lock);
 	if (work->fence) {
@@ -438,59 +437,25 @@
 		work->fence = NULL;
 	}
 
+	/* Wait until we're out of the vertical blank period before the one
+	 * targeted by the flip
+	 */
+	while (radeon_crtc->enabled &&
+	       (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0,
+					   &vpos, &hpos, NULL, NULL,
+					   &crtc->hwmode)
+		& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+	       (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+	       (int)(work->target_vblank -
+		     dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)
+		usleep_range(1000, 2000);
+
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
 	/* set the proper interrupt */
 	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
 
-	/* If this happens to execute within the "virtually extended" vblank
-	 * interval before the start of the real vblank interval then it needs
-	 * to delay programming the mmio flip until the real vblank is entered.
-	 * This prevents completing a flip too early due to the way we fudge
-	 * our vblank counter and vblank timestamps in order to work around the
-	 * problem that the hw fires vblank interrupts before actual start of
-	 * vblank (when line buffer refilling is done for a frame). It
-	 * complements the fudging logic in radeon_get_crtc_scanoutpos() for
-	 * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
-	 *
-	 * In practice this won't execute very often unless on very fast
-	 * machines because the time window for this to happen is very small.
-	 */
-	while (radeon_crtc->enabled && --repcnt) {
-		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
-		 * start in hpos, and to the "fudged earlier" vblank start in
-		 * vpos.
-		 */
-		stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
-						  GET_DISTANCE_TO_VBLANKSTART,
-						  &vpos, &hpos, NULL, NULL,
-						  &crtc->hwmode);
-
-		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
-		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
-		    !(vpos >= 0 && hpos <= 0))
-			break;
-
-		/* Sleep at least until estimated real start of hw vblank */
-		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
-		if (min_udelay > vblank->framedur_ns / 2000) {
-			/* Don't wait ridiculously long - something is wrong */
-			repcnt = 0;
-			break;
-		}
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-		usleep_range(min_udelay, 2 * min_udelay);
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
-	};
-
-	if (!repcnt)
-		DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
-				 "framedur %d, linedur %d, stat %d, vpos %d, "
-				 "hpos %d\n", work->crtc_id, min_udelay,
-				 vblank->framedur_ns / 1000,
-				 vblank->linedur_ns / 1000, stat, vpos, hpos);
-
 	/* do the flip (mmio) */
 	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);
 
@@ -499,10 +464,11 @@
 	up_read(&rdev->exclusive_lock);
 }
 
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_pending_vblank_event *event,
-				 uint32_t page_flip_flags)
+static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
+					struct drm_framebuffer *fb,
+					struct drm_pending_vblank_event *event,
+					uint32_t page_flip_flags,
+					uint32_t target)
 {
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
@@ -599,12 +565,8 @@
 		base &= ~7;
 	}
 	work->base = base;
-
-	r = drm_crtc_vblank_get(crtc);
-	if (r) {
-		DRM_ERROR("failed to get vblank before flip\n");
-		goto pflip_cleanup;
-	}
+	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+		dev->driver->get_vblank_counter(dev, work->crtc_id);
 
 	/* We borrow the event spin lock for protecting flip_work */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -613,7 +575,7 @@
 		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		r = -EBUSY;
-		goto vblank_cleanup;
+		goto pflip_cleanup;
 	}
 	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
 	radeon_crtc->flip_work = work;
@@ -626,9 +588,6 @@
 	queue_work(radeon_crtc->flip_queue, &work->flip_work);
 	return 0;
 
-vblank_cleanup:
-	drm_crtc_vblank_put(crtc);
-
 pflip_cleanup:
 	if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
 		DRM_ERROR("failed to reserve new rbo in error path\n");
@@ -697,7 +656,7 @@
 	.gamma_set = radeon_crtc_gamma_set,
 	.set_config = radeon_crtc_set_config,
 	.destroy = radeon_crtc_destroy,
-	.page_flip = radeon_crtc_page_flip,
+	.page_flip_target = radeon_crtc_page_flip_target,
 };
 
 static void radeon_crtc_init(struct drm_device *dev, int index)
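
The switch to .page_flip_target above replaces the removed delay
heuristics with explicit vblank arithmetic: the target, expressed in
drm_crtc_vblank_count() units, is rebased onto the hardware counter
when the flip is queued, and the worker then sleeps until the hardware
catches up. Condensed from the hunks above (error handling and the
scanout-position check are elided):

/* Queue side: rebase the user-visible target onto the hw counter. */
work->target_vblank = target - drm_crtc_vblank_count(crtc) +
	dev->driver->get_vblank_counter(dev, work->crtc_id);

/* Worker side: the signed subtraction makes the comparison safe
 * across counter wraparound.
 */
while ((int)(work->target_vblank -
	     dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)
	usleep_range(1000, 2000);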
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index db64e00..2d46564 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -164,7 +164,6 @@
 	}
 
 	if (tmp & AUX_SW_RX_TIMEOUT) {
-		DRM_DEBUG_KMS("dp_aux_ch timed out\n");
 		ret = -ETIMEDOUT;
 		goto done;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 90f2ff2..78367ba 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -95,9 +95,10 @@
  *   2.44.0 - SET_APPEND_CNT packet3 support
  *   2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
+ *   2.47.0 - Add UVD_NO_OP register support
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	46
+#define KMS_DRIVER_MINOR	47
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -155,11 +156,6 @@
 extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
 				    unsigned long arg);
 
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor);
-void radeon_debugfs_cleanup(struct drm_minor *minor);
-#endif
-
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
 void radeon_register_atpx_handler(void);
@@ -310,6 +306,8 @@
 
 static struct drm_driver kms_driver;
 
+bool radeon_device_is_virtual(void);
+
 static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 {
 	struct apertures_struct *ap;
@@ -363,6 +361,16 @@
 	drm_put_dev(dev);
 }
 
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+	/* if we are running in a VM, make sure the device
+	 * is torn down properly on reboot/shutdown
+	 */
+	if (radeon_device_is_virtual())
+		radeon_pci_remove(pdev);
+}
+
 static int radeon_pmops_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -374,6 +382,14 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	/* GPU comes up enabled by the bios on resume */
+	if (radeon_is_px(drm_dev)) {
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+	}
+
 	return radeon_resume_kms(drm_dev, true, true);
 }
 
@@ -530,10 +546,6 @@
 	.disable_vblank = radeon_disable_vblank_kms,
 	.get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
 	.get_scanout_position = radeon_get_crtc_scanoutpos,
-#if defined(CONFIG_DEBUG_FS)
-	.debugfs_init = radeon_debugfs_init,
-	.debugfs_cleanup = radeon_debugfs_cleanup,
-#endif
 	.irq_preinstall = radeon_driver_irq_preinstall_kms,
 	.irq_postinstall = radeon_driver_irq_postinstall_kms,
 	.irq_uninstall = radeon_driver_irq_uninstall_kms,
@@ -575,6 +587,7 @@
 	.id_table = pciidlist,
 	.probe = radeon_pci_probe,
 	.remove = radeon_pci_remove,
+	.shutdown = radeon_pci_shutdown,
 	.driver.pm = &radeon_pm_ops,
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 568e036..0daad44 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -25,6 +25,7 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -46,8 +47,35 @@
 	struct radeon_device *rdev;
 };
 
+static int
+radeonfb_open(struct fb_info *info, int user)
+{
+	struct radeon_fbdev *rfbdev = info->par;
+	struct radeon_device *rdev = rfbdev->rdev;
+	int ret = pm_runtime_get_sync(rdev->ddev->dev);
+	if (ret < 0 && ret != -EACCES) {
+		pm_runtime_mark_last_busy(rdev->ddev->dev);
+		pm_runtime_put_autosuspend(rdev->ddev->dev);
+		return ret;
+	}
+	return 0;
+}
+
+static int
+radeonfb_release(struct fb_info *info, int user)
+{
+	struct radeon_fbdev *rfbdev = info->par;
+	struct radeon_device *rdev = rfbdev->rdev;
+
+	pm_runtime_mark_last_busy(rdev->ddev->dev);
+	pm_runtime_put_autosuspend(rdev->ddev->dev);
+	return 0;
+}
+
 static struct fb_ops radeonfb_ops = {
 	.owner = THIS_MODULE,
+	.fb_open = radeonfb_open,
+	.fb_release = radeonfb_release,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = drm_fb_helper_cfb_fillrect,
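
The new fb_open/fb_release hooks hold a runtime-PM reference for the
lifetime of each fbdev client, so the GPU can only autosuspend while
the console is closed; -EACCES is tolerated because it merely means
runtime PM is disabled for this device. The balanced pattern, reduced
to its core (example_* names are illustrative):

#include <linux/pm_runtime.h>

static int example_open(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0 && ret != -EACCES) {
		/* drop the reference taken by the failed get */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return ret;
	}
	return 0;
}

static void example_release(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}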
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 9590bcd..021aa00 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -938,10 +938,8 @@
 			 "Radeon i2c hw bus %s", name);
 		i2c->adapter.algo = &radeon_i2c_algo;
 		ret = i2c_add_adapter(&i2c->adapter);
-		if (ret) {
-			DRM_ERROR("Failed to register hw i2c %s\n", name);
+		if (ret)
 			goto out_free;
-		}
 	} else if (rec->hw_capable &&
 		   radeon_hw_i2c &&
 		   ASIC_IS_DCE3(rdev)) {
@@ -950,10 +948,8 @@
 			 "Radeon i2c hw bus %s", name);
 		i2c->adapter.algo = &radeon_atom_i2c_algo;
 		ret = i2c_add_adapter(&i2c->adapter);
-		if (ret) {
-			DRM_ERROR("Failed to register hw i2c %s\n", name);
+		if (ret)
 			goto out_free;
-		}
 	} else {
 		/* set the radeon bit adapter */
 		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 835563c..4388dde 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -641,11 +641,11 @@
 	if (rdev->family >= CHIP_CAYMAN) {
 		struct radeon_fpriv *fpriv;
 		struct radeon_vm *vm;
-		int r;
 
 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
 		if (unlikely(!fpriv)) {
-			return -ENOMEM;
+			r = -ENOMEM;
+			goto out_suspend;
 		}
 
 		if (rdev->accel_working) {
@@ -653,14 +653,14 @@
 			r = radeon_vm_init(rdev, vm);
 			if (r) {
 				kfree(fpriv);
-				return r;
+				goto out_suspend;
 			}
 
 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 			if (r) {
 				radeon_vm_fini(rdev, vm);
 				kfree(fpriv);
-				return r;
+				goto out_suspend;
 			}
 
 			/* map the ib pool buffer read only into
@@ -674,15 +674,16 @@
 			if (r) {
 				radeon_vm_fini(rdev, vm);
 				kfree(fpriv);
-				return r;
+				goto out_suspend;
 			}
 		}
 		file_priv->driver_priv = fpriv;
 	}
 
+out_suspend:
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
-	return 0;
+	return r;
 }
 
 /**
@@ -717,6 +718,8 @@
 		kfree(fpriv);
 		file_priv->driver_priv = NULL;
 	}
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
 }
 
 /**
@@ -733,6 +736,8 @@
 {
 	struct radeon_device *rdev = dev->dev_private;
 
+	pm_runtime_get_sync(dev->dev);
+
 	mutex_lock(&rdev->gem.mutex);
 	if (rdev->hyperz_filp == file_priv)
 		rdev->hyperz_filp = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0c00e19..93414ac 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -346,7 +346,7 @@
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
@@ -379,7 +379,7 @@
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -444,8 +444,7 @@
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, interruptible,
-				       no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
 		if (r) {
 			return r;
 		}
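
These radeon call sites, like the nouveau and qxl ones earlier, track
the removal of the evict bool from the TTM move helpers. The reduced
prototypes, as implied by the converted callers (sketch only):

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool interruptible, bool no_wait_gpu,
		    struct ttm_mem_reg *new_mem);
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool interruptible, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem);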
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 73dfe01..0cd0e7b 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -669,6 +669,7 @@
 				return r;
 			break;
 		case UVD_ENGINE_CNTL:
+		case UVD_NO_OP:
 			break;
 		default:
 			DRM_ERROR("Invalid reg 0x%X!\n",
@@ -753,8 +754,10 @@
 	ib.ptr[3] = addr >> 32;
 	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
 	ib.ptr[5] = 0;
-	for (i = 6; i < 16; ++i)
-		ib.ptr[i] = PACKET2(0);
+	for (i = 6; i < 16; i += 2) {
+		ib.ptr[i] = PACKET0(UVD_NO_OP, 0);
+		ib.ptr[i+1] = 0;
+	}
 	ib.length_dw = 16;
 
 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
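
The padding change above relies on PACKET0 framing: assuming
PACKET0(reg, n) encodes a write of n + 1 dwords starting at reg (as it
does elsewhere in radeon), each PACKET0(UVD_NO_OP, 0) header is
followed by exactly one payload dword, which is why the loop now
advances two dwords per iteration:

/* Pad dwords 6..15 of the dummy IB with harmless register writes:
 * header at i, the single payload dword (value 0) at i + 1.
 */
for (i = 6; i < 16; i += 2) {
	ib.ptr[i] = PACKET0(UVD_NO_OP, 0);
	ib.ptr[i + 1] = 0;
}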
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index c55d653..76c55c5 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,9 +406,8 @@
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x7) != 3) {
+			if ((tmp & 0x7) != 0) {
 				tmp &= ~0x7;
-				tmp |= 0x3;
 				WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 1c120a4..729ae58 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1738,7 +1738,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9ef2064..0271f4c 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -387,6 +387,7 @@
 #define UVD_UDEC_TILING_CONFIG                          0xef40
 #define UVD_UDEC_DB_TILING_CONFIG                       0xef44
 #define UVD_UDEC_DBW_TILING_CONFIG                      0xef48
+#define UVD_NO_OP					0xeffc
 
 #define	GC_USER_SHADER_PIPE_CONFIG			0x8954
 #define		INACTIVE_QD_PIPES(x)				((x) << 8)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 2523ca9..7ee9aaf 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1547,7 +1547,7 @@
 {
 	const __be32 *fw_data = NULL;
 	const __le32 *new_fw_data = NULL;
-	u32 running, blackout = 0;
+	u32 running;
 	u32 *io_mc_regs = NULL;
 	const __le32 *new_io_mc_regs = NULL;
 	int i, regs_size, ucode_size;
@@ -1598,11 +1598,6 @@
 	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
 
 	if (running == 0) {
-		if (running) {
-			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
-			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
-		}
-
 		/* reset the engine and set to writable */
 		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
 		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -1641,9 +1636,6 @@
 				break;
 			udelay(1);
 		}
-
-		if (running)
-			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
 	}
 
 	return 0;
@@ -6928,7 +6920,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index d1a7b58..eb220ee 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1559,6 +1559,7 @@
 #define UVD_UDEC_ADDR_CONFIG				0xEF4C
 #define UVD_UDEC_DB_ADDR_CONFIG				0xEF50
 #define UVD_UDEC_DBW_ADDR_CONFIG			0xEF54
+#define UVD_NO_OP					0xEFFC
 #define UVD_RBC_RB_RPTR					0xF690
 #define UVD_RBC_RB_WPTR					0xF694
 #define UVD_STATUS					0xf6bc
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index f03eb55..bd9c3bb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -257,7 +257,8 @@
 	/* Apply the atomic update. */
 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
-	drm_atomic_helper_commit_planes(dev, old_state, true);
+	drm_atomic_helper_commit_planes(dev, old_state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	drm_atomic_helper_wait_for_vblanks(dev, old_state);
 
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 05d0713..9746365 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -3,7 +3,7 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
-		rockchip_drm_gem.o rockchip_drm_vop.o
+		rockchip_drm_gem.o rockchip_drm_psr.o rockchip_drm_vop.o
 rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
 
 obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 89aadbf..e83be15 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -32,6 +32,7 @@
 #include <drm/bridge/analogix_dp.h>
 
 #include "rockchip_drm_drv.h"
+#include "rockchip_drm_psr.h"
 #include "rockchip_drm_vop.h"
 
 #define RK3288_GRF_SOC_CON6		0x25c
@@ -41,6 +42,8 @@
 
 #define HIWORD_UPDATE(val, mask)	(val | (mask) << 16)
 
+#define PSR_WAIT_LINE_FLAG_TIMEOUT_MS	100
+
 #define to_dp(nm)	container_of(nm, struct rockchip_dp_device, nm)
 
 /**
@@ -68,11 +71,62 @@
 	struct regmap            *grf;
 	struct reset_control     *rst;
 
+	struct work_struct	 psr_work;
+	spinlock_t		 psr_lock;
+	unsigned int             psr_state;
+
 	const struct rockchip_dp_chip_data *data;
 
 	struct analogix_dp_plat_data plat_data;
 };
 
+static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
+{
+	struct rockchip_dp_device *dp = to_dp(encoder);
+	unsigned long flags;
+
+	dev_dbg(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
+
+	spin_lock_irqsave(&dp->psr_lock, flags);
+	if (enabled)
+		dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE;
+	else
+		dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
+
+	schedule_work(&dp->psr_work);
+	spin_unlock_irqrestore(&dp->psr_lock, flags);
+}
+
+static void analogix_dp_psr_work(struct work_struct *work)
+{
+	struct rockchip_dp_device *dp =
+				container_of(work, typeof(*dp), psr_work);
+	struct drm_crtc *crtc = dp->encoder.crtc;
+	int psr_state = dp->psr_state;
+	int vact_end;
+	int ret;
+	unsigned long flags;
+
+	if (!crtc)
+		return;
+
+	vact_end = crtc->mode.vtotal - crtc->mode.vsync_start + crtc->mode.vdisplay;
+
+	ret = rockchip_drm_wait_line_flag(dp->encoder.crtc, vact_end,
+					  PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
+	if (ret) {
+		dev_err(dp->dev, "line flag interrupt did not arrive\n");
+		return;
+	}
+
+	spin_lock_irqsave(&dp->psr_lock, flags);
+	if (psr_state == EDP_VSC_PSR_STATE_ACTIVE)
+		analogix_dp_enable_psr(dp->dev);
+	else
+		analogix_dp_disable_psr(dp->dev);
+	spin_unlock_irqrestore(&dp->psr_lock, flags);
+}
+
 static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
 {
 	reset_control_assert(dp->rst);
@@ -87,6 +141,8 @@
 	struct rockchip_dp_device *dp = to_dp(plat_data);
 	int ret;
 
+	cancel_work_sync(&dp->psr_work);
+
 	ret = clk_prepare_enable(dp->pclk);
 	if (ret < 0) {
 		dev_err(dp->dev, "failed to enable pclk %d\n", ret);
@@ -342,12 +398,22 @@
 	dp->plat_data.power_off = rockchip_dp_powerdown;
 	dp->plat_data.get_modes = rockchip_dp_get_modes;
 
+	spin_lock_init(&dp->psr_lock);
+	dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
+	INIT_WORK(&dp->psr_work, analogix_dp_psr_work);
+
+	rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set);
+
 	return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
 }
 
 static void rockchip_dp_unbind(struct device *dev, struct device *master,
 			       void *data)
 {
+	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+
+	rockchip_drm_psr_unregister(&dp->encoder);
+
 	return analogix_dp_unbind(dev, master, data);
 }
 
@@ -381,10 +447,8 @@
 
 		panel = of_drm_find_panel(panel_node);
 		of_node_put(panel_node);
-		if (!panel) {
-			DRM_ERROR("failed to find panel\n");
+		if (!panel)
 			return -EPROBE_DEFER;
-		}
 	}
 
 	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
@@ -445,7 +509,6 @@
 	.remove = rockchip_dp_remove,
 	.driver = {
 		   .name = "rockchip-dp",
-		   .owner = THIS_MODULE,
 		   .pm = &rockchip_dp_pm_ops,
 		   .of_match_table = of_match_ptr(rockchip_dp_dt_ids),
 	},
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a822d49..76eaf1d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -156,6 +156,9 @@
 
 	drm_dev->dev_private = private;
 
+	INIT_LIST_HEAD(&private->psr_list);
+	spin_lock_init(&private->psr_list_lock);
+
 	drm_mode_config_init(drm_dev);
 
 	rockchip_drm_mode_config_init(drm_dev);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index ea39329..5c69845 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -61,6 +61,9 @@
 	struct drm_gem_object *fbdev_bo;
 	const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
 	struct drm_atomic_state *state;
+
+	struct list_head psr_list;
+	spinlock_t psr_list_lock;
 };
 
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
@@ -70,4 +73,7 @@
 				   struct device *dev);
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
 				    struct device *dev);
+int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
+				unsigned int mstimeout);
+
 #endif /* _ROCKCHIP_DRM_DRV_H_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 55c5273..60bcc48 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -22,6 +22,7 @@
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_fb.h"
 #include "rockchip_drm_gem.h"
+#include "rockchip_drm_psr.h"
 
 #define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
 
@@ -63,9 +64,20 @@
 				     rockchip_fb->obj[0], handle);
 }
 
+static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
+				 struct drm_file *file,
+				 unsigned int flags, unsigned int color,
+				 struct drm_clip_rect *clips,
+				 unsigned int num_clips)
+{
+	rockchip_drm_psr_flush(fb->dev);
+	return 0;
+}
+
 static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
 	.destroy	= rockchip_drm_fb_destroy,
 	.create_handle	= rockchip_drm_fb_create_handle,
+	.dirty		= rockchip_drm_fb_dirty,
 };
 
 static struct rockchip_drm_fb *
@@ -233,7 +245,8 @@
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, true);
+	drm_atomic_helper_commit_planes(dev, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	drm_atomic_helper_commit_hw_done(state);
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
new file mode 100644
index 0000000..c6ac5d0
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_psr.h"
+
+#define PSR_FLUSH_TIMEOUT	msecs_to_jiffies(3000) /* 3 seconds */
+
+enum psr_state {
+	PSR_FLUSH,
+	PSR_ENABLE,
+	PSR_DISABLE,
+};
+
+struct psr_drv {
+	struct list_head	list;
+	struct drm_encoder	*encoder;
+
+	spinlock_t		lock;
+	enum psr_state		state;
+
+	struct timer_list	flush_timer;
+
+	void (*set)(struct drm_encoder *encoder, bool enable);
+};
+
+static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc)
+{
+	struct rockchip_drm_private *drm_drv = crtc->dev->dev_private;
+	struct psr_drv *psr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_for_each_entry(psr, &drm_drv->psr_list, list) {
+		if (psr->encoder->crtc == crtc)
+			goto out;
+	}
+	psr = ERR_PTR(-ENODEV);
+
+out:
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+	return psr;
+}
+
+static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state)
+{
+	/*
+	 * Allowed finite state machine:
+	 *
+	 *   PSR_ENABLE  < = = = = = >  PSR_FLUSH
+	 *       | ^                        |
+	 *       | |                        |
+	 *       v |                        |
+	 *   PSR_DISABLE < - - - - - - - - -
+	 */
+	if (state == psr->state)
+		return;
+
+	/* Requesting a flush when disabled is a noop */
+	if (state == PSR_FLUSH && psr->state == PSR_DISABLE)
+		return;
+
+	psr->state = state;
+
+	/* Already disabled in flush, change the state, but not the hardware */
+	if (state == PSR_DISABLE && psr->state == PSR_FLUSH)
+		return;
+
+	/* Actually commit the state change to hardware */
+	switch (psr->state) {
+	case PSR_ENABLE:
+		psr->set(psr->encoder, true);
+		break;
+
+	case PSR_DISABLE:
+	case PSR_FLUSH:
+		psr->set(psr->encoder, false);
+		break;
+	}
+}
+
+static void psr_set_state(struct psr_drv *psr, enum psr_state state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&psr->lock, flags);
+	psr_set_state_locked(psr, state);
+	spin_unlock_irqrestore(&psr->lock, flags);
+}
+
+static void psr_flush_handler(unsigned long data)
+{
+	struct psr_drv *psr = (struct psr_drv *)data;
+	unsigned long flags;
+
+	/* If the state has changed since we initiated the flush, do nothing */
+	spin_lock_irqsave(&psr->lock, flags);
+	if (psr->state == PSR_FLUSH)
+		psr_set_state_locked(psr, PSR_ENABLE);
+	spin_unlock_irqrestore(&psr->lock, flags);
+}
+
+/**
+ * rockchip_drm_psr_enable - enable the PSR of the encoder bound to the given CRTC
+ * @crtc: CRTC to obtain the PSR encoder
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_enable(struct drm_crtc *crtc)
+{
+	struct psr_drv *psr = find_psr_by_crtc(crtc);
+
+	if (IS_ERR(psr))
+		return PTR_ERR(psr);
+
+	psr_set_state(psr, PSR_ENABLE);
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_enable);
+
+/**
+ * rockchip_drm_psr_disable - disable the PSR of the encoder bound to the given CRTC
+ * @crtc: CRTC to obtain the PSR encoder
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_disable(struct drm_crtc *crtc)
+{
+	struct psr_drv *psr = find_psr_by_crtc(crtc);
+
+	if (IS_ERR(psr))
+		return PTR_ERR(psr);
+
+	psr_set_state(psr, PSR_DISABLE);
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_disable);
+
+/**
+ * rockchip_drm_psr_flush - force a flush of all registered PSR encoders
+ * @dev: drm device
+ *
+ * Disable the PSR function for all registered encoders, then re-enable it
+ * after PSR_FLUSH_TIMEOUT. If an encoder's PSR state changes during the
+ * flush period, that state is kept once the timeout expires.
+ */
+void rockchip_drm_psr_flush(struct drm_device *dev)
+{
+	struct rockchip_drm_private *drm_drv = dev->dev_private;
+	struct psr_drv *psr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_for_each_entry(psr, &drm_drv->psr_list, list) {
+		mod_timer(&psr->flush_timer,
+			  round_jiffies_up(jiffies + PSR_FLUSH_TIMEOUT));
+
+		psr_set_state(psr, PSR_FLUSH);
+	}
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_flush);
+
+/**
+ * rockchip_drm_psr_register - register an encoder with the PSR driver
+ * @encoder: encoder that supports the PSR function
+ * @psr_set: callback to set the PSR state
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_register(struct drm_encoder *encoder,
+			void (*psr_set)(struct drm_encoder *, bool enable))
+{
+	struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+	struct psr_drv *psr;
+	unsigned long flags;
+
+	if (!encoder || !psr_set)
+		return -EINVAL;
+
+	psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
+	if (!psr)
+		return -ENOMEM;
+
+	setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+	spin_lock_init(&psr->lock);
+
+	psr->state = PSR_DISABLE;
+	psr->encoder = encoder;
+	psr->set = psr_set;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_add_tail(&psr->list, &drm_drv->psr_list);
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_register);
+
+/**
+ * rockchip_drm_psr_unregister - unregister an encoder from the PSR driver
+ * @encoder: encoder to unregister
+ */
+void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
+{
+	struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+	struct psr_drv *psr, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
+		if (psr->encoder == encoder) {
+			del_timer(&psr->flush_timer);
+			list_del(&psr->list);
+			kfree(psr);
+		}
+	}
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_unregister);
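
Taken together, the new PSR core is consumed in three steps: an
encoder registers a set() callback at bind time, the framebuffer
->dirty hook calls rockchip_drm_psr_flush() on every screen update,
and the flush timer re-enters PSR after PSR_FLUSH_TIMEOUT of idleness.
A minimal consumer sketch (the example_* functions are illustrative):

static void example_psr_set(struct drm_encoder *encoder, bool enable)
{
	/* program the eDP/panel controller in or out of self-refresh */
}

static int example_bind(struct drm_encoder *encoder)
{
	/* one registration per PSR-capable encoder */
	return rockchip_drm_psr_register(encoder, example_psr_set);
}

static void example_unbind(struct drm_encoder *encoder)
{
	rockchip_drm_psr_unregister(encoder);
}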
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
new file mode 100644
index 0000000..c35b688
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ROCKCHIP_DRM_PSR___
+#define __ROCKCHIP_DRM_PSR___
+
+void rockchip_drm_psr_flush(struct drm_device *dev);
+int rockchip_drm_psr_enable(struct drm_crtc *crtc);
+int rockchip_drm_psr_disable(struct drm_crtc *crtc);
+
+int rockchip_drm_psr_register(struct drm_encoder *encoder,
+			void (*psr_set)(struct drm_encoder *, bool enable));
+void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
+
+#endif /* __ROCKCHIP_DRM_PSR___ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 31744fe..d486049 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -34,17 +34,21 @@
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
 #include "rockchip_drm_fb.h"
+#include "rockchip_drm_psr.h"
 #include "rockchip_drm_vop.h"
 
-#define __REG_SET_RELAXED(x, off, mask, shift, v) \
-		vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
-#define __REG_SET_NORMAL(x, off, mask, shift, v) \
-		vop_mask_write(x, off, (mask) << shift, (v) << shift)
+#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
+		vop_mask_write(x, off, mask, shift, v, write_mask, true)
+
+#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
+		vop_mask_write(x, off, mask, shift, v, write_mask, false)
 
 #define REG_SET(x, base, reg, v, mode) \
-		__REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+		__REG_SET_##mode(x, base + reg.offset, \
+				 reg.mask, reg.shift, v, reg.write_mask)
 #define REG_SET_MASK(x, base, reg, mask, v, mode) \
-		__REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)
+		__REG_SET_##mode(x, base + reg.offset, \
+				 mask, reg.shift, v, reg.write_mask)
 
 #define VOP_WIN_SET(x, win, name, v) \
 		REG_SET(x, win->base, win->phy->name, v, RELAXED)
@@ -106,6 +110,7 @@
 	struct device *dev;
 	struct drm_device *drm_dev;
 	bool is_enabled;
+	bool vblank_active;
 
 	/* mutex vsync_ work */
 	struct mutex vsync_mutex;
@@ -116,6 +121,8 @@
 	/* protected by dev->event_lock */
 	struct drm_pending_vblank_event *event;
 
+	struct completion line_flag_completion;
+
 	const struct vop_data *data;
 
 	uint32_t *regsbak;
@@ -162,27 +169,25 @@
 }
 
 static inline void vop_mask_write(struct vop *vop, uint32_t offset,
-				  uint32_t mask, uint32_t v)
+				  uint32_t mask, uint32_t shift, uint32_t v,
+				  bool write_mask, bool relaxed)
 {
-	if (mask) {
+	if (!mask)
+		return;
+
+	if (write_mask) {
+		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
+	} else {
 		uint32_t cached_val = vop->regsbak[offset >> 2];
 
-		cached_val = (cached_val & ~mask) | v;
-		writel(cached_val, vop->regs + offset);
-		vop->regsbak[offset >> 2] = cached_val;
+		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
+		vop->regsbak[offset >> 2] = v;
 	}
-}
 
-static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
-					  uint32_t mask, uint32_t v)
-{
-	if (mask) {
-		uint32_t cached_val = vop->regsbak[offset >> 2];
-
-		cached_val = (cached_val & ~mask) | v;
-		writel_relaxed(cached_val, vop->regs + offset);
-		vop->regsbak[offset >> 2] = cached_val;
-	}
+	if (relaxed)
+		writel_relaxed(v, vop->regs + offset);
+	else
+		writel(v, vop->regs + offset);
 }
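The consolidated helper also grows a write_mask path for VOPs (such as the rk3399 below) whose registers carry per-bit write enables in their upper 16 bits. A worked example of the encoding, with illustrative values:

	/* Setting a 1-bit field (mask 0x1, shift 0) to 1 through a
	 * write-mask register: bit 0 carries the value, bit 16 enables
	 * the write, and all other bits of the register are untouched. */
	uint32_t mask = 0x1, shift = 0, v = 1;
	uint32_t word = ((v << shift) & 0xffff) | (mask << (shift + 16));
	/* word == 0x00010001 -- no cached read-modify-write needed */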
 
 static inline uint32_t vop_get_intr_type(struct vop *vop,
@@ -238,7 +243,7 @@
 	case DRM_FORMAT_NV24:
 		return VOP_FMT_YUV444SP;
 	default:
-		DRM_ERROR("unsupport format[%08x]\n", format);
+		DRM_ERROR("unsupported format[%08x]\n", format);
 		return -EINVAL;
 	}
 }
@@ -315,7 +320,7 @@
 	int vskiplines = 0;
 
 	if (dst_w > 3840) {
-		DRM_ERROR("Maximum destination width (3840) exceeded\n");
+		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
 		return;
 	}
 
@@ -353,11 +358,11 @@
 	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
 	if (lb_mode == LB_RGB_3840X2) {
 		if (yrgb_ver_scl_mode != SCALE_NONE) {
-			DRM_ERROR("ERROR : not allow yrgb ver scale\n");
+			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
 			return;
 		}
 		if (cbcr_ver_scl_mode != SCALE_NONE) {
-			DRM_ERROR("ERROR : not allow cbcr ver scale\n");
+			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
 			return;
 		}
 		vsu_mode = SCALE_UP_BIL;
@@ -428,7 +433,72 @@
 	spin_unlock_irqrestore(&vop->irq_lock, flags);
 }
 
-static void vop_enable(struct drm_crtc *crtc)
+/*
+ * (1) each frame starts at the start of the Vsync pulse which is signaled by
+ *     the "FRAME_SYNC" interrupt.
+ * (2) the active data region of each frame ends at dsp_vact_end
+ * (3) we should program this same number (dsp_vact_end) into dsp_line_flag_num,
+ *     to get the "LINE_FLAG" interrupt at the end of the active on-screen data.
+ *
+ * VOP_INTR_CTRL0.dsp_line_flag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
+ * Interrupts
+ * LINE_FLAG -------------------------------+
+ * FRAME_SYNC ----+                         |
+ *                |                         |
+ *                v                         v
+ *                | Vsync | Vbp |  Vactive  | Vfp |
+ *                        ^     ^           ^     ^
+ *                        |     |           |     |
+ *                        |     |           |     |
+ * dsp_vs_end ------------+     |           |     |   VOP_DSP_VTOTAL_VS_END
+ * dsp_vact_start --------------+           |     |   VOP_DSP_VACT_ST_END
+ * dsp_vact_end ----------------------------+     |   VOP_DSP_VACT_ST_END
+ * dsp_total -------------------------------------+   VOP_DSP_VTOTAL_VS_END
+ */
+static bool vop_line_flag_irq_is_enabled(struct vop *vop)
+{
+	uint32_t line_flag_irq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vop->irq_lock, flags);
+
+	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);
+
+	spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+	return !!line_flag_irq;
+}
+
+static void vop_line_flag_irq_enable(struct vop *vop, int line_num)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!vop->is_enabled))
+		return;
+
+	spin_lock_irqsave(&vop->irq_lock, flags);
+
+	VOP_CTRL_SET(vop, line_flag_num[0], line_num);
+	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);
+
+	spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static void vop_line_flag_irq_disable(struct vop *vop)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!vop->is_enabled))
+		return;
+
+	spin_lock_irqsave(&vop->irq_lock, flags);
+
+	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);
+
+	spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
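Per the diagram above, the natural line to arm is dsp_vact_end, the last line of active data. A caller can derive it from the adjusted mode the same way the mode_set path below does (a sketch):

	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + adjusted_mode->vdisplay;

	vop_line_flag_irq_enable(vop, vact_end);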
+
+static int vop_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
 	int ret;
@@ -436,26 +506,20 @@
 	ret = pm_runtime_get_sync(vop->dev);
 	if (ret < 0) {
 		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
-		return;
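+		/*
+		 * pm_runtime_get_sync() bumps the usage count even when it
+		 * fails, so the error path must drop it again.
+		 */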
+		goto err_put_pm_runtime;
 	}
 
 	ret = clk_enable(vop->hclk);
-	if (ret < 0) {
-		dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
-		return;
-	}
+	if (WARN_ON(ret < 0))
+		goto err_put_pm_runtime;
 
 	ret = clk_enable(vop->dclk);
-	if (ret < 0) {
-		dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+	if (WARN_ON(ret < 0))
 		goto err_disable_hclk;
-	}
 
 	ret = clk_enable(vop->aclk);
-	if (ret < 0) {
-		dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
+	if (WARN_ON(ret < 0))
 		goto err_disable_dclk;
-	}
 
 	/*
 	 * Slave iommu shares power, irq and clock with vop.  It was associated
@@ -485,7 +549,7 @@
 
 	drm_crtc_vblank_on(crtc);
 
-	return;
+	return 0;
 
 err_disable_aclk:
 	clk_disable(vop->aclk);
@@ -493,6 +557,9 @@
 	clk_disable(vop->dclk);
 err_disable_hclk:
 	clk_disable(vop->hclk);
+err_put_pm_runtime:
+	pm_runtime_put_sync(vop->dev);
+	return ret;
 }
 
 static void vop_crtc_disable(struct drm_crtc *crtc)
@@ -567,7 +634,7 @@
 }
 
 static int vop_plane_prepare_fb(struct drm_plane *plane,
-				const struct drm_plane_state *new_state)
+				struct drm_plane_state *new_state)
 {
 	if (plane->state->fb)
 		drm_framebuffer_reference(plane->state->fb);
@@ -576,7 +643,7 @@
 }
 
 static void vop_plane_cleanup_fb(struct drm_plane *plane,
-				 const struct drm_plane_state *old_state)
+				 struct drm_plane_state *old_state)
 {
 	if (old_state->fb)
 		drm_framebuffer_unreference(old_state->fb);
@@ -852,6 +919,8 @@
 
 	spin_unlock_irqrestore(&vop->irq_lock, flags);
 
+	rockchip_drm_psr_disable(&vop->crtc);
+
 	return 0;
 }
 
@@ -868,6 +937,8 @@
 	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);
 
 	spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+	rockchip_drm_psr_enable(&vop->crtc);
 }
 
 static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
@@ -911,11 +982,17 @@
 	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
 	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
 	u16 vact_end = vact_st + vdisplay;
-	uint32_t val;
+	uint32_t pin_pol, val;
+	int ret;
 
 	WARN_ON(vop->event);
 
-	vop_enable(crtc);
+	ret = vop_enable(crtc);
+	if (ret) {
+		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
+		return;
+	}
+
 	/*
 	 * If dclk rate is zero, mean that scanout is stop,
 	 * we don't need wait any more.
@@ -952,25 +1029,31 @@
 		vop_dsp_hold_valid_irq_disable(vop);
 	}
 
-	val = 0x8;
-	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
-	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
-	VOP_CTRL_SET(vop, pin_pol, val);
+	pin_pol = 0x8;
+	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
+	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
+	VOP_CTRL_SET(vop, pin_pol, pin_pol);
+
 	switch (s->output_type) {
 	case DRM_MODE_CONNECTOR_LVDS:
 		VOP_CTRL_SET(vop, rgb_en, 1);
+		VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
 		break;
 	case DRM_MODE_CONNECTOR_eDP:
+		VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
 		VOP_CTRL_SET(vop, edp_en, 1);
 		break;
 	case DRM_MODE_CONNECTOR_HDMIA:
+		VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
 		VOP_CTRL_SET(vop, hdmi_en, 1);
 		break;
 	case DRM_MODE_CONNECTOR_DSI:
+		VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
 		VOP_CTRL_SET(vop, mipi_en, 1);
 		break;
 	default:
-		DRM_ERROR("unsupport connector_type[%d]\n", s->output_type);
+		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
+			      s->output_type);
 	}
 	VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
@@ -1012,10 +1095,11 @@
 	struct vop *vop = to_vop(crtc);
 
 	spin_lock_irq(&crtc->dev->event_lock);
-	if (crtc->state->event) {
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-		WARN_ON(vop->event);
+	vop->vblank_active = true;
+	WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+	WARN_ON(vop->event);
 
+	if (crtc->state->event) {
 		vop->event = crtc->state->event;
 		crtc->state->event = NULL;
 	}
@@ -1102,12 +1186,14 @@
 
 	spin_lock_irqsave(&drm->event_lock, flags);
 	if (vop->event) {
-
 		drm_crtc_send_vblank_event(crtc, vop->event);
-		drm_crtc_vblank_put(crtc);
 		vop->event = NULL;
 
 	}
+	if (vop->vblank_active) {
+		vop->vblank_active = false;
+		drm_crtc_vblank_put(crtc);
+	}
 	spin_unlock_irqrestore(&drm->event_lock, flags);
 
 	if (!completion_done(&vop->wait_update_complete))
@@ -1145,6 +1231,12 @@
 		ret = IRQ_HANDLED;
 	}
 
+	if (active_irqs & LINE_FLAG_INTR) {
+		complete(&vop->line_flag_completion);
+		active_irqs &= ~LINE_FLAG_INTR;
+		ret = IRQ_HANDLED;
+	}
+
 	if (active_irqs & FS_INTR) {
 		drm_crtc_handle_vblank(crtc);
 		vop_handle_vblank(vop);
@@ -1154,7 +1246,8 @@
 
 	/* Unhandled irqs are spurious. */
 	if (active_irqs)
-		DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
+		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
+			      active_irqs);
 
 	return ret;
 }
@@ -1189,7 +1282,8 @@
 					       win_data->phy->nformats,
 					       win_data->type, NULL);
 		if (ret) {
-			DRM_ERROR("failed to initialize plane\n");
+			DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
+				      ret);
 			goto err_cleanup_planes;
 		}
 
@@ -1227,7 +1321,8 @@
 					       win_data->phy->nformats,
 					       win_data->type, NULL);
 		if (ret) {
-			DRM_ERROR("failed to initialize overlay plane\n");
+			DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
+				      ret);
 			goto err_cleanup_crtc;
 		}
 		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
@@ -1235,14 +1330,15 @@
 
 	port = of_get_child_by_name(dev->of_node, "port");
 	if (!port) {
-		DRM_ERROR("no port node found in %s\n",
-			  dev->of_node->full_name);
+		DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
+			      dev->of_node->full_name);
 		ret = -ENOENT;
 		goto err_cleanup_crtc;
 	}
 
 	init_completion(&vop->dsp_hold_completion);
 	init_completion(&vop->wait_update_complete);
+	init_completion(&vop->line_flag_completion);
 	crtc->port = port;
 	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);
 
@@ -1370,6 +1466,7 @@
 	clk_disable(vop->aclk);
 
 	vop->is_enabled = false;
+	vop->vblank_active = false;
 
 	return 0;
 
@@ -1399,6 +1496,49 @@
 	}
 }
 
+/**
+ * rockchip_drm_wait_line_flag - wait for the given line flag event
+ * @crtc: CRTC to enable the line flag interrupt on
+ * @line_num: line number of interest
+ * @mstimeout: timeout in milliseconds
+ *
+ * Blocks until the line flag interrupt for the requested line has fired,
+ * or until the timeout expires.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
+				unsigned int mstimeout)
+{
+	struct vop *vop = to_vop(crtc);
+	unsigned long jiffies_left;
+
+	if (!crtc || !vop->is_enabled)
+		return -ENODEV;
+
+	if (line_num > crtc->mode.vtotal || !mstimeout)
+		return -EINVAL;
+
+	if (vop_line_flag_irq_is_enabled(vop))
+		return -EBUSY;
+
+	reinit_completion(&vop->line_flag_completion);
+	vop_line_flag_irq_enable(vop, line_num);
+
+	jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
+						   msecs_to_jiffies(mstimeout));
+	vop_line_flag_irq_disable(vop);
+
+	if (jiffies_left == 0) {
+		dev_err(vop->dev, "Timeout waiting for IRQ\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_wait_line_flag);
+
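One intended consumer is PSR entry: an encoder can wait until scanout has passed the active region before letting the panel self-refresh. A hedged usage sketch (line number and timeout are illustrative):

	/* Wait up to 100 ms for scanout to reach the end of the
	 * active region before switching the sink into self-refresh. */
	ret = rockchip_drm_wait_line_flag(crtc, vact_end, 100);
	if (ret)
		return ret;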
 static int vop_bind(struct device *dev, struct device *master, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -1467,6 +1607,7 @@
 		return ret;
 
 	pm_runtime_enable(&pdev->dev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 071ff0b..1dbc526 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -33,6 +33,7 @@
 	uint32_t offset;
 	uint32_t shift;
 	uint32_t mask;
+	bool write_mask;
 };
 
 struct vop_ctrl {
@@ -48,6 +49,10 @@
 	struct vop_reg dither_down;
 	struct vop_reg dither_up;
 	struct vop_reg pin_pol;
+	struct vop_reg rgb_pin_pol;
+	struct vop_reg hdmi_pin_pol;
+	struct vop_reg edp_pin_pol;
+	struct vop_reg mipi_pin_pol;
 
 	struct vop_reg htotal_pw;
 	struct vop_reg hact_st_end;
@@ -56,6 +61,8 @@
 	struct vop_reg hpost_st_end;
 	struct vop_reg vpost_st_end;
 
+	struct vop_reg line_flag_num[2];
+
 	struct vop_reg cfg_done;
 };
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 919992c..35c51f3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -23,7 +23,14 @@
 #define VOP_REG(off, _mask, s) \
 		{.offset = off, \
 		 .mask = _mask, \
-		 .shift = s,}
+		 .shift = s, \
+		 .write_mask = false,}
+
+#define VOP_REG_MASK(off, _mask, s) \
+		{.offset = off, \
+		 .mask = _mask, \
+		 .shift = s, \
+		 .write_mask = true,}
 
 static const uint32_t formats_win_full[] = {
 	DRM_FORMAT_XRGB8888,
@@ -50,6 +57,89 @@
 	DRM_FORMAT_BGR565,
 };
 
+static const struct vop_scl_regs rk3036_win_scl = {
+	.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+	.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+	.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+	.scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
+static const struct vop_win_phy rk3036_win0_data = {
+	.scl = &rk3036_win_scl,
+	.data_formats = formats_win_full,
+	.nformats = ARRAY_SIZE(formats_win_full),
+	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
+	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
+	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
+	.act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
+	.dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
+	.dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
+	.yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
+	.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
+	.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
+	.uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
+};
+
+static const struct vop_win_phy rk3036_win1_data = {
+	.data_formats = formats_win_lite,
+	.nformats = ARRAY_SIZE(formats_win_lite),
+	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
+	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
+	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
+	.act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
+	.dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
+	.dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
+	.yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
+	.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
+};
+
+static const struct vop_win_data rk3036_vop_win_data[] = {
+	{ .base = 0x00, .phy = &rk3036_win0_data,
+	  .type = DRM_PLANE_TYPE_PRIMARY },
+	{ .base = 0x00, .phy = &rk3036_win1_data,
+	  .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const int rk3036_vop_intrs[] = {
+	DSP_HOLD_VALID_INTR,
+	FS_INTR,
+	LINE_FLAG_INTR,
+	BUS_ERROR_INTR,
+};
+
+static const struct vop_intr rk3036_intr = {
+	.intrs = rk3036_vop_intrs,
+	.nintrs = ARRAY_SIZE(rk3036_vop_intrs),
+	.status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
+	.enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
+	.clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
+};
+
+static const struct vop_ctrl rk3036_ctrl_data = {
+	.standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
+	.out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
+	.pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
+	.htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+	.hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
+	.vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+	.vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
+	.line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12),
+	.cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
+};
+
+static const struct vop_reg_data rk3036_vop_init_reg_table[] = {
+	{RK3036_DSP_CTRL1, 0x00000000},
+};
+
+static const struct vop_data rk3036_vop = {
+	.init_table = rk3036_vop_init_reg_table,
+	.table_size = ARRAY_SIZE(rk3036_vop_init_reg_table),
+	.ctrl = &rk3036_ctrl_data,
+	.intr = &rk3036_intr,
+	.win = rk3036_vop_win_data,
+	.win_size = ARRAY_SIZE(rk3036_vop_win_data),
+};
+
 static const struct vop_scl_extension rk3288_win_full_scl_ext = {
 	.cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
 	.cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
@@ -133,6 +223,7 @@
 	.vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0),
 	.hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
 	.vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+	.line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12),
 	.cfg_done = VOP_REG(RK3288_REG_CFG_DONE, 0x1, 0),
 };
 
@@ -190,93 +281,104 @@
 	.win_size = ARRAY_SIZE(rk3288_vop_win_data),
 };
 
-static const struct vop_scl_regs rk3036_win_scl = {
-	.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
-	.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
-	.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
-	.scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+static const struct vop_ctrl rk3399_ctrl_data = {
+	.standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22),
+	.gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23),
+	.rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
+	.hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
+	.edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
+	.mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15),
+	.dither_down = VOP_REG(RK3399_DSP_CTRL1, 0xf, 1),
+	.dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6),
+	.data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19),
+	.out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0),
+	.rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
+	.hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20),
+	.edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24),
+	.mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28),
+	.htotal_pw = VOP_REG(RK3399_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+	.hact_st_end = VOP_REG(RK3399_DSP_HACT_ST_END, 0x1fff1fff, 0),
+	.vtotal_pw = VOP_REG(RK3399_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+	.vact_st_end = VOP_REG(RK3399_DSP_VACT_ST_END, 0x1fff1fff, 0),
+	.hpost_st_end = VOP_REG(RK3399_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
+	.vpost_st_end = VOP_REG(RK3399_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+	.line_flag_num[0] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 0),
+	.line_flag_num[1] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 16),
+	.cfg_done = VOP_REG_MASK(RK3399_REG_CFG_DONE, 0x1, 0),
 };
 
-static const struct vop_win_phy rk3036_win0_data = {
-	.scl = &rk3036_win_scl,
-	.data_formats = formats_win_full,
-	.nformats = ARRAY_SIZE(formats_win_full),
-	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
-	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
-	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
-	.act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
-	.dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
-	.dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
-	.yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
-	.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
-	.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
-	.uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
-};
-
-static const struct vop_win_phy rk3036_win1_data = {
-	.data_formats = formats_win_lite,
-	.nformats = ARRAY_SIZE(formats_win_lite),
-	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
-	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
-	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
-	.act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
-	.dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
-	.dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
-	.yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
-	.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
-};
-
-static const struct vop_win_data rk3036_vop_win_data[] = {
-	{ .base = 0x00, .phy = &rk3036_win0_data,
-	  .type = DRM_PLANE_TYPE_PRIMARY },
-	{ .base = 0x00, .phy = &rk3036_win1_data,
-	  .type = DRM_PLANE_TYPE_CURSOR },
-};
-
-static const int rk3036_vop_intrs[] = {
-	DSP_HOLD_VALID_INTR,
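+/*
+ * On rk3399 the interrupt registers dedicate one bit per source; the
+ * array index below is the hardware bit position, and zero entries mark
+ * bits that have no generic interrupt type.
+ */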
+static const int rk3399_vop_intrs[] = {
 	FS_INTR,
+	0, 0,
 	LINE_FLAG_INTR,
+	0,
 	BUS_ERROR_INTR,
+	0, 0, 0, 0, 0, 0, 0,
+	DSP_HOLD_VALID_INTR,
 };
 
-static const struct vop_intr rk3036_intr = {
-	.intrs = rk3036_vop_intrs,
-	.nintrs = ARRAY_SIZE(rk3036_vop_intrs),
-	.status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
-	.enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
-	.clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
+static const struct vop_intr rk3399_vop_intr = {
+	.intrs = rk3399_vop_intrs,
+	.nintrs = ARRAY_SIZE(rk3399_vop_intrs),
+	.status = VOP_REG_MASK(RK3399_INTR_STATUS0, 0xffff, 0),
+	.enable = VOP_REG_MASK(RK3399_INTR_EN0, 0xffff, 0),
+	.clear = VOP_REG_MASK(RK3399_INTR_CLEAR0, 0xffff, 0),
 };
 
-static const struct vop_ctrl rk3036_ctrl_data = {
-	.standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
-	.out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
-	.pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
-	.htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
-	.hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
-	.vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
-	.vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
-	.cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
+static const struct vop_reg_data rk3399_init_reg_table[] = {
+	{RK3399_SYS_CTRL, 0x2000f800},
+	{RK3399_DSP_CTRL0, 0x00000000},
+	{RK3399_WIN0_CTRL0, 0x00000080},
+	{RK3399_WIN1_CTRL0, 0x00000080},
+	/*
+	 * TODO: Win2/3 support the multiple-area function, but we haven't
+	 * found a suitable way to use it yet, so let's just use them like
+	 * the other windows, with only area 0 enabled.
+	 */
+	{RK3399_WIN2_CTRL0, 0x00000010},
+	{RK3399_WIN3_CTRL0, 0x00000010},
 };
 
-static const struct vop_reg_data rk3036_vop_init_reg_table[] = {
-	{RK3036_DSP_CTRL1, 0x00000000},
+static const struct vop_data rk3399_vop_big = {
+	.init_table = rk3399_init_reg_table,
+	.table_size = ARRAY_SIZE(rk3399_init_reg_table),
+	.intr = &rk3399_vop_intr,
+	.ctrl = &rk3399_ctrl_data,
+	/*
+	 * The rk3399 vop big window register layout is the same as rk3288's.
+	 */
+	.win = rk3288_vop_win_data,
+	.win_size = ARRAY_SIZE(rk3288_vop_win_data),
 };
 
-static const struct vop_data rk3036_vop = {
-	.init_table = rk3036_vop_init_reg_table,
-	.table_size = ARRAY_SIZE(rk3036_vop_init_reg_table),
-	.ctrl = &rk3036_ctrl_data,
-	.intr = &rk3036_intr,
-	.win = rk3036_vop_win_data,
-	.win_size = ARRAY_SIZE(rk3036_vop_win_data),
+static const struct vop_win_data rk3399_vop_lit_win_data[] = {
+	{ .base = 0x00, .phy = &rk3288_win01_data,
+	  .type = DRM_PLANE_TYPE_PRIMARY },
+	{ .base = 0x00, .phy = &rk3288_win23_data,
+	  .type = DRM_PLANE_TYPE_CURSOR},
+};
+
+static const struct vop_data rk3399_vop_lit = {
+	.init_table = rk3399_init_reg_table,
+	.table_size = ARRAY_SIZE(rk3399_init_reg_table),
+	.intr = &rk3399_vop_intr,
+	.ctrl = &rk3399_ctrl_data,
+	/*
+	 * The rk3399 vop lit window register layout is the same as rk3288's,
+	 * minus the win1 and win3 windows.
+	 */
+	.win = rk3399_vop_lit_win_data,
+	.win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
 };
 
 static const struct of_device_id vop_driver_dt_match[] = {
-	{ .compatible = "rockchip,rk3288-vop",
-	  .data = &rk3288_vop },
 	{ .compatible = "rockchip,rk3036-vop",
 	  .data = &rk3036_vop },
+	{ .compatible = "rockchip,rk3288-vop",
+	  .data = &rk3288_vop },
+	{ .compatible = "rockchip,rk3399-vop-big",
+	  .data = &rk3399_vop_big },
+	{ .compatible = "rockchip,rk3399-vop-lit",
+	  .data = &rk3399_vop_lit },
 	{},
 };
 MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
@@ -305,7 +407,6 @@
 	.remove = vop_remove,
 	.driver = {
 		.name = "rockchip-vop",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(vop_driver_dt_match),
 	},
 };
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index d4b46cb..cd19726 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -166,4 +166,197 @@
 #define RK3036_HWC_LUT_ADDR		0x800
 /* rk3036 register definition end */
 
+/* rk3399 register definition */
+#define RK3399_REG_CFG_DONE		0x00000
+#define RK3399_VERSION_INFO		0x00004
+#define RK3399_SYS_CTRL			0x00008
+#define RK3399_SYS_CTRL1		0x0000c
+#define RK3399_DSP_CTRL0		0x00010
+#define RK3399_DSP_CTRL1		0x00014
+#define RK3399_DSP_BG			0x00018
+#define RK3399_MCU_CTRL			0x0001c
+#define RK3399_WB_CTRL0			0x00020
+#define RK3399_WB_CTRL1			0x00024
+#define RK3399_WB_YRGB_MST		0x00028
+#define RK3399_WB_CBR_MST		0x0002c
+#define RK3399_WIN0_CTRL0		0x00030
+#define RK3399_WIN0_CTRL1		0x00034
+#define RK3399_WIN0_COLOR_KEY		0x00038
+#define RK3399_WIN0_VIR			0x0003c
+#define RK3399_WIN0_YRGB_MST		0x00040
+#define RK3399_WIN0_CBR_MST		0x00044
+#define RK3399_WIN0_ACT_INFO		0x00048
+#define RK3399_WIN0_DSP_INFO		0x0004c
+#define RK3399_WIN0_DSP_ST		0x00050
+#define RK3399_WIN0_SCL_FACTOR_YRGB	0x00054
+#define RK3399_WIN0_SCL_FACTOR_CBR	0x00058
+#define RK3399_WIN0_SCL_OFFSET		0x0005c
+#define RK3399_WIN0_SRC_ALPHA_CTRL	0x00060
+#define RK3399_WIN0_DST_ALPHA_CTRL	0x00064
+#define RK3399_WIN0_FADING_CTRL		0x00068
+#define RK3399_WIN0_CTRL2		0x0006c
+#define RK3399_WIN1_CTRL0		0x00070
+#define RK3399_WIN1_CTRL1		0x00074
+#define RK3399_WIN1_COLOR_KEY		0x00078
+#define RK3399_WIN1_VIR			0x0007c
+#define RK3399_WIN1_YRGB_MST		0x00080
+#define RK3399_WIN1_CBR_MST		0x00084
+#define RK3399_WIN1_ACT_INFO		0x00088
+#define RK3399_WIN1_DSP_INFO		0x0008c
+#define RK3399_WIN1_DSP_ST		0x00090
+#define RK3399_WIN1_SCL_FACTOR_YRGB	0x00094
+#define RK3399_WIN1_SCL_FACTOR_CBR	0x00098
+#define RK3399_WIN1_SCL_OFFSET		0x0009c
+#define RK3399_WIN1_SRC_ALPHA_CTRL	0x000a0
+#define RK3399_WIN1_DST_ALPHA_CTRL	0x000a4
+#define RK3399_WIN1_FADING_CTRL		0x000a8
+#define RK3399_WIN1_CTRL2		0x000ac
+#define RK3399_WIN2_CTRL0		0x000b0
+#define RK3399_WIN2_CTRL1		0x000b4
+#define RK3399_WIN2_VIR0_1		0x000b8
+#define RK3399_WIN2_VIR2_3		0x000bc
+#define RK3399_WIN2_MST0		0x000c0
+#define RK3399_WIN2_DSP_INFO0		0x000c4
+#define RK3399_WIN2_DSP_ST0		0x000c8
+#define RK3399_WIN2_COLOR_KEY		0x000cc
+#define RK3399_WIN2_MST1		0x000d0
+#define RK3399_WIN2_DSP_INFO1		0x000d4
+#define RK3399_WIN2_DSP_ST1		0x000d8
+#define RK3399_WIN2_SRC_ALPHA_CTRL	0x000dc
+#define RK3399_WIN2_MST2		0x000e0
+#define RK3399_WIN2_DSP_INFO2		0x000e4
+#define RK3399_WIN2_DSP_ST2		0x000e8
+#define RK3399_WIN2_DST_ALPHA_CTRL	0x000ec
+#define RK3399_WIN2_MST3		0x000f0
+#define RK3399_WIN2_DSP_INFO3		0x000f4
+#define RK3399_WIN2_DSP_ST3		0x000f8
+#define RK3399_WIN2_FADING_CTRL		0x000fc
+#define RK3399_WIN3_CTRL0		0x00100
+#define RK3399_WIN3_CTRL1		0x00104
+#define RK3399_WIN3_VIR0_1		0x00108
+#define RK3399_WIN3_VIR2_3		0x0010c
+#define RK3399_WIN3_MST0		0x00110
+#define RK3399_WIN3_DSP_INFO0		0x00114
+#define RK3399_WIN3_DSP_ST0		0x00118
+#define RK3399_WIN3_COLOR_KEY		0x0011c
+#define RK3399_WIN3_MST1		0x00120
+#define RK3399_WIN3_DSP_INFO1		0x00124
+#define RK3399_WIN3_DSP_ST1		0x00128
+#define RK3399_WIN3_SRC_ALPHA_CTRL	0x0012c
+#define RK3399_WIN3_MST2		0x00130
+#define RK3399_WIN3_DSP_INFO2		0x00134
+#define RK3399_WIN3_DSP_ST2		0x00138
+#define RK3399_WIN3_DST_ALPHA_CTRL	0x0013c
+#define RK3399_WIN3_MST3		0x00140
+#define RK3399_WIN3_DSP_INFO3		0x00144
+#define RK3399_WIN3_DSP_ST3		0x00148
+#define RK3399_WIN3_FADING_CTRL		0x0014c
+#define RK3399_HWC_CTRL0		0x00150
+#define RK3399_HWC_CTRL1		0x00154
+#define RK3399_HWC_MST			0x00158
+#define RK3399_HWC_DSP_ST		0x0015c
+#define RK3399_HWC_SRC_ALPHA_CTRL	0x00160
+#define RK3399_HWC_DST_ALPHA_CTRL	0x00164
+#define RK3399_HWC_FADING_CTRL		0x00168
+#define RK3399_HWC_RESERVED1		0x0016c
+#define RK3399_POST_DSP_HACT_INFO	0x00170
+#define RK3399_POST_DSP_VACT_INFO	0x00174
+#define RK3399_POST_SCL_FACTOR_YRGB	0x00178
+#define RK3399_POST_RESERVED		0x0017c
+#define RK3399_POST_SCL_CTRL		0x00180
+#define RK3399_POST_DSP_VACT_INFO_F1	0x00184
+#define RK3399_DSP_HTOTAL_HS_END	0x00188
+#define RK3399_DSP_HACT_ST_END		0x0018c
+#define RK3399_DSP_VTOTAL_VS_END	0x00190
+#define RK3399_DSP_VACT_ST_END		0x00194
+#define RK3399_DSP_VS_ST_END_F1		0x00198
+#define RK3399_DSP_VACT_ST_END_F1	0x0019c
+#define RK3399_PWM_CTRL			0x001a0
+#define RK3399_PWM_PERIOD_HPR		0x001a4
+#define RK3399_PWM_DUTY_LPR		0x001a8
+#define RK3399_PWM_CNT			0x001ac
+#define RK3399_BCSH_COLOR_BAR		0x001b0
+#define RK3399_BCSH_BCS			0x001b4
+#define RK3399_BCSH_H			0x001b8
+#define RK3399_BCSH_CTRL		0x001bc
+#define RK3399_CABC_CTRL0		0x001c0
+#define RK3399_CABC_CTRL1		0x001c4
+#define RK3399_CABC_CTRL2		0x001c8
+#define RK3399_CABC_CTRL3		0x001cc
+#define RK3399_CABC_GAUSS_LINE0_0	0x001d0
+#define RK3399_CABC_GAUSS_LINE0_1	0x001d4
+#define RK3399_CABC_GAUSS_LINE1_0	0x001d8
+#define RK3399_CABC_GAUSS_LINE1_1	0x001dc
+#define RK3399_CABC_GAUSS_LINE2_0	0x001e0
+#define RK3399_CABC_GAUSS_LINE2_1	0x001e4
+#define RK3399_FRC_LOWER01_0		0x001e8
+#define RK3399_FRC_LOWER01_1		0x001ec
+#define RK3399_FRC_LOWER10_0		0x001f0
+#define RK3399_FRC_LOWER10_1		0x001f4
+#define RK3399_FRC_LOWER11_0		0x001f8
+#define RK3399_FRC_LOWER11_1		0x001fc
+#define RK3399_AFBCD0_CTRL		0x00200
+#define RK3399_AFBCD0_HDR_PTR		0x00204
+#define RK3399_AFBCD0_PIC_SIZE		0x00208
+#define RK3399_AFBCD0_STATUS		0x0020c
+#define RK3399_AFBCD1_CTRL		0x00220
+#define RK3399_AFBCD1_HDR_PTR		0x00224
+#define RK3399_AFBCD1_PIC_SIZE		0x00228
+#define RK3399_AFBCD1_STATUS		0x0022c
+#define RK3399_AFBCD2_CTRL		0x00240
+#define RK3399_AFBCD2_HDR_PTR		0x00244
+#define RK3399_AFBCD2_PIC_SIZE		0x00248
+#define RK3399_AFBCD2_STATUS		0x0024c
+#define RK3399_AFBCD3_CTRL		0x00260
+#define RK3399_AFBCD3_HDR_PTR		0x00264
+#define RK3399_AFBCD3_PIC_SIZE		0x00268
+#define RK3399_AFBCD3_STATUS		0x0026c
+#define RK3399_INTR_EN0			0x00280
+#define RK3399_INTR_CLEAR0		0x00284
+#define RK3399_INTR_STATUS0		0x00288
+#define RK3399_INTR_RAW_STATUS0		0x0028c
+#define RK3399_INTR_EN1			0x00290
+#define RK3399_INTR_CLEAR1		0x00294
+#define RK3399_INTR_STATUS1		0x00298
+#define RK3399_INTR_RAW_STATUS1		0x0029c
+#define RK3399_LINE_FLAG		0x002a0
+#define RK3399_VOP_STATUS		0x002a4
+#define RK3399_BLANKING_VALUE		0x002a8
+#define RK3399_MCU_BYPASS_PORT		0x002ac
+#define RK3399_WIN0_DSP_BG		0x002b0
+#define RK3399_WIN1_DSP_BG		0x002b4
+#define RK3399_WIN2_DSP_BG		0x002b8
+#define RK3399_WIN3_DSP_BG		0x002bc
+#define RK3399_YUV2YUV_WIN		0x002c0
+#define RK3399_YUV2YUV_POST		0x002c4
+#define RK3399_AUTO_GATING_EN		0x002cc
+#define RK3399_WIN0_CSC_COE		0x003a0
+#define RK3399_WIN1_CSC_COE		0x003c0
+#define RK3399_WIN2_CSC_COE		0x003e0
+#define RK3399_WIN3_CSC_COE		0x00400
+#define RK3399_HWC_CSC_COE		0x00420
+#define RK3399_BCSH_R2Y_CSC_COE		0x00440
+#define RK3399_BCSH_Y2R_CSC_COE		0x00460
+#define RK3399_POST_YUV2YUV_Y2R_COE	0x00480
+#define RK3399_POST_YUV2YUV_3X3_COE	0x004a0
+#define RK3399_POST_YUV2YUV_R2Y_COE	0x004c0
+#define RK3399_WIN0_YUV2YUV_Y2R		0x004e0
+#define RK3399_WIN0_YUV2YUV_3X3		0x00500
+#define RK3399_WIN0_YUV2YUV_R2Y		0x00520
+#define RK3399_WIN1_YUV2YUV_Y2R		0x00540
+#define RK3399_WIN1_YUV2YUV_3X3		0x00560
+#define RK3399_WIN1_YUV2YUV_R2Y		0x00580
+#define RK3399_WIN2_YUV2YUV_Y2R		0x005a0
+#define RK3399_WIN2_YUV2YUV_3X3		0x005c0
+#define RK3399_WIN2_YUV2YUV_R2Y		0x005e0
+#define RK3399_WIN3_YUV2YUV_Y2R		0x00600
+#define RK3399_WIN3_YUV2YUV_3X3		0x00620
+#define RK3399_WIN3_YUV2YUV_R2Y		0x00640
+#define RK3399_WIN2_LUT_ADDR		0x01000
+#define RK3399_WIN3_LUT_ADDR		0x01400
+#define RK3399_HWC_LUT_ADDR		0x01800
+#define RK3399_CABC_GAMMA_LUT_ADDR	0x01c00
+#define RK3399_GAMMA_LUT_ADDR		0x02000
+/* rk3399 register definition end */
+
 #endif /* _ROCKCHIP_VOP_REG_H */
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index c01ad0a..3dc0d8f 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -1001,15 +1001,9 @@
 		cmdbuf->cmd_addr = kcmd_addr;
 	}
 	if (cmdbuf->vb_size) {
-		kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL);
-		if (kvb_addr == NULL) {
-			ret = -ENOMEM;
-			goto done;
-		}
-
-		if (copy_from_user(kvb_addr, cmdbuf->vb_addr,
-				       cmdbuf->vb_size)) {
-			ret = -EFAULT;
+		kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
+		if (IS_ERR(kvb_addr)) {
+			ret = PTR_ERR(kvb_addr);
 			goto done;
 		}
 		cmdbuf->vb_addr = kvb_addr;
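The conversion above is the standard memdup_user() idiom: the kmalloc()/copy_from_user() pair folds into one call that returns an ERR_PTR on either allocation or copy failure. The general shape (a sketch; user_ptr and size stand in for the caller's arguments):

	void *buf = memdup_user(user_ptr, size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	/* ... use buf, then kfree(buf) when done ... */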
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 96bd3d0..7cd3804 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -178,7 +178,7 @@
 	 */
 
 	drm_atomic_helper_commit_modeset_disables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state, false);
+	drm_atomic_helper_commit_planes(drm, state, 0);
 	drm_atomic_helper_commit_modeset_enables(drm, state);
 
 	drm_atomic_helper_wait_for_vblanks(drm, state);
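This hunk tracks an atomic-helper API change: the third argument of drm_atomic_helper_commit_planes() turned from a bool (active_only) into a flags word. Passing 0 keeps the old commit-all-planes behaviour, while drivers that only want planes on active CRTCs pass a flag, as the tegra hunk further below does:

	/* old API: bool active_only */
	drm_atomic_helper_commit_planes(drm, state, false);

	/* new API: flags word; 0 == commit all planes */
	drm_atomic_helper_commit_planes(drm, state, 0);

	/* or restrict the commit to planes on active CRTCs */
	drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);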
@@ -282,7 +282,7 @@
 };
 
 static struct drm_driver sti_driver = {
-	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+	.driver_features = DRIVER_MODESET |
 	    DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
 	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 58cd551..d625a82 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -9,5 +9,5 @@
 
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i-drm.o sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i_backend.o
-
+obj-$(CONFIG_DRM_SUN4I)		+= sun6i_drc.o
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i_tv.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 3ab5604..91a7022 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -217,6 +217,51 @@
 }
 EXPORT_SYMBOL(sun4i_backend_update_layer_buffer);
 
+static int sun4i_backend_init_sat(struct device *dev)
+{
+	struct sun4i_backend *backend = dev_get_drvdata(dev);
+	int ret;
+
+	backend->sat_reset = devm_reset_control_get(dev, "sat");
+	if (IS_ERR(backend->sat_reset)) {
+		dev_err(dev, "Couldn't get the SAT reset line\n");
+		return PTR_ERR(backend->sat_reset);
+	}
+
+	ret = reset_control_deassert(backend->sat_reset);
+	if (ret) {
+		dev_err(dev, "Couldn't deassert the SAT reset line\n");
+		return ret;
+	}
+
+	backend->sat_clk = devm_clk_get(dev, "sat");
+	if (IS_ERR(backend->sat_clk)) {
+		dev_err(dev, "Couldn't get our SAT clock\n");
+		ret = PTR_ERR(backend->sat_clk);
+		goto err_assert_reset;
+	}
+
+	ret = clk_prepare_enable(backend->sat_clk);
+	if (ret) {
+		dev_err(dev, "Couldn't enable the SAT clock\n");
+		goto err_assert_reset;
+	}
+
+	return 0;
+
+err_assert_reset:
+	reset_control_assert(backend->sat_reset);
+	return ret;
+}
+
+static int sun4i_backend_free_sat(struct device *dev)
+{
+	struct sun4i_backend *backend = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(backend->sat_clk);
+	reset_control_assert(backend->sat_reset);
+
+	return 0;
+}
+
 static struct regmap_config sun4i_backend_regmap_config = {
 	.reg_bits	= 32,
 	.val_bits	= 32,
@@ -291,6 +336,15 @@
 	}
 	clk_prepare_enable(backend->ram_clk);
 
+	if (of_device_is_compatible(dev->of_node,
+				    "allwinner,sun8i-a33-display-backend")) {
+		ret = sun4i_backend_init_sat(dev);
+		if (ret) {
+			dev_err(dev, "Couldn't init SAT resources\n");
+			goto err_disable_ram_clk;
+		}
+	}
+
 	/* Reset the registers */
 	for (i = 0x800; i < 0x1000; i += 4)
 		regmap_write(backend->regs, i, 0);
@@ -306,6 +360,8 @@
 
 	return 0;
 
+err_disable_ram_clk:
+	clk_disable_unprepare(backend->ram_clk);
 err_disable_mod_clk:
 	clk_disable_unprepare(backend->mod_clk);
 err_disable_bus_clk:
@@ -320,6 +376,10 @@
 {
 	struct sun4i_backend *backend = dev_get_drvdata(dev);
 
+	if (of_device_is_compatible(dev->of_node,
+				    "allwinner,sun8i-a33-display-backend"))
+		sun4i_backend_free_sat(dev);
+
 	clk_disable_unprepare(backend->ram_clk);
 	clk_disable_unprepare(backend->mod_clk);
 	clk_disable_unprepare(backend->bus_clk);
@@ -345,6 +405,7 @@
 
 static const struct of_device_id sun4i_backend_of_table[] = {
 	{ .compatible = "allwinner,sun5i-a13-display-backend" },
+	{ .compatible = "allwinner,sun8i-a33-display-backend" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index 7070bb3..e007186 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -146,6 +146,9 @@
 	struct clk		*bus_clk;
 	struct clk		*mod_clk;
 	struct clk		*ram_clk;
+
+	struct clk		*sat_clk;
+	struct reset_control	*sat_reset;
 };
 
 void sun4i_backend_apply_color_correction(struct sun4i_backend *backend);
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index 5b34631..4332da4 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -14,6 +14,7 @@
 #include <linux/regmap.h>
 
 #include "sun4i_tcon.h"
+#include "sun4i_dotclock.h"
 
 struct sun4i_dclk {
 	struct clk_hw	hw;
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 8913c15..9059e3e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -200,13 +200,14 @@
 
 static bool sun4i_drv_node_is_frontend(struct device_node *node)
 {
-	return of_device_is_compatible(node,
-				       "allwinner,sun5i-a13-display-frontend");
+	return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+		of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
 }
 
 static bool sun4i_drv_node_is_tcon(struct device_node *node)
 {
-	return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon");
+	return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+		of_device_is_compatible(node, "allwinner,sun8i-a33-tcon");
 }
 
 static int compare_of(struct device *dev, void *data)
@@ -258,8 +259,8 @@
 		}
 
 		/*
-		 * If the node is our TCON, the first port is used for our
-		 * panel, and will not be part of the
+		 * If the node is our TCON, the first port is used for
+		 * panel or bridges, and will not be part of the
 		 * component framework.
 		 */
 		if (sun4i_drv_node_is_tcon(node)) {
@@ -321,6 +322,7 @@
 
 static const struct of_device_id sun4i_drv_of_table[] = {
 	{ .compatible = "allwinner,sun5i-a13-display-engine" },
+	{ .compatible = "allwinner,sun8i-a33-display-engine" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 70688fe..8b6ce619 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -15,6 +15,7 @@
 #include <drm/drmP.h>
 
 #include "sun4i_drv.h"
+#include "sun4i_framebuffer.h"
 
 static void sun4i_de_output_poll_changed(struct drm_device *drm)
 {
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index f5bbac6..c3ff10f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -19,6 +19,7 @@
 
 #include "sun4i_drv.h"
 #include "sun4i_tcon.h"
+#include "sun4i_rgb.h"
 
 struct sun4i_rgb {
 	struct drm_connector	connector;
@@ -151,7 +152,14 @@
 
 	DRM_DEBUG_DRIVER("Enabling RGB output\n");
 
-	drm_panel_enable(tcon->panel);
+	if (!IS_ERR(tcon->panel)) {
+		drm_panel_prepare(tcon->panel);
+		drm_panel_enable(tcon->panel);
+	}
+
+	/* encoder->bridge can be NULL; drm_bridge_enable checks for it */
+	drm_bridge_enable(encoder->bridge);
+
 	sun4i_tcon_channel_enable(tcon, 0);
 }
 
@@ -164,7 +172,14 @@
 	DRM_DEBUG_DRIVER("Disabling RGB output\n");
 
 	sun4i_tcon_channel_disable(tcon, 0);
-	drm_panel_disable(tcon->panel);
+
+	/* encoder->bridge can be NULL; drm_bridge_disable checks for it */
+	drm_bridge_disable(encoder->bridge);
+
+	if (!IS_ERR(tcon->panel)) {
+		drm_panel_disable(tcon->panel);
+		drm_panel_unprepare(tcon->panel);
+	}
 }
 
 static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
@@ -203,17 +218,22 @@
 {
 	struct sun4i_drv *drv = drm->dev_private;
 	struct sun4i_tcon *tcon = drv->tcon;
+	struct drm_encoder *encoder;
 	struct sun4i_rgb *rgb;
 	int ret;
 
-	/* If we don't have a panel, there's no point in going on */
-	if (IS_ERR(tcon->panel))
-		return -ENODEV;
-
 	rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
 	if (!rgb)
 		return -ENOMEM;
 	rgb->drv = drv;
+	encoder = &rgb->encoder;
+
+	tcon->panel = sun4i_tcon_find_panel(tcon->dev->of_node);
+	encoder->bridge = sun4i_tcon_find_bridge(tcon->dev->of_node);
+	if (IS_ERR(tcon->panel) && IS_ERR(encoder->bridge)) {
+		dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
+		return 0;
+	}
 
 	drm_encoder_helper_add(&rgb->encoder,
 			       &sun4i_rgb_enc_helper_funcs);
@@ -230,19 +250,38 @@
 	/* The RGB encoder can only work with the TCON channel 0 */
 	rgb->encoder.possible_crtcs = BIT(0);
 
-	drm_connector_helper_add(&rgb->connector,
-				 &sun4i_rgb_con_helper_funcs);
-	ret = drm_connector_init(drm, &rgb->connector,
-				 &sun4i_rgb_con_funcs,
-				 DRM_MODE_CONNECTOR_Unknown);
-	if (ret) {
-		dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
-		goto err_cleanup_connector;
+	if (!IS_ERR(tcon->panel)) {
+		drm_connector_helper_add(&rgb->connector,
+					 &sun4i_rgb_con_helper_funcs);
+		ret = drm_connector_init(drm, &rgb->connector,
+					 &sun4i_rgb_con_funcs,
+					 DRM_MODE_CONNECTOR_Unknown);
+		if (ret) {
+			dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
+			goto err_cleanup_connector;
+		}
+
+		drm_mode_connector_attach_encoder(&rgb->connector,
+						  &rgb->encoder);
+
+		ret = drm_panel_attach(tcon->panel, &rgb->connector);
+		if (ret) {
+			dev_err(drm->dev, "Couldn't attach our panel\n");
+			goto err_cleanup_connector;
+		}
 	}
 
-	drm_mode_connector_attach_encoder(&rgb->connector, &rgb->encoder);
+	if (!IS_ERR(encoder->bridge)) {
+		encoder->bridge->encoder = &rgb->encoder;
 
-	drm_panel_attach(tcon->panel, &rgb->connector);
+		ret = drm_bridge_attach(drm, encoder->bridge);
+		if (ret) {
+			dev_err(drm->dev, "Couldn't attach our bridge\n");
+			goto err_cleanup_connector;
+		}
+	} else {
+		encoder->bridge = NULL;
+	}
 
 	return 0;
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 652385f..cadacb5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -59,11 +59,13 @@
 		regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
 				   SUN4I_TCON0_CTL_TCON_ENABLE, 0);
 		clk_disable_unprepare(tcon->dclk);
-	} else if (channel == 1) {
-		regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
-				   SUN4I_TCON1_CTL_TCON_ENABLE, 0);
-		clk_disable_unprepare(tcon->sclk1);
+		return;
 	}
+
+	WARN_ON(!tcon->has_channel_1);
+	regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+			   SUN4I_TCON1_CTL_TCON_ENABLE, 0);
+	clk_disable_unprepare(tcon->sclk1);
 }
 EXPORT_SYMBOL(sun4i_tcon_channel_disable);
 
@@ -75,12 +77,14 @@
 				   SUN4I_TCON0_CTL_TCON_ENABLE,
 				   SUN4I_TCON0_CTL_TCON_ENABLE);
 		clk_prepare_enable(tcon->dclk);
-	} else if (channel == 1) {
-		regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
-				   SUN4I_TCON1_CTL_TCON_ENABLE,
-				   SUN4I_TCON1_CTL_TCON_ENABLE);
-		clk_prepare_enable(tcon->sclk1);
+		return;
 	}
+
+	WARN_ON(!tcon->has_channel_1);
+	regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+			   SUN4I_TCON1_CTL_TCON_ENABLE,
+			   SUN4I_TCON1_CTL_TCON_ENABLE);
+	clk_prepare_enable(tcon->sclk1);
 }
 EXPORT_SYMBOL(sun4i_tcon_channel_enable);
 
@@ -198,6 +202,8 @@
 	u8 clk_delay;
 	u32 val;
 
+	WARN_ON(!tcon->has_channel_1);
+
 	/* Adjust clock delay */
 	clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
 	regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
@@ -321,10 +327,12 @@
 		return PTR_ERR(tcon->sclk0);
 	}
 
-	tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
-	if (IS_ERR(tcon->sclk1)) {
-		dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
-		return PTR_ERR(tcon->sclk1);
+	if (tcon->has_channel_1) {
+		tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
+		if (IS_ERR(tcon->sclk1)) {
+			dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
+			return PTR_ERR(tcon->sclk1);
+		}
 	}
 
 	return sun4i_dclk_create(dev, tcon);
@@ -374,10 +382,8 @@
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(regs)) {
-		dev_err(dev, "Couldn't map the TCON registers\n");
+	if (IS_ERR(regs))
 		return PTR_ERR(regs);
-	}
 
 	tcon->regs = devm_regmap_init_mmio(dev, regs,
 					   &sun4i_tcon_regmap_config);
@@ -398,7 +404,7 @@
 	return 0;
 }
 
-static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
+struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
 {
 	struct device_node *port, *remote, *child;
 	struct device_node *end_node = NULL;
@@ -432,6 +438,40 @@
 	return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
 }
 
+struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node)
+{
+	struct device_node *port, *remote, *child;
+	struct device_node *end_node = NULL;
+
+	/* Inputs are listed first, then outputs */
+	port = of_graph_get_port_by_id(node, 1);
+
+	/*
+	 * Our first output is the RGB interface where the panel or bridge
+	 * will be connected.
+	 */
+	for_each_child_of_node(port, child) {
+		u32 reg;
+
+		of_property_read_u32(child, "reg", &reg);
+		if (reg == 0)
+			end_node = child;
+	}
+
+	if (!end_node) {
+		DRM_DEBUG_DRIVER("Missing bridge endpoint\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	remote = of_graph_get_remote_port_parent(end_node);
+	if (!remote) {
+		DRM_DEBUG_DRIVER("Enable to parse remote node\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return of_drm_find_bridge(remote) ?: ERR_PTR(-EPROBE_DEFER);
+}
+
 static int sun4i_tcon_bind(struct device *dev, struct device *master,
 			   void *data)
 {
@@ -446,9 +486,15 @@
 	dev_set_drvdata(dev, tcon);
 	drv->tcon = tcon;
 	tcon->drm = drm;
+	tcon->dev = dev;
 
-	if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon"))
+	if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon")) {
 		tcon->has_mux = true;
+		tcon->has_channel_1 = true;
+	} else {
+		tcon->has_mux = false;
+		tcon->has_channel_1 = false;
+	}
 
 	tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
 	if (IS_ERR(tcon->lcd_rst)) {
@@ -484,12 +530,6 @@
 		goto err_free_clocks;
 	}
 
-	tcon->panel = sun4i_tcon_find_panel(dev->of_node);
-	if (IS_ERR(tcon->panel)) {
-		dev_info(dev, "No panel found... RGB output disabled\n");
-		return 0;
-	}
-
 	ret = sun4i_rgb_init(drm);
 	if (ret < 0)
 		goto err_free_clocks;
@@ -519,19 +559,22 @@
 static int sun4i_tcon_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
+	struct drm_bridge *bridge;
 	struct drm_panel *panel;
 
 	/*
-	 * The panel is not ready.
+	 * Neither the bridge nor the panel is ready.
 	 * Defer the probe.
 	 */
 	panel = sun4i_tcon_find_panel(node);
+	bridge = sun4i_tcon_find_bridge(node);
 
 	/*
 	 * If we don't have a panel endpoint, just go on
 	 */
-	if (PTR_ERR(panel) == -EPROBE_DEFER) {
-		DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
+	if ((PTR_ERR(panel) == -EPROBE_DEFER) &&
+	    (PTR_ERR(bridge) == -EPROBE_DEFER)) {
+		DRM_DEBUG_DRIVER("Still waiting for our panel/bridge. Deferring...\n");
 		return -EPROBE_DEFER;
 	}
 
@@ -547,6 +590,7 @@
 
 static const struct of_device_id sun4i_tcon_of_table[] = {
 	{ .compatible = "allwinner,sun5i-a13-tcon" },
+	{ .compatible = "allwinner,sun8i-a33-tcon" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 0e0b11d..12bd489 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -143,6 +143,7 @@
 #define SUN4I_TCON_MAX_CHANNELS		2
 
 struct sun4i_tcon {
+	struct device			*dev;
 	struct drm_device		*drm;
 	struct regmap			*regs;
 
@@ -163,8 +164,13 @@
 	bool				has_mux;
 
 	struct drm_panel		*panel;
+
+	bool				has_channel_1;
 };
 
+struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
+struct drm_panel *sun4i_tcon_find_panel(struct device_node *node);
+
 /* Global Control */
 void sun4i_tcon_disable(struct sun4i_tcon *tcon);
 void sun4i_tcon_enable(struct sun4i_tcon *tcon);
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
new file mode 100644
index 0000000..bf6d846
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2016 Free Electrons
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+struct sun6i_drc {
+	struct clk		*bus_clk;
+	struct clk		*mod_clk;
+	struct reset_control	*reset;
+};
+
+static int sun6i_drc_bind(struct device *dev, struct device *master,
+			 void *data)
+{
+	struct sun6i_drc *drc;
+	int ret;
+
+	drc = devm_kzalloc(dev, sizeof(*drc), GFP_KERNEL);
+	if (!drc)
+		return -ENOMEM;
+	dev_set_drvdata(dev, drc);
+
+	drc->reset = devm_reset_control_get(dev, NULL);
+	if (IS_ERR(drc->reset)) {
+		dev_err(dev, "Couldn't get our reset line\n");
+		return PTR_ERR(drc->reset);
+	}
+
+	ret = reset_control_deassert(drc->reset);
+	if (ret) {
+		dev_err(dev, "Couldn't deassert our reset line\n");
+		return ret;
+	}
+
+	drc->bus_clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(drc->bus_clk)) {
+		dev_err(dev, "Couldn't get our bus clock\n");
+		ret = PTR_ERR(drc->bus_clk);
+		goto err_assert_reset;
+	}
+	clk_prepare_enable(drc->bus_clk);
+
+	drc->mod_clk = devm_clk_get(dev, "mod");
+	if (IS_ERR(drc->mod_clk)) {
+		dev_err(dev, "Couldn't get our mod clock\n");
+		ret = PTR_ERR(drc->mod_clk);
+		goto err_disable_bus_clk;
+	}
+	clk_prepare_enable(drc->mod_clk);
+
+	return 0;
+
+err_disable_bus_clk:
+	clk_disable_unprepare(drc->bus_clk);
+err_assert_reset:
+	reset_control_assert(drc->reset);
+	return ret;
+}
+
+static void sun6i_drc_unbind(struct device *dev, struct device *master,
+			    void *data)
+{
+	struct sun6i_drc *drc = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(drc->mod_clk);
+	clk_disable_unprepare(drc->bus_clk);
+	reset_control_assert(drc->reset);
+}
+
+static struct component_ops sun6i_drc_ops = {
+	.bind	= sun6i_drc_bind,
+	.unbind	= sun6i_drc_unbind,
+};
+
+static int sun6i_drc_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &sun6i_drc_ops);
+}
+
+static int sun6i_drc_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &sun6i_drc_ops);
+
+	return 0;
+}
+
+static const struct of_device_id sun6i_drc_of_table[] = {
+	{ .compatible = "allwinner,sun8i-a33-drc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sun6i_drc_of_table);
+
+static struct platform_driver sun6i_drc_platform_driver = {
+	.probe		= sun6i_drc_probe,
+	.remove		= sun6i_drc_remove,
+	.driver		= {
+		.name		= "sun6i-drc",
+		.of_match_table	= sun6i_drc_of_table,
+	},
+};
+module_platform_driver(sun6i_drc_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 Dynamic Range Control (DRC) Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 8495bd0..3de7ce3 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -480,17 +480,6 @@
 	.atomic_destroy_state = tegra_plane_atomic_destroy_state,
 };
 
-static int tegra_plane_prepare_fb(struct drm_plane *plane,
-				  const struct drm_plane_state *new_state)
-{
-	return 0;
-}
-
-static void tegra_plane_cleanup_fb(struct drm_plane *plane,
-				   const struct drm_plane_state *old_fb)
-{
-}
-
 static int tegra_plane_state_add(struct tegra_plane *plane,
 				 struct drm_plane_state *state)
 {
@@ -624,8 +613,6 @@
 }
 
 static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = {
-	.prepare_fb = tegra_plane_prepare_fb,
-	.cleanup_fb = tegra_plane_cleanup_fb,
 	.atomic_check = tegra_plane_atomic_check,
 	.atomic_update = tegra_plane_atomic_update,
 	.atomic_disable = tegra_plane_atomic_disable,
@@ -796,8 +783,6 @@
 };
 
 static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
-	.prepare_fb = tegra_plane_prepare_fb,
-	.cleanup_fb = tegra_plane_cleanup_fb,
 	.atomic_check = tegra_cursor_atomic_check,
 	.atomic_update = tegra_cursor_atomic_update,
 	.atomic_disable = tegra_cursor_atomic_disable,
@@ -866,8 +851,6 @@
 };
 
 static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = {
-	.prepare_fb = tegra_plane_prepare_fb,
-	.cleanup_fb = tegra_plane_cleanup_fb,
 	.atomic_check = tegra_plane_atomic_check,
 	.atomic_update = tegra_plane_atomic_update,
 	.atomic_disable = tegra_plane_atomic_disable,
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 755264d..4b9f1c7 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -57,7 +57,8 @@
 
 	drm_atomic_helper_commit_modeset_disables(drm, state);
 	drm_atomic_helper_commit_modeset_enables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state, true);
+	drm_atomic_helper_commit_planes(drm, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	drm_atomic_helper_wait_for_vblanks(drm, state);
 
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 25d6b22..2087689 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -69,6 +69,7 @@
 	struct drm_gem_cma_object *gem;
 	unsigned int depth, bpp;
 	dma_addr_t start, end;
+	u64 dma_base_and_ceiling;
 
 	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
 	gem = drm_fb_cma_get_gem_obj(fb, 0);
@@ -79,8 +80,13 @@
 
 	end = start + (crtc->mode.vdisplay * fb->pitches[0]);
 
-	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, start);
-	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG, end);
+	/*
+	 * Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
+	 * with a single instruction, if available. This should make it less
+	 * likely that LCDC would fetch the DMA addresses in the middle of
+	 * an update.
+	 */
+	dma_base_and_ceiling = (u64)(end - 1) << 32 | start;
+	tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
 
 	if (tilcdc_crtc->curr_fb)
 		drm_flip_work_queue(&tilcdc_crtc->unref_work,
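tilcdc_write64() is expected to collapse the two MMIO stores into a single 64-bit access where the architecture allows it. A plausible shape for the helper, assuming the driver's priv->mmio mapping (a sketch, not necessarily the exact in-tree definition):

	static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data)
	{
		struct tilcdc_drm_private *priv = dev->dev_private;
		volatile void __iomem *addr = priv->mmio + reg;

	#ifdef iowrite64
		iowrite64(data, addr);		/* single 64-bit store */
	#else
		__iowrite64_copy((void __force *)addr, &data, 1);
	#endif
	}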
@@ -98,6 +104,8 @@
 	if (priv->rev == 1) {
 		tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
 			LCDC_V1_UNDERFLOW_INT_ENA);
+		tilcdc_set(dev, LCDC_DMA_CTRL_REG,
+			LCDC_V1_END_OF_FRAME_INT_ENA);
 	} else {
 		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
 			LCDC_V2_UNDERFLOW_INT_ENA |
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 3404d24..f8892e9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -33,6 +33,20 @@
 
 static LIST_HEAD(module_list);
 
+static const u32 tilcdc_rev1_formats[] = { DRM_FORMAT_RGB565 };
+
+static const u32 tilcdc_straight_formats[] = { DRM_FORMAT_RGB565,
+					       DRM_FORMAT_BGR888,
+					       DRM_FORMAT_XBGR8888 };
+
+static const u32 tilcdc_crossed_formats[] = { DRM_FORMAT_BGR565,
+					      DRM_FORMAT_RGB888,
+					      DRM_FORMAT_XRGB8888 };
+
+static const u32 tilcdc_legacy_formats[] = { DRM_FORMAT_RGB565,
+					     DRM_FORMAT_RGB888,
+					     DRM_FORMAT_XRGB8888 };
+
 void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
 		const struct tilcdc_module_ops *funcs)
 {
@@ -118,7 +132,7 @@
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, false);
+	drm_atomic_helper_commit_planes(dev, state, 0);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
@@ -226,7 +240,6 @@
 	struct platform_device *pdev = dev->platformdev;
 	struct device_node *node = pdev->dev.of_node;
 	struct tilcdc_drm_private *priv;
-	struct tilcdc_module *mod;
 	struct resource *res;
 	u32 bpp = 0;
 	int ret;
@@ -318,6 +331,37 @@
 
 	pm_runtime_put_sync(dev->dev);
 
+	if (priv->rev == 1) {
+		DBG("Revision 1 LCDC supports only RGB565 format");
+		priv->pixelformats = tilcdc_rev1_formats;
+		priv->num_pixelformats = ARRAY_SIZE(tilcdc_rev1_formats);
+		bpp = 16;
+	} else {
+		const char *str = "\0";
+
+		of_property_read_string(node, "blue-and-red-wiring", &str);
+		if (!strcmp(str, "crossed")) {
+			DBG("Configured for crossed blue and red wires");
+			priv->pixelformats = tilcdc_crossed_formats;
+			priv->num_pixelformats =
+				ARRAY_SIZE(tilcdc_crossed_formats);
+			bpp = 32; /* Choose bpp with RGB support for fbdev */
+		} else if (!strcmp(str, "straight")) {
+			DBG("Configured for straight blue and red wires");
+			priv->pixelformats = tilcdc_straight_formats;
+			priv->num_pixelformats =
+				ARRAY_SIZE(tilcdc_straight_formats);
+			bpp = 16; /* Choose bpp with RGB support for fbdev */
+		} else {
+			DBG("Blue and red wiring '%s' unknown, use legacy mode",
+			    str);
+			priv->pixelformats = tilcdc_legacy_formats;
+			priv->num_pixelformats =
+				ARRAY_SIZE(tilcdc_legacy_formats);
+			bpp = 16; /* This is just a guess */
+		}
+	}
+
 	ret = modeset_init(dev);
 	if (ret < 0) {
 		dev_err(dev->dev, "failed to initialize mode setting\n");
@@ -331,7 +375,7 @@
 		if (ret < 0)
 			goto fail_mode_config_cleanup;
 
-		ret = tilcdc_add_external_encoders(dev, &bpp);
+		ret = tilcdc_add_external_encoders(dev);
 		if (ret < 0)
 			goto fail_component_cleanup;
 	}
@@ -354,15 +398,6 @@
 		goto fail_vblank_cleanup;
 	}
 
-	list_for_each_entry(mod, &module_list, list) {
-		DBG("%s: preferred_bpp: %d", mod->name, mod->preferred_bpp);
-		bpp = mod->preferred_bpp;
-		if (bpp > 0)
-			break;
-	}
-
-	drm_helper_disable_unused_functions(dev);
-
 	drm_mode_config_reset(dev);
 
 	priv->fbdev = drm_fbdev_cma_init(dev, bpp,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 13001df..a6e5e6d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -65,6 +65,10 @@
 	 */
 	uint32_t max_width;
 
+	/* Supported pixel formats */
+	const uint32_t *pixelformats;
+	uint32_t num_pixelformats;
+
 	/* The context for pm suspend/resume cycle is stored here */
 	struct drm_atomic_state *saved_state;
 
@@ -112,7 +116,6 @@
 	const char *name;
 	struct list_head list;
 	const struct tilcdc_module_ops *funcs;
-	unsigned int preferred_bpp;
 };
 
 void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index ad3db4d..68e8950 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -52,7 +52,7 @@
 	return MODE_OK;
 }
 
-static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp,
+static int tilcdc_add_external_encoder(struct drm_device *dev,
 				       struct drm_connector *connector)
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
@@ -64,7 +64,6 @@
 	/* Only tda998x is supported at the moment. */
 	tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
 	tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
-	*bpp = panel_info_tda998x.bpp;
 
 	connector_funcs = devm_kzalloc(dev->dev, sizeof(*connector_funcs),
 				       GFP_KERNEL);
@@ -94,7 +93,7 @@
 	return 0;
 }
 
-int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp)
+int tilcdc_add_external_encoders(struct drm_device *dev)
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
 	struct drm_connector *connector;
@@ -108,7 +107,7 @@
 			if (connector == priv->connectors[i])
 				found = true;
 		if (!found) {
-			ret = tilcdc_add_external_encoder(dev, bpp, connector);
+			ret = tilcdc_add_external_encoder(dev, connector);
 			if (ret)
 				return ret;
 		}
@@ -154,7 +153,7 @@
 
 	while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
 		node = of_graph_get_remote_port_parent(ep);
-		if (!node && !of_device_is_available(node)) {
+		if (!node || !of_device_is_available(node)) {
 			of_node_put(node);
 			continue;
 		}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.h b/drivers/gpu/drm/tilcdc/tilcdc_external.h
index 6aabe27..c700e0c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.h
@@ -18,7 +18,7 @@
 #ifndef __TILCDC_EXTERNAL_H__
 #define __TILCDC_EXTERNAL_H__
 
-int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp);
+int tilcdc_add_external_encoders(struct drm_device *dev);
 void tilcdc_remove_external_encoders(struct drm_device *dev);
 int tilcdc_get_external_components(struct device *dev,
 				   struct component_match **match);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 4ac1d25..7b36509 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -397,8 +397,6 @@
 		goto fail_timings;
 	}
 
-	mod->preferred_bpp = panel_mod->info->bpp;
-
 	return 0;
 
 fail_timings:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 41911e3..74c65fa 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -24,10 +24,6 @@
 
 #include "tilcdc_drv.h"
 
-static const u32 tilcdc_formats[] = { DRM_FORMAT_RGB565,
-				      DRM_FORMAT_RGB888,
-				      DRM_FORMAT_XRGB8888 };
-
 static struct drm_plane_funcs tilcdc_plane_funcs = {
 	.update_plane	= drm_atomic_helper_update_plane,
 	.disable_plane	= drm_atomic_helper_disable_plane,
@@ -114,12 +110,13 @@
 int tilcdc_plane_init(struct drm_device *dev,
 		      struct drm_plane *plane)
 {
+	struct tilcdc_drm_private *priv = dev->dev_private;
 	int ret;
 
 	ret = drm_plane_init(dev, plane, 1,
 			     &tilcdc_plane_funcs,
-			     tilcdc_formats,
-			     ARRAY_SIZE(tilcdc_formats),
+			     priv->pixelformats,
+			     priv->num_pixelformats,
 			     true);
 	if (ret) {
 		dev_err(dev->dev, "Failed to initialize plane: %d\n", ret);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
index 1bf5e25..f57c0d6 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -119,6 +119,20 @@
 	iowrite32(data, priv->mmio + reg);
 }
 
+static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	volatile void __iomem *addr = priv->mmio + reg;
+
+#ifdef iowrite64
+	iowrite64(data, addr);
+#else
+	__iowmb();
+	/* This compiles to strd (=64-bit write) on ARMv7 */
+	*(volatile u64 __force *)addr = __cpu_to_le64(data);
+#endif
+}
+
 static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 741c7b5..c6a70da 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -327,8 +327,6 @@
 		goto fail;
 	}
 
-	mod->preferred_bpp = dvi_info.bpp;
-
 	i2c_node = of_find_node_by_phandle(i2c_phandle);
 	if (!i2c_node) {
 		dev_err(&pdev->dev, "could not get i2c bus node\n");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 42c074a..fc6217d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -57,14 +57,14 @@
 static inline int ttm_mem_type_from_place(const struct ttm_place *place,
 					  uint32_t *mem_type)
 {
-	int i;
+	int pos;
 
-	for (i = 0; i <= TTM_PL_PRIV5; i++)
-		if (place->flags & (1 << i)) {
-			*mem_type = i;
-			return 0;
-		}
-	return -EINVAL;
+	pos = ffs(place->flags & TTM_PL_MASK_MEM);
+	if (unlikely(!pos))
+		return -EINVAL;
+
+	*mem_type = pos - 1;
+	return 0;
 }
 
 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
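
The ffs() rewrite above is behaviour-equivalent to the old loop: ffs()
returns the 1-based index of the lowest set bit, or 0 when no bit in
TTM_PL_MASK_MEM is set. A worked example:

	/* TTM_PL_FLAG_VRAM == 1 << TTM_PL_VRAM == 1 << 2, so:
	 *   pos = ffs(TTM_PL_FLAG_VRAM) == 3
	 *   *mem_type = pos - 1 == TTM_PL_VRAM (2)
	 * and ffs(0) == 0 takes the -EINVAL path. */
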
@@ -354,14 +354,12 @@
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
-				      mem);
+		ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
 					 no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, interruptible,
-					 no_wait_gpu, mem);
+		ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);
 
 	if (ret) {
 		if (bdev->driver->move_notify) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f157a9e..bf6e216 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,8 +45,8 @@
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool interruptible,
-		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+		    bool interruptible, bool no_wait_gpu,
+		    struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -329,8 +329,7 @@
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool interruptible,
-		       bool no_wait_gpu,
+		       bool interruptible, bool no_wait_gpu,
 		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index a1803fb..29855be 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -600,3 +600,9 @@
 	return 0;
 }
 EXPORT_SYMBOL(ttm_round_pot);
+
+uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
+{
+	return glob->zone_kernel->max_mem;
+}
+EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);
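
ttm_get_kernel_zone_memory_size() gives drivers a way to size their own
memory domains against TTM's kernel zone limit. A hedged usage sketch (the
driver-side variables here are hypothetical):

	/* e.g. during driver init: never let the GTT domain grow past
	 * half of what the TTM kernel zone could ever provide. */
	uint64_t kzone = ttm_get_kernel_zone_memory_size(glob);
	gtt_size = min(gtt_size, kzone / 2);
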
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index bef9f6f..cec4b4b 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -858,7 +858,6 @@
 	if (count) {
 		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
 		ttm->pages[index] = d_page->p;
-		ttm_dma->cpu_address[index] = d_page->vaddr;
 		ttm_dma->dma_address[index] = d_page->dma;
 		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
 		r = 0;
@@ -989,7 +988,6 @@
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	for (i = 0; i < ttm->num_pages; i++) {
 		ttm->pages[i] = NULL;
-		ttm_dma->cpu_address[i] = 0;
 		ttm_dma->dma_address[i] = 0;
 	}
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bc5aa57..aee3c00 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -57,10 +57,8 @@
 {
 	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
 					  sizeof(*ttm->ttm.pages) +
-					  sizeof(*ttm->dma_address) +
-					  sizeof(*ttm->cpu_address));
-	ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
-	ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
+					  sizeof(*ttm->dma_address));
+	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
 }
 
 #ifdef CONFIG_X86
@@ -244,7 +242,6 @@
 
 	drm_free_large(ttm->pages);
 	ttm->pages = NULL;
-	ttm_dma->cpu_address = NULL;
 	ttm_dma->dma_address = NULL;
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 4709b54..d2f57c5 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -150,8 +150,5 @@
 	drm_connector_register(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);
 
-	drm_object_attach_property(&connector->base,
-				      dev->mode_config.dirty_info_property,
-				      1);
 	return 0;
 }
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 17d34e0..f0851db 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -16,6 +16,20 @@
 	return 0;
 }
 
+static int udl_usb_suspend(struct usb_interface *interface,
+			   pm_message_t message)
+{
+	return 0;
+}
+
+static int udl_usb_resume(struct usb_interface *interface)
+{
+	struct drm_device *dev = usb_get_intfdata(interface);
+
+	udl_modeset_restore(dev);
+	return 0;
+}
+
 static const struct vm_operations_struct udl_gem_vm_ops = {
 	.fault = udl_gem_fault,
 	.open = drm_gem_vm_open,
@@ -122,6 +136,8 @@
 	.name = "udl",
 	.probe = udl_usb_probe,
 	.disconnect = udl_usb_disconnect,
+	.suspend = udl_usb_suspend,
+	.resume = udl_usb_resume,
 	.id_table = id_table,
 };
 module_usb_driver(udl_driver);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 0b03d34..f338a57 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -52,6 +52,7 @@
 	struct device *dev;
 	struct drm_device *ddev;
 	struct usb_device *udev;
+	struct drm_crtc *crtc;
 
 	int sku_pixel_limit;
 
@@ -87,6 +88,7 @@
 
 /* modeset */
 int udl_modeset_init(struct drm_device *dev);
+void udl_modeset_restore(struct drm_device *dev);
 void udl_modeset_cleanup(struct drm_device *dev);
 int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
 
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 33dbfb2..29f0207 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -16,6 +16,8 @@
 /* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
 #define BULK_SIZE 512
 
+#define NR_USB_REQUEST_CHANNEL 0x12
+
 #define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
 #define WRITES_IN_FLIGHT (4)
 #define MAX_VENDOR_DESCRIPTOR_SIZE 256
@@ -90,6 +92,26 @@
 	return true;
 }
 
+/*
+ * Need to ensure a channel is selected before submitting URBs
+ */
+static int udl_select_std_channel(struct udl_device *udl)
+{
+	int ret;
+	u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
+			    0x1C, 0x88, 0x5E, 0x15,
+			    0x60, 0xFE, 0xC6, 0x97,
+			    0x16, 0x3D, 0x47, 0xF2};
+
+	ret = usb_control_msg(udl->udev,
+			      usb_sndctrlpipe(udl->udev, 0),
+			      NR_USB_REQUEST_CHANNEL,
+			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
+			      set_def_chn, sizeof(set_def_chn),
+			      USB_CTRL_SET_TIMEOUT);
+	return ret < 0 ? ret : 0;
+}
+
 static void udl_release_urb_work(struct work_struct *work)
 {
 	struct urb_node *unode = container_of(work, struct urb_node,
@@ -301,6 +323,9 @@
 		goto err;
 	}
 
+	if (udl_select_std_channel(udl))
+		DRM_ERROR("Selecting channel failed\n");
+
 	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
 		DRM_ERROR("udl_alloc_urb_list failed\n");
 		goto err;
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index f92ea95..f2b2481 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -309,6 +309,8 @@
 	char *wrptr;
 	int color_depth = 0;
 
+	udl->crtc = crtc;
+
 	buf = (char *)udl->mode_buf;
 
 	/* for now we just clip 24 -> 16 - if we fix that fix this */
@@ -441,8 +443,6 @@
 
 	dev->mode_config.funcs = &udl_mode_funcs;
 
-	drm_mode_create_dirty_info_property(dev);
-
 	udl_crtc_init(dev);
 
 	encoder = udl_encoder_init(dev);
@@ -452,6 +452,18 @@
 	return 0;
 }
 
+void udl_modeset_restore(struct drm_device *dev)
+{
+	struct udl_device *udl = dev->dev_private;
+	struct udl_framebuffer *ufb;
+
+	if (!udl->crtc || !udl->crtc->primary->fb)
+		return;
+	udl_crtc_commit(udl->crtc);
+	ufb = to_udl_fb(udl->crtc->primary->fb);
+	udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
+}
+
 void udl_modeset_cleanup(struct drm_device *dev)
 {
 	drm_mode_config_cleanup(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 8fc2b73..2682f07 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -163,14 +163,6 @@
 	int vblank_lines;
 	int ret = 0;
 
-	/*
-	 * XXX Doesn't work well in interlaced mode yet, partially due
-	 * to problems in vc4 kms or drm core interlaced mode handling,
-	 * so disable for now in interlaced mode.
-	 */
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		return ret;
-
 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
 
 	/* Get optional system timestamp before query. */
@@ -191,10 +183,15 @@
 
 	/* Vertical position of hvs composed scanline. */
 	*vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+	*hpos = 0;
 
-	/* No hpos info available. */
-	if (hpos)
-		*hpos = 0;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		*vpos /= 2;
+
+		/* Use hpos to correct for field offset in interlaced mode. */
+		if (VC4_GET_FIELD(val, SCALER_DISPSTATX_FRAME_COUNT) % 2)
+			*hpos += mode->crtc_htotal / 2;
+	}
 
 	/* This is the offset we need for translating hvs -> pv scanout pos. */
 	fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
@@ -217,8 +214,6 @@
 		 * position of the PV.
 		 */
 		*vpos -= fifo_lines + 1;
-		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-			*vpos /= 2;
 
 		ret |= DRM_SCANOUTPOS_ACCURATE;
 		return ret;
@@ -480,6 +475,9 @@
 	int ret;
 	require_hvs_enabled(dev);
 
+	/* Disable vblank irq handling before crtc is disabled. */
+	drm_crtc_vblank_off(crtc);
+
 	CRTC_WRITE(PV_V_CONTROL,
 		   CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
 	ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1);
@@ -530,6 +528,33 @@
 	/* Turn on the pixel valve, which will emit the vstart signal. */
 	CRTC_WRITE(PV_V_CONTROL,
 		   CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
+
+	/* Enable vblank irq handling after crtc is started. */
+	drm_crtc_vblank_on(crtc);
+}
+
+static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	/* Do not allow doublescan modes from user space */
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+		DRM_DEBUG_KMS("[CRTC:%d] Doublescan mode rejected.\n",
+			      crtc->base.id);
+		return false;
+	}
+
+	/*
+	 * Interlaced video modes get CRTC_INTERLACE_HALVE_V applied when
+	 * coming from user space. We don't want this, as it screws up
+	 * vblank timestamping, so fix it up.
+	 */
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	DRM_DEBUG_KMS("[CRTC:%d] adjusted_mode:\n", crtc->base.id);
+	drm_mode_debug_printmodeline(adjusted_mode);
+
+	return true;
 }
 
 static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
@@ -819,6 +844,7 @@
 	.mode_set_nofb = vc4_crtc_mode_set_nofb,
 	.disable = vc4_crtc_disable,
 	.enable = vc4_crtc_enable,
+	.mode_fixup = vc4_crtc_mode_fixup,
 	.atomic_check = vc4_crtc_atomic_check,
 	.atomic_flush = vc4_crtc_atomic_flush,
 };
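
With interlace no longer bailed out of early, the scanout query reports the
position in field lines and encodes which field is active in hpos. Worked
numbers, purely illustrative: 1080i with crtc_htotal == 2200, an HVS
composed line of 540, during the second field:

	vpos = 540 / 2;         /* 270: line within the current field */
	hpos = 0 + 2200 / 2;    /* 1100: half-scanline offset marks field 2 */
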
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 275fedb..1e1f6b8 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -340,9 +340,20 @@
 	}
 }
 
+static bool vc4_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
+				       const struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return false;
+
+	return true;
+}
+
 static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
 	.disable = vc4_dpi_encoder_disable,
 	.enable = vc4_dpi_encoder_enable,
+	.mode_fixup = vc4_dpi_encoder_mode_fixup,
 };
 
 static const struct of_device_id vc4_dpi_dt_match[] = {
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 6155e8a..27c52ec 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -419,10 +419,6 @@
 
 	vc4_flush_caches(dev);
 
-	/* Disable the binner's pre-loaded overflow memory address */
-	V3D_WRITE(V3D_BPOA, 0);
-	V3D_WRITE(V3D_BPOS, 0);
-
 	/* Either put the job in the binner if it uses the binner, or
 	 * immediately move it to the to-be-rendered queue.
 	 */
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 4452f36..68ad106 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -208,10 +208,35 @@
 	return ret;
 }
 
+/*
+ * drm_helper_probe_single_connector_modes() applies drm_mode_set_crtcinfo to
+ * all modes with flag CRTC_INTERLACE_HALVE_V. We don't want this, as it
+ * screws up vblank timestamping for interlaced modes, so fix it up.
+ */
+static int vc4_hdmi_connector_probe_modes(struct drm_connector *connector,
+					  uint32_t maxX, uint32_t maxY)
+{
+	struct drm_display_mode *mode;
+	int count;
+
+	count = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+	if (count == 0)
+		return 0;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed adapted modes:\n",
+		      connector->base.id, connector->name);
+	list_for_each_entry(mode, &connector->modes, head) {
+		drm_mode_set_crtcinfo(mode, 0);
+		drm_mode_debug_printmodeline(mode);
+	}
+
+	return count;
+}
+
 static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = vc4_hdmi_connector_detect,
-	.fill_modes = drm_helper_probe_single_connector_modes,
+	.fill_modes = vc4_hdmi_connector_probe_modes,
 	.destroy = vc4_hdmi_connector_destroy,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -246,7 +271,7 @@
 	connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
 			     DRM_CONNECTOR_POLL_DISCONNECT);
 
-	connector->interlace_allowed = 0;
+	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
 	drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 4ac894d..c1f65c6 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -44,7 +44,7 @@
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, false);
+	drm_atomic_helper_commit_planes(dev, state, 0);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 29e4b40..881bf48 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -735,8 +735,6 @@
 }
 
 static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
-	.prepare_fb = NULL,
-	.cleanup_fb = NULL,
 	.atomic_check = vc4_plane_atomic_check,
 	.atomic_update = vc4_plane_atomic_update,
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4e192aa..7cf3678 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -338,7 +338,8 @@
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 	drm_atomic_helper_commit_modeset_enables(dev, state);
-	drm_atomic_helper_commit_planes(dev, state, true);
+	drm_atomic_helper_commit_planes(dev, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	drm_atomic_helper_commit_hw_done(state);
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index c046903..512e7cd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -152,15 +152,10 @@
 	if (ret)
 		goto out_free;
 
-	buf = kmalloc(exbuf->size, GFP_KERNEL);
-	if (!buf) {
-		ret = -ENOMEM;
-		goto out_unresv;
-	}
-	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
-			   exbuf->size)) {
-		kfree(buf);
-		ret = -EFAULT;
+	buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
+			  exbuf->size);
+	if (IS_ERR(buf)) {
+		ret = PTR_ERR(buf);
 		goto out_unresv;
 	}
 	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
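
memdup_user() collapses the kmalloc() + copy_from_user() + kfree()-on-error
sequence into one call that returns an ERR_PTR() (-ENOMEM or -EFAULT), which
is why the explicit cleanup above disappears. The general shape of the
conversion:

	buf = memdup_user(user_ptr, size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);    /* allocation or copy failed */
	/* ... use buf, kfree(buf) when done ... */
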
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 74304b0..070d750 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,10 +67,10 @@
 			VMWGFX_NUM_GB_SURFACE +\
 			VMWGFX_NUM_GB_SCREEN_TARGET)
 
-#define VMW_PL_GMR TTM_PL_PRIV0
-#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
-#define VMW_PL_MOB TTM_PL_PRIV1
-#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)
 
 #define VMW_RES_CONTEXT ttm_driver_type0
 #define VMW_RES_SURFACE ttm_driver_type1
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 63ccd98..23ec673 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -377,9 +377,6 @@
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	drm_object_attach_property(&connector->base,
-				   dev->mode_config.dirty_info_property,
-				   1);
-	drm_object_attach_property(&connector->base,
 				   dev_priv->hotplug_mode_update_property, 1);
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.suggested_x_property, 0);
@@ -421,10 +418,6 @@
 	if (ret != 0)
 		goto err_free;
 
-	ret = drm_mode_create_dirty_info_property(dev);
-	if (ret != 0)
-		goto err_vblank_cleanup;
-
 	vmw_kms_create_implicit_placement_property(dev_priv, true);
 
 	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
@@ -439,8 +432,6 @@
 
 	return 0;
 
-err_vblank_cleanup:
-	drm_vblank_cleanup(dev);
 err_free:
 	kfree(dev_priv->ldu_priv);
 	dev_priv->ldu_priv = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b74eae2..f423590 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -538,9 +538,6 @@
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	drm_object_attach_property(&connector->base,
-				   dev->mode_config.dirty_info_property,
-				   1);
-	drm_object_attach_property(&connector->base,
 				   dev_priv->hotplug_mode_update_property, 1);
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.suggested_x_property, 0);
@@ -574,10 +571,6 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = drm_mode_create_dirty_info_property(dev);
-	if (unlikely(ret != 0))
-		goto err_vblank_cleanup;
-
 	vmw_kms_create_implicit_placement_property(dev_priv, false);
 
 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
@@ -588,10 +581,6 @@
 	DRM_INFO("Screen Objects Display Unit initialized\n");
 
 	return 0;
-
-err_vblank_cleanup:
-	drm_vblank_cleanup(dev);
-	return ret;
 }
 
 int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 41932a7..94ad8d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1131,9 +1131,6 @@
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	drm_object_attach_property(&connector->base,
-				   dev->mode_config.dirty_info_property,
-				   1);
-	drm_object_attach_property(&connector->base,
 				   dev_priv->hotplug_mode_update_property, 1);
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.suggested_x_property, 0);
@@ -1202,10 +1199,6 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = drm_mode_create_dirty_info_property(dev);
-	if (unlikely(ret != 0))
-		goto err_vblank_cleanup;
-
 	dev_priv->active_display_unit = vmw_du_screen_target;
 
 	vmw_kms_create_implicit_placement_property(dev_priv, false);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index f17cb04..1887f19 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -131,7 +131,24 @@
 	return NULL;
 }
 
-/* Returns the default VGA device (vgacon's babe) */
+/**
+ * vga_default_device - return the default VGA device, for vgacon
+ *
+ * This can be defined by the platform. The default implementation
+ * is rather dumb and will probably only work properly on single
+ * vga card setups and/or x86 platforms.
+ *
+ * If your VGA default device is not PCI, you'll have to return
+ * NULL here. In this case, I assume it will not conflict with
+ * any PCI card. If this is not true, I'll have to define two archs
+ * hooks for enabling/disabling the VGA default device if that is
+ * possible. This may be a problem with real _ISA_ VGA cards, in
+ * addition to a PCI one. I don't know at this point how to deal
+ * with that card. Can theirs IOs be disabled at all ? If not, then
+ * I suppose it's a matter of having the proper arch hook telling
+ * us about it, so we basically never allow anybody to succeed a
+ * vga_get()...
+ */
 struct pci_dev *vga_default_device(void)
 {
 	return vga_default;
@@ -356,6 +373,40 @@
 		wake_up_all(&vga_wait_queue);
 }
 
+/**
+ * vga_get - acquire & locks VGA resources
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
+ * @interruptible: whether the blocking should be interruptible by signals
+ *
+ * This function acquires VGA resources for the given card and marks those
+ * resources locked. If the resources requested are "normal" (and not legacy)
+ * resources, the arbiter will first check whether the card is doing legacy
+ * decoding for that type of resource. If yes, the lock is "converted" into a
+ * legacy resource lock.
+ *
+ * The arbiter will first look for all VGA cards that might conflict and disable
+ * their IOs and/or Memory access, including VGA forwarding on P2P bridges if
+ * necessary, so that the requested resources can be used. Then, the card is
+ * marked as locking these resources and the IO and/or Memory accesses are
+ * enabled on the card (including VGA forwarding on parent P2P bridges if any).
+ *
+ * This function will block if some conflicting card is already locking one of
+ * the required resources (or any resource on a different bus segment, since P2P
+ * bridges don't differentiate VGA memory and IO afaik). You can indicate
+ * whether this blocking should be interruptible by a signal (for userland
+ * interface) or not.
+ *
+ * Must not be called at interrupt time or in atomic context.  If the card
+ * already owns the resources, the function succeeds.  Nested calls are
+ * supported (a per-resource counter is maintained).
+ *
+ * On success, release the VGA resource again with vga_put().
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
 int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
 {
 	struct vga_device *vgadev, *conflict;
@@ -408,6 +459,21 @@
 }
 EXPORT_SYMBOL(vga_get);
 
+/**
+ * vga_tryget - try to acquire & lock legacy VGA resources
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
+ *
+ * This function performs the same operation as vga_get(), but will return an
+ * error (-EBUSY) instead of blocking if the resources are already locked by
+ * another card. It can be called in any context.
+ *
+ * On success, release the VGA resource again with vga_put().
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
 int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
 {
 	struct vga_device *vgadev;
@@ -435,6 +501,16 @@
 }
 EXPORT_SYMBOL(vga_tryget);
 
+/**
+ * vga_put - release lock on legacy VGA resources
+ * @pdev: pci device of VGA card or NULL for system default
+ * @rsrc: bit mask of resources to release
+ *
+ * This function releases resources previously locked by vga_get() or
+ * vga_tryget(). The resources aren't disabled right away, so that a subsequent
+ * vga_get() on the same card will succeed immediately. Resources have a
+ * counter, so locks are only released if the counter reaches 0.
+ */
 void vga_put(struct pci_dev *pdev, unsigned int rsrc)
 {
 	struct vga_device *vgadev;
@@ -716,7 +792,37 @@
 }
 EXPORT_SYMBOL(vga_set_legacy_decoding);
 
-/* call with NULL to unregister */
+/**
+ * vga_client_register - register or unregister a VGA arbitration client
+ * @pdev: pci device of the VGA client
+ * @cookie: client cookie to be used in callbacks
+ * @irq_set_state: irq state change callback
+ * @set_vga_decode: vga decode change callback
+ *
+ * Clients have two callback mechanisms they can use.
+ *
+ * @irq_set_state callback: If a client can't disable its GPU's VGA
+ * resources, then we need to be able to ask it to turn off its irqs when we
+ * turn off its mem and io decoding.
+ *
+ * @set_vga_decode callback: If a client can disable its GPU VGA resource, it
+ * will get a callback from this to set the encode/decode state.
+ *
+ * Rationale: we cannot disable VGA decode resources unconditionally; some
+ * single-GPU laptops seem to require ACPI or BIOS access to the VGA registers to
+ * control things like backlights etc.  Hopefully newer multi-GPU laptops do
+ * something saner, and desktops won't have any special ACPI for this. The
+ * driver will get a callback when VGA arbitration is first used by userspace
+ * since some older X servers have issues.
+ *
+ * This function does not check whether a client for @pdev has been registered
+ * already.
+ *
+ * To unregister just call this function with @irq_set_state and @set_vga_decode
+ * both set to NULL for the same @pdev as originally used to register them.
+ *
+ * Returns: 0 on success, -1 on failure
+ */
 int vga_client_register(struct pci_dev *pdev, void *cookie,
 			void (*irq_set_state)(void *cookie, bool state),
 			unsigned int (*set_vga_decode)(void *cookie,
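
Since vga_get()/vga_put() nest via a per-resource counter, a driver
bracketing legacy VGA register access follows this pattern (a sketch, error
handling trimmed):

	int rc = vga_get(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM,
			 1 /* interruptible */);
	if (rc)
		return rc;
	/* ... poke legacy VGA io/mem ... */
	vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
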
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index 261b86d..9cd8838 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -38,6 +38,9 @@
 			 struct drm_connector *);
 };
 
+int analogix_dp_enable_psr(struct device *dev);
+int analogix_dp_disable_psr(struct device *dev);
+
 int analogix_dp_resume(struct device *dev);
 int analogix_dp_suspend(struct device *dev);
 
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 734e4fb..e341e7f 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -127,6 +127,7 @@
  * run-time by echoing the debug value in its sysfs node:
  *   # echo 0xf > /sys/module/drm/parameters/debug
  */
+#define DRM_UT_NONE		0x00
 #define DRM_UT_CORE 		0x01
 #define DRM_UT_DRIVER		0x02
 #define DRM_UT_KMS		0x04
@@ -134,11 +135,15 @@
 #define DRM_UT_ATOMIC		0x10
 #define DRM_UT_VBL		0x20
 
-extern __printf(2, 3)
-void drm_ut_debug_printk(const char *function_name,
-			 const char *format, ...);
-extern __printf(1, 2)
-void drm_err(const char *format, ...);
+extern __printf(6, 7)
+void drm_dev_printk(const struct device *dev, const char *level,
+		    unsigned int category, const char *function_name,
+		    const char *prefix, const char *format, ...);
+
+extern __printf(5, 6)
+void drm_printk(const char *level, unsigned int category,
+		const char *function_name, const char *prefix,
+		const char *format, ...);
 
 /***********************************************************************/
 /** \name DRM template customization defaults */
@@ -189,8 +194,12 @@
  * \param fmt printf() like format string.
  * \param arg arguments
  */
-#define DRM_ERROR(fmt, ...)				\
-	drm_err(fmt, ##__VA_ARGS__)
+#define DRM_DEV_ERROR(dev, fmt, ...)					\
+	drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\
+		       fmt, ##__VA_ARGS__)
+#define DRM_ERROR(fmt, ...)						\
+	drm_printk(KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*", fmt,	\
+		   ##__VA_ARGS__)
 
 /**
  * Rate limited error output.  Like DRM_ERROR() but won't flood the log.
@@ -198,14 +207,29 @@
  * \param fmt printf() like format string.
  * \param arg arguments
  */
-#define DRM_ERROR_RATELIMITED(fmt, ...)				\
+#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...)			\
 ({									\
 	static DEFINE_RATELIMIT_STATE(_rs,				\
 				      DEFAULT_RATELIMIT_INTERVAL,	\
 				      DEFAULT_RATELIMIT_BURST);		\
 									\
 	if (__ratelimit(&_rs))						\
-		drm_err(fmt, ##__VA_ARGS__);				\
+		DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__);			\
+})
+#define DRM_ERROR_RATELIMITED(fmt, ...)					\
+	DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_INFO(dev, fmt, ...)					\
+	drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt,	\
+		       ##__VA_ARGS__)
+
+#define DRM_DEV_INFO_ONCE(dev, fmt, ...)				\
+({									\
+	static bool __print_once __read_mostly;				\
+	if (!__print_once) {						\
+		__print_once = true;					\
+		DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__);			\
+	}								\
 })
 
 /**
@@ -214,52 +238,51 @@
  * \param fmt printf() like format string.
  * \param arg arguments
  */
+#define DRM_DEV_DEBUG(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt,	\
+		       ##args)
 #define DRM_DEBUG(fmt, args...)						\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_CORE))			\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
+	drm_printk(KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, ##args)
 
+#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "",	\
+		       fmt, ##args)
 #define DRM_DEBUG_DRIVER(fmt, args...)					\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_DRIVER))		\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
-#define DRM_DEBUG_KMS(fmt, args...)					\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_KMS))			\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
-#define DRM_DEBUG_PRIME(fmt, args...)					\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_PRIME))			\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
-#define DRM_DEBUG_ATOMIC(fmt, args...)					\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_ATOMIC))		\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
-#define DRM_DEBUG_VBL(fmt, args...)					\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_VBL))			\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
+	drm_printk(KERN_DEBUG, DRM_UT_DRIVER, __func__, "", fmt, ##args)
 
-#define _DRM_DEFINE_DEBUG_RATELIMITED(level, fmt, args...)		\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_ ## level)) {		\
-			static DEFINE_RATELIMIT_STATE(			\
-				_rs,					\
-				DEFAULT_RATELIMIT_INTERVAL,		\
-				DEFAULT_RATELIMIT_BURST);		\
-									\
-			if (__ratelimit(&_rs)) {			\
-				drm_ut_debug_printk(__func__, fmt,	\
-						    ##args);		\
-			}						\
-		}							\
-	} while (0)
+#define DRM_DEV_DEBUG_KMS(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt,	\
+		       ##args)
+#define DRM_DEBUG_KMS(fmt, args...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, ##args)
+
+#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "",	\
+		       fmt, ##args)
+#define DRM_DEBUG_PRIME(fmt, args...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_PRIME, __func__, "", fmt, ##args)
+
+#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "",	\
+		       fmt, ##args)
+#define DRM_DEBUG_ATOMIC(fmt, args...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", fmt, ##args)
+
+#define DRM_DEV_DEBUG_VBL(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt,	\
+		       ##args)
+#define DRM_DEBUG_VBL(fmt, args...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, ##args)
+
+#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...)	\
+({									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+	if (__ratelimit(&_rs))						\
+		drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level,	\
+			       __func__, "", fmt, ##args);		\
+})
 
 /**
  * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
@@ -267,14 +290,22 @@
  * \param fmt printf() like format string.
  * \param arg arguments
  */
+#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...)			\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
 #define DRM_DEBUG_RATELIMITED(fmt, args...)				\
-	_DRM_DEFINE_DEBUG_RATELIMITED(CORE, fmt, ##args)
+	DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...)		\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args)
 #define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...)			\
-	_DRM_DEFINE_DEBUG_RATELIMITED(DRIVER, fmt, ##args)
+	DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...)		\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args)
 #define DRM_DEBUG_KMS_RATELIMITED(fmt, args...)				\
-	_DRM_DEFINE_DEBUG_RATELIMITED(KMS, fmt, ##args)
+	DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...)		\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args)
 #define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...)			\
-	_DRM_DEFINE_DEBUG_RATELIMITED(PRIME, fmt, ##args)
+	DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
 
 /*@}*/
 
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d86ae5d..7ff92b0 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -29,6 +29,8 @@
 #define DRM_ATOMIC_HELPER_H_
 
 #include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
 
 struct drm_atomic_state;
 
@@ -43,8 +45,9 @@
 			     struct drm_atomic_state *state,
 			     bool nonblock);
 
-void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
-					struct drm_atomic_state *state);
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+					struct drm_atomic_state *state,
+					bool pre_swap);
 bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
 					   struct drm_atomic_state *old_state,
 					   struct drm_crtc *crtc);
@@ -63,14 +66,19 @@
 
 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 				     struct drm_atomic_state *state);
+
+#define DRM_PLANE_COMMIT_ACTIVE_ONLY			BIT(0)
+#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET	BIT(1)
+
 void drm_atomic_helper_commit_planes(struct drm_device *dev,
 				     struct drm_atomic_state *state,
-				     bool active_only);
+				     uint32_t flags);
 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
 				      struct drm_atomic_state *old_state);
 void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
-void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
-					      bool atomic);
+void
+drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
+					 bool atomic);
 
 void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
 				  bool stall);
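
The bool-to-flags conversion maps existing callers mechanically: 'true'
becomes DRM_PLANE_COMMIT_ACTIVE_ONLY, 'false' becomes 0, exactly as the
tegra, tilcdc, vc4 and virtio hunks above show. In a driver's commit tail:

	/* only touch planes on active CRTCs ... */
	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);
	/* ... or all planes, the old 'false' behaviour */
	drm_atomic_helper_commit_planes(dev, state, 0);
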
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
new file mode 100644
index 0000000..66b7d67
--- /dev/null
+++ b/include/drm/drm_connector.h
@@ -0,0 +1,747 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_CONNECTOR_H__
+#define __DRM_CONNECTOR_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+struct drm_connector_helper_funcs;
+struct drm_device;
+struct drm_crtc;
+struct drm_encoder;
+struct drm_property;
+struct drm_property_blob;
+struct edid;
+
+enum drm_connector_force {
+	DRM_FORCE_UNSPECIFIED,
+	DRM_FORCE_OFF,
+	DRM_FORCE_ON,         /* force on analog part normally */
+	DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
+/**
+ * enum drm_connector_status - status for a &drm_connector
+ *
+ * This enum is used to track the connector status. There are no separate
+ * #defines for the uapi!
+ */
+enum drm_connector_status {
+	/**
+	 * @connector_status_connected: The connector is definitely connected to
+	 * a sink device, and can be enabled.
+	 */
+	connector_status_connected = 1,
+	/**
+	 * @connector_status_disconnected: The connector isn't connected to a
+	 * sink device which can be autodetected. For digital outputs like DP or
+	 * HDMI (which can be reliably probed) this means there's really
+	 * nothing there. It is driver-dependent whether a connector with this
+	 * status can be lit up or not.
+	 */
+	connector_status_disconnected = 2,
+	/**
+	 * @connector_status_unknown: The connector's status could not be
+	 * reliably detected. This happens when probing would either cause
+	 * flicker (like load-detection when the connector is in use), or when a
+	 * hardware resource isn't available (like when load-detection needs a
+	 * free CRTC). It should be possible to light up the connector with one
+	 * of the listed fallback modes. For default configuration userspace
+	 * should only try to light up connectors with unknown status when
+	 * there's no connector with @connector_status_connected.
+	 */
+	connector_status_unknown = 3,
+};
+
+enum subpixel_order {
+	SubPixelUnknown = 0,
+	SubPixelHorizontalRGB,
+	SubPixelHorizontalBGR,
+	SubPixelVerticalRGB,
+	SubPixelVerticalBGR,
+	SubPixelNone,
+};
+
+/**
+ * struct drm_display_info - runtime data about the connected sink
+ *
+ * Describes a given display (e.g. CRT or flat panel) and its limitations. For
+ * fixed display sinks like built-in panels there's not much difference between
+ * this and struct &drm_connector. But for sinks with a real cable this
+ * structure is meant to describe all the things at the other end of the cable.
+ *
+ * For sinks which provide an EDID this can be filled out by calling
+ * drm_add_edid_modes().
+ */
+struct drm_display_info {
+	/**
+	 * @name: Name of the display.
+	 */
+	char name[DRM_DISPLAY_INFO_LEN];
+
+	/**
+	 * @width_mm: Physical width in mm.
+	 */
+	unsigned int width_mm;
+	/**
+	 * @height_mm: Physical height in mm.
+	 */
+	unsigned int height_mm;
+
+	/**
+	 * @pixel_clock: Maximum pixel clock supported by the sink, in units of
+	 * 100Hz. This differs from the clock in &drm_display_mode (which is in
+	 * kHz), because that's what the EDID uses as its base unit.
+	 */
+	unsigned int pixel_clock;
+	/**
+	 * @bpc: Maximum bits per color channel. Used by HDMI and DP outputs.
+	 */
+	unsigned int bpc;
+
+	/**
+	 * @subpixel_order: Subpixel order of LCD panels.
+	 */
+	enum subpixel_order subpixel_order;
+
+#define DRM_COLOR_FORMAT_RGB444		(1<<0)
+#define DRM_COLOR_FORMAT_YCRCB444	(1<<1)
+#define DRM_COLOR_FORMAT_YCRCB422	(1<<2)
+
+	/**
+	 * @color_formats: HDMI Color formats, selects between RGB and YCrCb
+	 * modes. Uses DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones
+	 * as used to describe the pixel format in framebuffers, and also don't
+	 * match the formats in @bus_formats which are shared with v4l.
+	 */
+	u32 color_formats;
+
+	/**
+	 * @bus_formats: Pixel data format on the wire, somewhat redundant with
+	 * @color_formats. Array of size @num_bus_formats encoded using
+	 * MEDIA_BUS_FMT\_ defines shared with v4l and media drivers.
+	 */
+	const u32 *bus_formats;
+	/**
+	 * @num_bus_formats: Size of @bus_formats array.
+	 */
+	unsigned int num_bus_formats;
+
+#define DRM_BUS_FLAG_DE_LOW		(1<<0)
+#define DRM_BUS_FLAG_DE_HIGH		(1<<1)
+/* drive data on pos. edge */
+#define DRM_BUS_FLAG_PIXDATA_POSEDGE	(1<<2)
+/* drive data on neg. edge */
+#define DRM_BUS_FLAG_PIXDATA_NEGEDGE	(1<<3)
+
+	/**
+	 * @bus_flags: Additional information (like pixel signal polarity) for
+	 * the pixel data on the bus, using DRM_BUS_FLAG\_ defines.
+	 */
+	u32 bus_flags;
+
+	/**
+	 * @edid_hdmi_dc_modes: Mask of supported HDMI deep color modes. Even
+	 * more stuff redundant with @bus_formats.
+	 */
+	u8 edid_hdmi_dc_modes;
+
+	/**
+	 * @cea_rev: CEA revision of the HDMI sink.
+	 */
+	u8 cea_rev;
+};
+
+int drm_display_info_set_bus_formats(struct drm_display_info *info,
+				     const u32 *formats,
+				     unsigned int num_formats);
+
+/**
+ * struct drm_connector_state - mutable connector state
+ * @connector: backpointer to the connector
+ * @crtc: CRTC to connect connector to, NULL if disabled
+ * @best_encoder: can be used by helpers and drivers to select the encoder
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_connector_state {
+	struct drm_connector *connector;
+
+	struct drm_crtc *crtc;  /* do not write directly, use drm_atomic_set_crtc_for_connector() */
+
+	struct drm_encoder *best_encoder;
+
+	struct drm_atomic_state *state;
+};
+
+/**
+ * struct drm_connector_funcs - control connectors on a given device
+ *
+ * Each CRTC may have one or more connectors attached to it.  The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+ * etc.
+ */
+struct drm_connector_funcs {
+	/**
+	 * @dpms:
+	 *
+	 * Legacy entry point to set the per-connector DPMS state. Legacy DPMS
+	 * is exposed as a standard property on the connector, but diverted to
+	 * this callback in the drm core. Note that atomic drivers don't
+	 * implement the 4 level DPMS support on the connector any more, but
+	 * instead only have an on/off "ACTIVE" property on the CRTC object.
+	 *
+	 * Drivers implementing atomic modeset should use
+	 * drm_atomic_helper_connector_dpms() to implement this hook.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*dpms)(struct drm_connector *connector, int mode);
+
+	/**
+	 * @reset:
+	 *
+	 * Reset connector hardware and software state to off. This function isn't
+	 * called by the core directly, only through drm_mode_config_reset().
+	 * It's not a helper hook only for historical reasons.
+	 *
+	 * Atomic drivers can use drm_atomic_helper_connector_reset() to reset
+	 * atomic state using this hook.
+	 */
+	void (*reset)(struct drm_connector *connector);
+
+	/**
+	 * @detect:
+	 *
+	 * Check to see if anything is attached to the connector. The parameter
+	 * force is set to false whilst polling, true when checking the
+	 * connector due to a user request. force can be used by the driver to
+	 * avoid expensive, destructive operations during automated probing.
+	 *
+	 * FIXME:
+	 *
+	 * Note that this hook is only called by the probe helper. It's not in
+	 * the helper library vtable purely for historical reasons. The only DRM
+	 * core	entry point to probe connector state is @fill_modes.
+	 *
+	 * RETURNS:
+	 *
+	 * drm_connector_status indicating the connector's status.
+	 */
+	enum drm_connector_status (*detect)(struct drm_connector *connector,
+					    bool force);
+
+	/**
+	 * @force:
+	 *
+	 * This function is called to update internal encoder state when the
+	 * connector is forced to a certain state by userspace, either through
+	 * the sysfs interfaces or on the kernel cmdline. In that case the
+	 * @detect callback isn't called.
+	 *
+	 * FIXME:
+	 *
+	 * Note that this hook is only called by the probe helper. It's not in
+	 * the helper library vtable purely for historical reasons. The only DRM
+	 * core	entry point to probe connector state is @fill_modes.
+	 */
+	void (*force)(struct drm_connector *connector);
+
+	/**
+	 * @fill_modes:
+	 *
+	 * Entry point for output detection and basic mode validation. The
+	 * driver should reprobe the output if needed (e.g. when hotplug
+	 * handling is unreliable), add all detected modes to connector->modes
+	 * and filter out any modes the device can't support in any configuration. It
+	 * also needs to filter out any modes wider or higher than the
+	 * parameters max_width and max_height indicate.
+	 *
+	 * The driver must also prune any modes no longer valid from
+	 * connector->modes. Furthermore it must update connector->status and
+	 * connector->edid.  If no EDID has been received for this output
+	 * connector->edid must be NULL.
+	 *
+	 * Drivers using the probe helpers should use
+	 * drm_helper_probe_single_connector_modes() or
+	 * drm_helper_probe_single_connector_modes_nomerge() to implement this
+	 * function.
+	 *
+	 * RETURNS:
+	 *
+	 * The number of modes detected and filled into connector->modes.
+	 */
+	int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+
+	/**
+	 * @set_property:
+	 *
+	 * This is the legacy entry point to update a property attached to the
+	 * connector.
+	 *
+	 * Drivers implementing atomic modeset should use
+	 * drm_atomic_helper_connector_set_property() to implement this hook.
+	 *
+	 * This callback is optional if the driver does not support any legacy
+	 * driver-private properties.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*set_property)(struct drm_connector *connector, struct drm_property *property,
+			     uint64_t val);
+
+	/**
+	 * @late_register:
+	 *
+	 * This optional hook can be used to register additional userspace
+	 * interfaces attached to the connector, like backlight control, i2c,
+	 * DP aux or similar interfaces. It is called late in the driver load
+	 * sequence from drm_connector_register() when registering all the
+	 * core drm connector interfaces. Everything added from this callback
+	 * should be unregistered in the early_unregister callback.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success, or a negative error code on failure.
+	 */
+	int (*late_register)(struct drm_connector *connector);
+
+	/**
+	 * @early_unregister:
+	 *
+	 * This optional hook should be used to unregister the additional
+	 * userspace interfaces attached to the connector from
+	 * @late_register. It is called from drm_connector_unregister(),
+	 * early in the driver unload sequence to disable userspace access
+	 * before data structures are torn down.
+	 */
+	void (*early_unregister)(struct drm_connector *connector);
+
+	/**
+	 * @destroy:
+	 *
+	 * Clean up connector resources. This is called at driver unload time
+	 * through drm_mode_config_cleanup(). It can also be called at runtime
+	 * when a connector is being hot-unplugged for drivers that support
+	 * connector hotplugging (e.g. DisplayPort MST).
+	 */
+	void (*destroy)(struct drm_connector *connector);
+
+	/**
+	 * @atomic_duplicate_state:
+	 *
+	 * Duplicate the current atomic state for this connector and return it.
+	 * The core and helpers guarantee that any atomic state duplicated with
+	 * this hook and still owned by the caller (i.e. not transferred to the
+	 * driver by calling ->atomic_commit() from struct
+	 * &drm_mode_config_funcs) will be cleaned up by calling the
+	 * @atomic_destroy_state hook in this structure.
+	 *
+	 * Atomic drivers which don't subclass struct &drm_connector_state should use
+	 * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
+	 * state structure to extend it with driver-private state should use
+	 * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
+	 * duplicated in a consistent fashion across drivers.
+	 *
+	 * It is an error to call this hook before connector->state has been
+	 * initialized correctly.
+	 *
+	 * NOTE:
+	 *
+	 * If the duplicate state references refcounted resources this hook must
+	 * acquire a reference for each of them. The driver must release these
+	 * references again in @atomic_destroy_state.
+	 *
+	 * RETURNS:
+	 *
+	 * Duplicated atomic state or NULL when the allocation failed.
+	 */
+	struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
+
+	/**
+	 * @atomic_destroy_state:
+	 *
+	 * Destroy a state duplicated with @atomic_duplicate_state and release
+	 * or unreference all resources it references.
+	 */
+	void (*atomic_destroy_state)(struct drm_connector *connector,
+				     struct drm_connector_state *state);
+
+	/**
+	 * @atomic_set_property:
+	 *
+	 * Decode a driver-private property value and store the decoded value
+	 * into the passed-in state structure. Since the atomic core decodes all
+	 * standardized properties (even for extensions beyond the core set of
+	 * properties which might not be implemented by all drivers) this
+	 * requires drivers to subclass the state structure.
+	 *
+	 * Such driver-private properties should really only be implemented for
+	 * truly hardware/vendor specific state. Instead it is preferred to
+	 * standardize the atomic extension and decode the properties used to
+	 * expose such an extension in the core.
+	 *
+	 * Do not call this function directly, use
+	 * drm_atomic_connector_set_property() instead.
+	 *
+	 * This callback is optional if the driver does not support any
+	 * driver-private atomic properties.
+	 *
+	 * NOTE:
+	 *
+	 * This function is called in the state assembly phase of atomic
+	 * modesets, which can be aborted for any reason (including on
+	 * userspace's request to just check whether a configuration would be
+	 * possible). Drivers MUST NOT touch any persistent state (hardware or
+	 * software) or data structures except the passed in @state parameter.
+	 *
+	 * Also since userspace controls in which order properties are set this
+	 * function must not do any input validation (since the state update is
+	 * incomplete and hence likely inconsistent). Instead any such input
+	 * validation must be done in the various atomic_check callbacks.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 if the property has been found, -EINVAL if the property isn't
+	 * implemented by the driver (which shouldn't ever happen, the core only
+	 * asks for properties attached to this connector). No other validation
+	 * is allowed by the driver. The core already checks that the property
+	 * value is within the range (integer, valid enum value, ...) the driver
+	 * set when registering the property.
+	 */
+	int (*atomic_set_property)(struct drm_connector *connector,
+				   struct drm_connector_state *state,
+				   struct drm_property *property,
+				   uint64_t val);
+
+	/**
+	 * @atomic_get_property:
+	 *
+	 * Reads out the decoded driver-private property. This is used to
+	 * implement the GETCONNECTOR IOCTL.
+	 *
+	 * Do not call this function directly, use
+	 * drm_atomic_connector_get_property() instead.
+	 *
+	 * This callback is optional if the driver does not support any
+	 * driver-private atomic properties.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success, -EINVAL if the property isn't implemented by the
+	 * driver (which shouldn't ever happen, the core only asks for
+	 * properties attached to this connector).
+	 */
+	int (*atomic_get_property)(struct drm_connector *connector,
+				   const struct drm_connector_state *state,
+				   struct drm_property *property,
+				   uint64_t *val);
+};
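+
+/*
+ * A minimal sketch (illustrative, not part of this patch) of how an atomic
+ * driver might fill this vtable using the helper defaults; foo_detect() is
+ * a hypothetical driver function:
+ *
+ *	static const struct drm_connector_funcs foo_connector_funcs = {
+ *		.dpms = drm_atomic_helper_connector_dpms,
+ *		.reset = drm_atomic_helper_connector_reset,
+ *		.detect = foo_detect,
+ *		.fill_modes = drm_helper_probe_single_connector_modes,
+ *		.destroy = drm_connector_cleanup,
+ *		.atomic_duplicate_state =
+ *			drm_atomic_helper_connector_duplicate_state,
+ *		.atomic_destroy_state =
+ *			drm_atomic_helper_connector_destroy_state,
+ *	};
+ */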
+
+/* mode specified on the command line */
+struct drm_cmdline_mode {
+	bool specified;
+	bool refresh_specified;
+	bool bpp_specified;
+	int xres, yres;
+	int bpp;
+	int refresh;
+	bool rb;
+	bool interlace;
+	bool cvt;
+	bool margins;
+	enum drm_connector_force force;
+};
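+
+/*
+ * For example (illustrative), booting with video=HDMI-A-1:1920x1080@60e
+ * results in specified = true, xres = 1920, yres = 1080,
+ * refresh_specified = true, refresh = 60 and force = DRM_FORCE_ON for the
+ * matching connector.
+ */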
+
+/**
+ * struct drm_connector - central DRM connector control structure
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @name: human readable name, can be overwritten by the driver
+ * @connector_type: one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
+ * @stereo_allowed: can this connector handle stereo modes?
+ * @registered: is this connector exposed (registered) with userspace?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @funcs: connector control functions
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @properties: property tracking for this connector
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
+ * @force: a DRM_FORCE_<foo> state for forced mode sets
+ * @override_edid: has the EDID been overwritten through debugfs for testing?
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @dvi_dual: dual link DVI, if found
+ * @max_tmds_clock: max clock rate, if found
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
+ * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @edid_corrupt: indicates whether the last read EDID was corrupt
+ * @debugfs_entry: debugfs directory for this connector
+ * @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the tile is one monitor housing
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC.  Each connector also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_connector {
+	struct drm_device *dev;
+	struct device *kdev;
+	struct device_attribute *attr;
+	struct list_head head;
+
+	struct drm_mode_object base;
+
+	char *name;
+
+	/**
+	 * @index: Compacted connector index, which matches the position inside
+	 * the mode_config.list for drivers not supporting hot-add/removing. Can
+	 * be used as an array index. It is invariant over the lifetime of the
+	 * connector.
+	 */
+	unsigned index;
+
+	int connector_type;
+	int connector_type_id;
+	bool interlace_allowed;
+	bool doublescan_allowed;
+	bool stereo_allowed;
+	bool registered;
+	struct list_head modes; /* list of modes on this connector */
+
+	enum drm_connector_status status;
+
+	/* these are modes added by probing with DDC or the BIOS */
+	struct list_head probed_modes;
+
+	/**
+	 * @display_info: Display information is filled from EDID information
+	 * when a display is detected. For non hot-pluggable displays such as
+	 * flat panels in embedded systems, the driver should initialize the
+	 * display_info.width_mm and display_info.height_mm fields with the
+	 * physical size of the display.
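+	 *
+	 * For example (illustrative), a driver for a fixed 217mm x 136mm
+	 * panel could do:
+	 *
+	 *	connector->display_info.width_mm = 217;
+	 *	connector->display_info.height_mm = 136;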
+	 */
+	struct drm_display_info display_info;
+	const struct drm_connector_funcs *funcs;
+
+	struct drm_property_blob *edid_blob_ptr;
+	struct drm_object_properties properties;
+
+	/**
+	 * @path_blob_ptr:
+	 *
+	 * DRM blob property data for the DP MST path property.
+	 */
+	struct drm_property_blob *path_blob_ptr;
+
+	/**
+	 * @tile_blob_ptr:
+	 *
+	 * DRM blob property data for the tile property (used mostly by DP MST).
+	 * This is meant for screens which are driven through separate display
+	 * pipelines represented by &drm_crtc, which might not be running with
+	 * genlocked clocks. For tiled panels which are genlocked, like
+	 * dual-link LVDS or dual-link DSI, the driver should try to not expose
+	 * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+	 */
+	struct drm_property_blob *tile_blob_ptr;
+
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
+	/**
+	 * @polled:
+	 *
+	 * Connector polling mode, a combination of
+	 *
+	 * DRM_CONNECTOR_POLL_HPD
+	 *     The connector generates hotplug events and doesn't need to be
+	 *     periodically polled. The CONNECT and DISCONNECT flags must not
+	 *     be set together with the HPD flag.
+	 *
+	 * DRM_CONNECTOR_POLL_CONNECT
+	 *     Periodically poll the connector for connection.
+	 *
+	 * DRM_CONNECTOR_POLL_DISCONNECT
+	 *     Periodically poll the connector for disconnection.
+	 *
+	 * Set to 0 for connectors that don't support connection status
+	 * discovery.
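+	 *
+	 * As an illustration (not part of this patch), a driver for an
+	 * output without working HPD might set:
+	 *
+	 *	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+	 *			    DRM_CONNECTOR_POLL_DISCONNECT;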
+	 */
+	uint8_t polled;
+
+	/* requested DPMS state */
+	int dpms;
+
+	const struct drm_connector_helper_funcs *helper_private;
+
+	/* forced on connector */
+	struct drm_cmdline_mode cmdline_mode;
+	enum drm_connector_force force;
+	bool override_edid;
+
+#define DRM_CONNECTOR_MAX_ENCODER 3
+	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+	struct drm_encoder *encoder; /* currently active encoder */
+
+#define MAX_ELD_BYTES	128
+	/* EDID bits */
+	uint8_t eld[MAX_ELD_BYTES];
+	bool dvi_dual;
+	int max_tmds_clock;	/* in MHz */
+	bool latency_present[2];
+	int video_latency[2];	/* [0]: progressive, [1]: interlaced */
+	int audio_latency[2];
+	int null_edid_counter; /* needed to work around some HW bugs where we get all 0s */
+	unsigned bad_edid_counter;
+
+	/* Flag for raw EDID header corruption - used in Displayport
+	 * compliance testing - Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+	 */
+	bool edid_corrupt;
+
+	struct dentry *debugfs_entry;
+
+	struct drm_connector_state *state;
+
+	/* DisplayID bits */
+	bool has_tile;
+	struct drm_tile_group *tile_group;
+	bool tile_is_single_monitor;
+
+	uint8_t num_h_tile, num_v_tile;
+	uint8_t tile_h_loc, tile_v_loc;
+	uint16_t tile_h_size, tile_v_size;
+};
+
+#define obj_to_connector(x) container_of(x, struct drm_connector, base)
+
+int drm_connector_init(struct drm_device *dev,
+		       struct drm_connector *connector,
+		       const struct drm_connector_funcs *funcs,
+		       int connector_type);
+int drm_connector_register(struct drm_connector *connector);
+void drm_connector_unregister(struct drm_connector *connector);
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+				      struct drm_encoder *encoder);
+
+void drm_connector_cleanup(struct drm_connector *connector);
+static inline unsigned drm_connector_index(struct drm_connector *connector)
+{
+	return connector->index;
+}
+
+/**
+ * drm_connector_lookup - lookup connector object
+ * @dev: DRM device
+ * @id: connector object id
+ *
+ * This function looks up the connector object specified by id
+ * and takes a reference to it.
+ */
+static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
+		uint32_t id)
+{
+	struct drm_mode_object *mo;
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
+	return mo ? obj_to_connector(mo) : NULL;
+}
+
+/**
+ * drm_connector_reference - incr the connector refcnt
+ * @connector: connector
+ *
+ * This function increments the connector's refcount.
+ */
+static inline void drm_connector_reference(struct drm_connector *connector)
+{
+	drm_mode_object_reference(&connector->base);
+}
+
+/**
+ * drm_connector_unreference - unref a connector
+ * @connector: connector to unref
+ *
+ * This function decrements the connector's refcount and frees it if it drops to zero.
+ */
+static inline void drm_connector_unreference(struct drm_connector *connector)
+{
+	drm_mode_object_unreference(&connector->base);
+}
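+
+/*
+ * Illustrative pairing (not part of this patch): references acquired through
+ * drm_connector_lookup() must be dropped again with
+ * drm_connector_unreference():
+ *
+ *	struct drm_connector *connector;
+ *
+ *	connector = drm_connector_lookup(dev, id);
+ *	if (connector) {
+ *		... use the connector ...
+ *		drm_connector_unreference(connector);
+ *	}
+ */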
+
+const char *drm_get_connector_status_name(enum drm_connector_status status);
+const char *drm_get_subpixel_order_name(enum subpixel_order order);
+const char *drm_get_dpms_name(int val);
+const char *drm_get_dvi_i_subconnector_name(int val);
+const char *drm_get_dvi_i_select_name(int val);
+const char *drm_get_tv_subconnector_name(int val);
+const char *drm_get_tv_select_name(int val);
+
+int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+int drm_mode_create_tv_properties(struct drm_device *dev,
+				  unsigned int num_modes,
+				  const char * const modes[]);
+int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
+
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+					 const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+					    const struct edid *edid);
+#endif
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b618b50..8ca71d6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -36,40 +36,21 @@
 #include <uapi/drm/drm_fourcc.h>
 #include <drm/drm_modeset_lock.h>
 #include <drm/drm_rect.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_property.h>
 
 struct drm_device;
 struct drm_mode_set;
-struct drm_framebuffer;
-struct drm_object_properties;
 struct drm_file;
 struct drm_clip_rect;
 struct device_node;
 struct fence;
 struct edid;
 
-struct drm_mode_object {
-	uint32_t id;
-	uint32_t type;
-	struct drm_object_properties *properties;
-	struct kref refcount;
-	void (*free_cb)(struct kref *kref);
-};
-
-#define DRM_OBJECT_MAX_PROPERTY 24
-struct drm_object_properties {
-	int count, atomic_count;
-	/* NOTE: if we ever start dynamically destroying properties (ie.
-	 * not at drm_mode_config_cleanup() time), then we'd have to do
-	 * a better job of detaching property from mode objects to avoid
-	 * dangling property pointers:
-	 */
-	struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
-	/* do not read/write values directly, but use drm_object_property_get_value()
-	 * and drm_object_property_set_value():
-	 */
-	uint64_t values[DRM_OBJECT_MAX_PROPERTY];
-};
-
 static inline int64_t U642I64(uint64_t val)
 {
 	return (int64_t)*((int64_t *)&val);
@@ -94,70 +75,6 @@
 #define DRM_REFLECT_Y	BIT(5)
 #define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
 
-enum drm_connector_force {
-	DRM_FORCE_UNSPECIFIED,
-	DRM_FORCE_OFF,
-	DRM_FORCE_ON,         /* force on analog part normally */
-	DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
-};
-
-#include <drm/drm_modes.h>
-
-enum drm_connector_status {
-	connector_status_connected = 1,
-	connector_status_disconnected = 2,
-	connector_status_unknown = 3,
-};
-
-enum subpixel_order {
-	SubPixelUnknown = 0,
-	SubPixelHorizontalRGB,
-	SubPixelHorizontalBGR,
-	SubPixelVerticalRGB,
-	SubPixelVerticalBGR,
-	SubPixelNone,
-};
-
-#define DRM_COLOR_FORMAT_RGB444		(1<<0)
-#define DRM_COLOR_FORMAT_YCRCB444	(1<<1)
-#define DRM_COLOR_FORMAT_YCRCB422	(1<<2)
-
-#define DRM_BUS_FLAG_DE_LOW		(1<<0)
-#define DRM_BUS_FLAG_DE_HIGH		(1<<1)
-/* drive data on pos. edge */
-#define DRM_BUS_FLAG_PIXDATA_POSEDGE	(1<<2)
-/* drive data on neg. edge */
-#define DRM_BUS_FLAG_PIXDATA_NEGEDGE	(1<<3)
-
-/*
- * Describes a given display (e.g. CRT or flat panel) and its limitations.
- */
-struct drm_display_info {
-	char name[DRM_DISPLAY_INFO_LEN];
-
-	/* Physical size */
-        unsigned int width_mm;
-	unsigned int height_mm;
-
-	/* Clock limits FIXME: storage format */
-	unsigned int min_vfreq, max_vfreq;
-	unsigned int min_hfreq, max_hfreq;
-	unsigned int pixel_clock;
-	unsigned int bpc;
-
-	enum subpixel_order subpixel_order;
-	u32 color_formats;
-
-	const u32 *bus_formats;
-	unsigned int num_bus_formats;
-	u32 bus_flags;
-
-	/* Mask of supported hdmi deep color modes */
-	u8 edid_hdmi_dc_modes;
-
-	u8 cea_rev;
-};
-
 /* data corresponds to displayid vend/prod/serial */
 struct drm_tile_group {
 	struct kref refcount;
@@ -166,130 +83,7 @@
 	u8 group_data[8];
 };
 
-/**
- * struct drm_framebuffer_funcs - framebuffer hooks
- */
-struct drm_framebuffer_funcs {
-	/**
-	 * @destroy:
-	 *
-	 * Clean up framebuffer resources, specifically also unreference the
-	 * backing storage. The core guarantees to call this function for every
-	 * framebuffer successfully created by ->fb_create() in
-	 * &drm_mode_config_funcs. Drivers must also call
-	 * drm_framebuffer_cleanup() to release DRM core resources for this
-	 * framebuffer.
-	 */
-	void (*destroy)(struct drm_framebuffer *framebuffer);
-
-	/**
-	 * @create_handle:
-	 *
-	 * Create a buffer handle in the driver-specific buffer manager (either
-	 * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
-	 * the core to implement the GETFB IOCTL, which returns (for
-	 * sufficiently priviledged user) also a native buffer handle. This can
-	 * be used for seamless transitions between modesetting clients by
-	 * copying the current screen contents to a private buffer and blending
-	 * between that and the new contents.
-	 *
-	 * GEM based drivers should call drm_gem_handle_create() to create the
-	 * handle.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*create_handle)(struct drm_framebuffer *fb,
-			     struct drm_file *file_priv,
-			     unsigned int *handle);
-	/**
-	 * @dirty:
-	 *
-	 * Optional callback for the dirty fb IOCTL.
-	 *
-	 * Userspace can notify the driver via this callback that an area of the
-	 * framebuffer has changed and should be flushed to the display
-	 * hardware. This can also be used internally, e.g. by the fbdev
-	 * emulation, though that's not the case currently.
-	 *
-	 * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd
-	 * for more information as all the semantics and arguments have a one to
-	 * one mapping on this function.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*dirty)(struct drm_framebuffer *framebuffer,
-		     struct drm_file *file_priv, unsigned flags,
-		     unsigned color, struct drm_clip_rect *clips,
-		     unsigned num_clips);
-};
-
-struct drm_framebuffer {
-	struct drm_device *dev;
-	/*
-	 * Note that the fb is refcounted for the benefit of driver internals,
-	 * for example some hw, disabling a CRTC/plane is asynchronous, and
-	 * scanout does not actually complete until the next vblank.  So some
-	 * cleanup (like releasing the reference(s) on the backing GEM bo(s))
-	 * should be deferred.  In cases like this, the driver would like to
-	 * hold a ref to the fb even though it has already been removed from
-	 * userspace perspective.
-	 * The refcount is stored inside the mode object.
-	 */
-	/*
-	 * Place on the dev->mode_config.fb_list, access protected by
-	 * dev->mode_config.fb_lock.
-	 */
-	struct list_head head;
-	struct drm_mode_object base;
-	const struct drm_framebuffer_funcs *funcs;
-	unsigned int pitches[4];
-	unsigned int offsets[4];
-	uint64_t modifier[4];
-	unsigned int width;
-	unsigned int height;
-	/* depth can be 15 or 16 */
-	unsigned int depth;
-	int bits_per_pixel;
-	int flags;
-	uint32_t pixel_format; /* fourcc format */
-	int hot_x;
-	int hot_y;
-	struct list_head filp_head;
-};
-
-struct drm_property_blob {
-	struct drm_mode_object base;
-	struct drm_device *dev;
-	struct list_head head_global;
-	struct list_head head_file;
-	size_t length;
-	unsigned char data[];
-};
-
-struct drm_property_enum {
-	uint64_t value;
-	struct list_head head;
-	char name[DRM_PROP_NAME_LEN];
-};
-
-struct drm_property {
-	struct list_head head;
-	struct drm_mode_object base;
-	uint32_t flags;
-	char name[DRM_PROP_NAME_LEN];
-	uint32_t num_values;
-	uint64_t *values;
-	struct drm_device *dev;
-
-	struct list_head enum_list;
-};
-
 struct drm_crtc;
-struct drm_connector;
 struct drm_encoder;
 struct drm_pending_vblank_event;
 struct drm_plane;
@@ -298,7 +92,6 @@
 
 struct drm_crtc_helper_funcs;
 struct drm_encoder_helper_funcs;
-struct drm_connector_helper_funcs;
 struct drm_plane_helper_funcs;
 
 /**
@@ -547,16 +340,6 @@
 	 * counter and timestamp tracking though, e.g. if they have accurate
 	 * timestamp registers in hardware.
 	 *
-	 * FIXME:
-	 *
-	 * Up to that point drivers need to manage events themselves and can use
-	 * even->base.list freely for that. Specifically they need to ensure
-	 * that they don't send out page flip (or vblank) events for which the
-	 * corresponding drm file has been closed already. The drm core
-	 * unfortunately does not (yet) take care of that. Therefore drivers
-	 * currently must clean up and release pending events in their
-	 * ->preclose driver function.
-	 *
 	 * This callback is optional.
 	 *
 	 * NOTE:
@@ -583,6 +366,24 @@
 			 uint32_t flags);
 
 	/**
+	 * @page_flip_target:
+	 *
+	 * Same as @page_flip but with an additional parameter specifying the
+	 * absolute target vertical blank period (as reported by
+	 * drm_crtc_vblank_count()) when the flip should take effect.
+	 *
+	 * Note that the core code calls drm_crtc_vblank_get before this entry
+	 * point, and will call drm_crtc_vblank_put if this entry point returns
+	 * any non-0 error code. It's the driver's responsibility to call
+	 * drm_crtc_vblank_put after this entry point returns 0, typically when
+	 * the flip completes.
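+	 *
+	 * The resulting error handling in callers is roughly (sketch):
+	 *
+	 *	ret = crtc->funcs->page_flip_target(crtc, fb, event, flags,
+	 *					    target);
+	 *	if (ret)
+	 *		drm_crtc_vblank_put(crtc);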
+	 */
+	int (*page_flip_target)(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event,
+				uint32_t flags, uint32_t target);
+
+	/**
 	 * @set_property:
 	 *
 	 * This is the legacy entry point to update a property attached to the
@@ -854,549 +655,6 @@
 };
 
 /**
- * struct drm_connector_state - mutable connector state
- * @connector: backpointer to the connector
- * @crtc: CRTC to connect connector to, NULL if disabled
- * @best_encoder: can be used by helpers and drivers to select the encoder
- * @state: backpointer to global drm_atomic_state
- */
-struct drm_connector_state {
-	struct drm_connector *connector;
-
-	struct drm_crtc *crtc;  /* do not write directly, use drm_atomic_set_crtc_for_connector() */
-
-	struct drm_encoder *best_encoder;
-
-	struct drm_atomic_state *state;
-};
-
-/**
- * struct drm_connector_funcs - control connectors on a given device
- *
- * Each CRTC may have one or more connectors attached to it.  The functions
- * below allow the core DRM code to control connectors, enumerate available modes,
- * etc.
- */
-struct drm_connector_funcs {
-	/**
-	 * @dpms:
-	 *
-	 * Legacy entry point to set the per-connector DPMS state. Legacy DPMS
-	 * is exposed as a standard property on the connector, but diverted to
-	 * this callback in the drm core. Note that atomic drivers don't
-	 * implement the 4 level DPMS support on the connector any more, but
-	 * instead only have an on/off "ACTIVE" property on the CRTC object.
-	 *
-	 * Drivers implementing atomic modeset should use
-	 * drm_atomic_helper_connector_dpms() to implement this hook.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*dpms)(struct drm_connector *connector, int mode);
-
-	/**
-	 * @reset:
-	 *
-	 * Reset connector hardware and software state to off. This function isn't
-	 * called by the core directly, only through drm_mode_config_reset().
-	 * It's not a helper hook only for historical reasons.
-	 *
-	 * Atomic drivers can use drm_atomic_helper_connector_reset() to reset
-	 * atomic state using this hook.
-	 */
-	void (*reset)(struct drm_connector *connector);
-
-	/**
-	 * @detect:
-	 *
-	 * Check to see if anything is attached to the connector. The parameter
-	 * force is set to false whilst polling, true when checking the
-	 * connector due to a user request. force can be used by the driver to
-	 * avoid expensive, destructive operations during automated probing.
-	 *
-	 * FIXME:
-	 *
-	 * Note that this hook is only called by the probe helper. It's not in
-	 * the helper library vtable purely for historical reasons. The only DRM
-	 * core	entry point to probe connector state is @fill_modes.
-	 *
-	 * RETURNS:
-	 *
-	 * drm_connector_status indicating the connector's status.
-	 */
-	enum drm_connector_status (*detect)(struct drm_connector *connector,
-					    bool force);
-
-	/**
-	 * @force:
-	 *
-	 * This function is called to update internal encoder state when the
-	 * connector is forced to a certain state by userspace, either through
-	 * the sysfs interfaces or on the kernel cmdline. In that case the
-	 * @detect callback isn't called.
-	 *
-	 * FIXME:
-	 *
-	 * Note that this hook is only called by the probe helper. It's not in
-	 * the helper library vtable purely for historical reasons. The only DRM
-	 * core	entry point to probe connector state is @fill_modes.
-	 */
-	void (*force)(struct drm_connector *connector);
-
-	/**
-	 * @fill_modes:
-	 *
-	 * Entry point for output detection and basic mode validation. The
-	 * driver should reprobe the output if needed (e.g. when hotplug
-	 * handling is unreliable), add all detected modes to connector->modes
-	 * and filter out any the device can't support in any configuration. It
-	 * also needs to filter out any modes wider or higher than the
-	 * parameters max_width and max_height indicate.
-	 *
-	 * The drivers must also prune any modes no longer valid from
-	 * connector->modes. Furthermore it must update connector->status and
-	 * connector->edid.  If no EDID has been received for this output
-	 * connector->edid must be NULL.
-	 *
-	 * Drivers using the probe helpers should use
-	 * drm_helper_probe_single_connector_modes() or
-	 * drm_helper_probe_single_connector_modes_nomerge() to implement this
-	 * function.
-	 *
-	 * RETURNS:
-	 *
-	 * The number of modes detected and filled into connector->modes.
-	 */
-	int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
-
-	/**
-	 * @set_property:
-	 *
-	 * This is the legacy entry point to update a property attached to the
-	 * connector.
-	 *
-	 * Drivers implementing atomic modeset should use
-	 * drm_atomic_helper_connector_set_property() to implement this hook.
-	 *
-	 * This callback is optional if the driver does not support any legacy
-	 * driver-private properties.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*set_property)(struct drm_connector *connector, struct drm_property *property,
-			     uint64_t val);
-
-	/**
-	 * @late_register:
-	 *
-	 * This optional hook can be used to register additional userspace
-	 * interfaces attached to the connector, light backlight control, i2c,
-	 * DP aux or similar interfaces. It is called late in the driver load
-	 * sequence from drm_connector_register() when registering all the
-	 * core drm connector interfaces. Everything added from this callback
-	 * should be unregistered in the early_unregister callback.
-	 *
-	 * Returns:
-	 *
-	 * 0 on success, or a negative error code on failure.
-	 */
-	int (*late_register)(struct drm_connector *connector);
-
-	/**
-	 * @early_unregister:
-	 *
-	 * This optional hook should be used to unregister the additional
-	 * userspace interfaces attached to the connector from
-	 * late_unregister(). It is called from drm_connector_unregister(),
-	 * early in the driver unload sequence to disable userspace access
-	 * before data structures are torndown.
-	 */
-	void (*early_unregister)(struct drm_connector *connector);
-
-	/**
-	 * @destroy:
-	 *
-	 * Clean up connector resources. This is called at driver unload time
-	 * through drm_mode_config_cleanup(). It can also be called at runtime
-	 * when a connector is being hot-unplugged for drivers that support
-	 * connector hotplugging (e.g. DisplayPort MST).
-	 */
-	void (*destroy)(struct drm_connector *connector);
-
-	/**
-	 * @atomic_duplicate_state:
-	 *
-	 * Duplicate the current atomic state for this connector and return it.
-	 * The core and helpers gurantee that any atomic state duplicated with
-	 * this hook and still owned by the caller (i.e. not transferred to the
-	 * driver by calling ->atomic_commit() from struct
-	 * &drm_mode_config_funcs) will be cleaned up by calling the
-	 * @atomic_destroy_state hook in this structure.
-	 *
-	 * Atomic drivers which don't subclass struct &drm_connector_state should use
-	 * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
-	 * state structure to extend it with driver-private state should use
-	 * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
-	 * duplicated in a consistent fashion across drivers.
-	 *
-	 * It is an error to call this hook before connector->state has been
-	 * initialized correctly.
-	 *
-	 * NOTE:
-	 *
-	 * If the duplicate state references refcounted resources this hook must
-	 * acquire a reference for each of them. The driver must release these
-	 * references again in @atomic_destroy_state.
-	 *
-	 * RETURNS:
-	 *
-	 * Duplicated atomic state or NULL when the allocation failed.
-	 */
-	struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
-
-	/**
-	 * @atomic_destroy_state:
-	 *
-	 * Destroy a state duplicated with @atomic_duplicate_state and release
-	 * or unreference all resources it references
-	 */
-	void (*atomic_destroy_state)(struct drm_connector *connector,
-				     struct drm_connector_state *state);
-
-	/**
-	 * @atomic_set_property:
-	 *
-	 * Decode a driver-private property value and store the decoded value
-	 * into the passed-in state structure. Since the atomic core decodes all
-	 * standardized properties (even for extensions beyond the core set of
-	 * properties which might not be implemented by all drivers) this
-	 * requires drivers to subclass the state structure.
-	 *
-	 * Such driver-private properties should really only be implemented for
-	 * truly hardware/vendor specific state. Instead it is preferred to
-	 * standardize atomic extension and decode the properties used to expose
-	 * such an extension in the core.
-	 *
-	 * Do not call this function directly, use
-	 * drm_atomic_connector_set_property() instead.
-	 *
-	 * This callback is optional if the driver does not support any
-	 * driver-private atomic properties.
-	 *
-	 * NOTE:
-	 *
-	 * This function is called in the state assembly phase of atomic
-	 * modesets, which can be aborted for any reason (including on
-	 * userspace's request to just check whether a configuration would be
-	 * possible). Drivers MUST NOT touch any persistent state (hardware or
-	 * software) or data structures except the passed in @state parameter.
-	 *
-	 * Also since userspace controls in which order properties are set this
-	 * function must not do any input validation (since the state update is
-	 * incomplete and hence likely inconsistent). Instead any such input
-	 * validation must be done in the various atomic_check callbacks.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 if the property has been found, -EINVAL if the property isn't
-	 * implemented by the driver (which shouldn't ever happen, the core only
-	 * asks for properties attached to this connector). No other validation
-	 * is allowed by the driver. The core already checks that the property
-	 * value is within the range (integer, valid enum value, ...) the driver
-	 * set when registering the property.
-	 */
-	int (*atomic_set_property)(struct drm_connector *connector,
-				   struct drm_connector_state *state,
-				   struct drm_property *property,
-				   uint64_t val);
-
-	/**
-	 * @atomic_get_property:
-	 *
-	 * Reads out the decoded driver-private property. This is used to
-	 * implement the GETCONNECTOR IOCTL.
-	 *
-	 * Do not call this function directly, use
-	 * drm_atomic_connector_get_property() instead.
-	 *
-	 * This callback is optional if the driver does not support any
-	 * driver-private atomic properties.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success, -EINVAL if the property isn't implemented by the
-	 * driver (which shouldn't ever happen, the core only asks for
-	 * properties attached to this connector).
-	 */
-	int (*atomic_get_property)(struct drm_connector *connector,
-				   const struct drm_connector_state *state,
-				   struct drm_property *property,
-				   uint64_t *val);
-};
-
-/**
- * struct drm_encoder_funcs - encoder controls
- *
- * Encoders sit between CRTCs and connectors.
- */
-struct drm_encoder_funcs {
-	/**
-	 * @reset:
-	 *
-	 * Reset encoder hardware and software state to off. This function isn't
-	 * called by the core directly, only through drm_mode_config_reset().
-	 * It's not a helper hook only for historical reasons.
-	 */
-	void (*reset)(struct drm_encoder *encoder);
-
-	/**
-	 * @destroy:
-	 *
-	 * Clean up encoder resources. This is only called at driver unload time
-	 * through drm_mode_config_cleanup() since an encoder cannot be
-	 * hotplugged in DRM.
-	 */
-	void (*destroy)(struct drm_encoder *encoder);
-
-	/**
-	 * @late_register:
-	 *
-	 * This optional hook can be used to register additional userspace
-	 * interfaces attached to the encoder like debugfs interfaces.
-	 * It is called late in the driver load sequence from drm_dev_register().
-	 * Everything added from this callback should be unregistered in
-	 * the early_unregister callback.
-	 *
-	 * Returns:
-	 *
-	 * 0 on success, or a negative error code on failure.
-	 */
-	int (*late_register)(struct drm_encoder *encoder);
-
-	/**
-	 * @early_unregister:
-	 *
-	 * This optional hook should be used to unregister the additional
-	 * userspace interfaces attached to the encoder from
-	 * late_unregister(). It is called from drm_dev_unregister(),
-	 * early in the driver unload sequence to disable userspace access
-	 * before data structures are torndown.
-	 */
-	void (*early_unregister)(struct drm_encoder *encoder);
-};
-
-#define DRM_CONNECTOR_MAX_ENCODER 3
-
-/**
- * struct drm_encoder - central DRM encoder structure
- * @dev: parent DRM device
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
- * @possible_crtcs: bitmask of potential CRTC bindings
- * @possible_clones: bitmask of potential sibling encoders for cloning
- * @crtc: currently bound CRTC
- * @bridge: bridge associated to the encoder
- * @funcs: control functions
- * @helper_private: mid-layer private data
- *
- * CRTCs drive pixels to encoders, which convert them into signals
- * appropriate for a given connector or set of connectors.
- */
-struct drm_encoder {
-	struct drm_device *dev;
-	struct list_head head;
-
-	struct drm_mode_object base;
-	char *name;
-	int encoder_type;
-
-	/**
-	 * @index: Position inside the mode_config.list, can be used as an array
-	 * index. It is invariant over the lifetime of the encoder.
-	 */
-	unsigned index;
-
-	uint32_t possible_crtcs;
-	uint32_t possible_clones;
-
-	struct drm_crtc *crtc;
-	struct drm_bridge *bridge;
-	const struct drm_encoder_funcs *funcs;
-	const struct drm_encoder_helper_funcs *helper_private;
-};
-
-/* should we poll this connector for connects and disconnects */
-/* hot plug detectable */
-#define DRM_CONNECTOR_POLL_HPD (1 << 0)
-/* poll for connections */
-#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
-/* can cleanly poll for disconnections without flickering the screen */
-/* DACs should rarely do this without a lot of testing */
-#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
-
-#define MAX_ELD_BYTES	128
-
-/**
- * struct drm_connector - central DRM connector control structure
- * @dev: parent DRM device
- * @kdev: kernel device for sysfs attributes
- * @attr: sysfs attributes
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
- * @connector_type_id: index into connector type enum
- * @interlace_allowed: can this connector handle interlaced modes?
- * @doublescan_allowed: can this connector handle doublescan?
- * @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
- * @modes: modes available on this connector (from fill_modes() + user)
- * @status: one of the drm_connector_status enums (connected, not, or unknown)
- * @probed_modes: list of modes derived directly from the display
- * @display_info: information about attached display (e.g. from EDID)
- * @funcs: connector control functions
- * @edid_blob_ptr: DRM property containing EDID if present
- * @properties: property tracking for this connector
- * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
- * @dpms: current dpms state
- * @helper_private: mid-layer private data
- * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
- * @force: a %DRM_FORCE_<foo> state for forced mode sets
- * @override_edid: has the EDID been overwritten through debugfs for testing?
- * @encoder_ids: valid encoders for this connector
- * @encoder: encoder driving this connector, if any
- * @eld: EDID-like data, if present
- * @dvi_dual: dual link DVI, if found
- * @max_tmds_clock: max clock rate, if found
- * @latency_present: AV delay info from ELD, if found
- * @video_latency: video latency info from ELD, if found
- * @audio_latency: audio latency info from ELD, if found
- * @null_edid_counter: track sinks that give us all zeros for the EDID
- * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
- * @edid_corrupt: indicates whether the last read EDID was corrupt
- * @debugfs_entry: debugfs directory for this connector
- * @state: current atomic state for this connector
- * @has_tile: is this connector connected to a tiled monitor
- * @tile_group: tile group for the connected monitor
- * @tile_is_single_monitor: whether the tile is one monitor housing
- * @num_h_tile: number of horizontal tiles in the tile group
- * @num_v_tile: number of vertical tiles in the tile group
- * @tile_h_loc: horizontal location of this tile
- * @tile_v_loc: vertical location of this tile
- * @tile_h_size: horizontal size of this tile.
- * @tile_v_size: vertical size of this tile.
- *
- * Each connector may be connected to one or more CRTCs, or may be clonable by
- * another connector if they can share a CRTC.  Each connector also has a specific
- * position in the broader display (referred to as a 'screen' though it could
- * span multiple monitors).
- */
-struct drm_connector {
-	struct drm_device *dev;
-	struct device *kdev;
-	struct device_attribute *attr;
-	struct list_head head;
-
-	struct drm_mode_object base;
-
-	char *name;
-
-	/**
-	 * @index: Compacted connector index, which matches the position inside
-	 * the mode_config.list for drivers not supporting hot-add/removing. Can
-	 * be used as an array index. It is invariant over the lifetime of the
-	 * connector.
-	 */
-	unsigned index;
-
-	int connector_type;
-	int connector_type_id;
-	bool interlace_allowed;
-	bool doublescan_allowed;
-	bool stereo_allowed;
-	bool registered;
-	struct list_head modes; /* list of modes on this connector */
-
-	enum drm_connector_status status;
-
-	/* these are modes added by probing with DDC or the BIOS */
-	struct list_head probed_modes;
-
-	struct drm_display_info display_info;
-	const struct drm_connector_funcs *funcs;
-
-	struct drm_property_blob *edid_blob_ptr;
-	struct drm_object_properties properties;
-
-	/**
-	 * @path_blob_ptr:
-	 *
-	 * DRM blob property data for the DP MST path property.
-	 */
-	struct drm_property_blob *path_blob_ptr;
-
-	/**
-	 * @tile_blob_ptr:
-	 *
-	 * DRM blob property data for the tile property (used mostly by DP MST).
-	 * This is meant for screens which are driven through separate display
-	 * pipelines represented by &drm_crtc, which might not be running with
-	 * genlocked clocks. For tiled panels which are genlocked, like
-	 * dual-link LVDS or dual-link DSI, the driver should try to not expose
-	 * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
-	 */
-	struct drm_property_blob *tile_blob_ptr;
-
-	uint8_t polled; /* DRM_CONNECTOR_POLL_* */
-
-	/* requested DPMS state */
-	int dpms;
-
-	const struct drm_connector_helper_funcs *helper_private;
-
-	/* forced on connector */
-	struct drm_cmdline_mode cmdline_mode;
-	enum drm_connector_force force;
-	bool override_edid;
-	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
-	struct drm_encoder *encoder; /* currently active encoder */
-
-	/* EDID bits */
-	uint8_t eld[MAX_ELD_BYTES];
-	bool dvi_dual;
-	int max_tmds_clock;	/* in MHz */
-	bool latency_present[2];
-	int video_latency[2];	/* [0]: progressive, [1]: interlaced */
-	int audio_latency[2];
-	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
-	unsigned bad_edid_counter;
-
-	/* Flag for raw EDID header corruption - used in Displayport
-	 * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
-	 */
-	bool edid_corrupt;
-
-	struct dentry *debugfs_entry;
-
-	struct drm_connector_state *state;
-
-	/* DisplayID bits */
-	bool has_tile;
-	struct drm_tile_group *tile_group;
-	bool tile_is_single_monitor;
-
-	uint8_t num_h_tile, num_v_tile;
-	uint8_t tile_h_loc, tile_v_loc;
-	uint16_t tile_h_size, tile_v_size;
-};
-
-/**
  * struct drm_plane_state - mutable plane state
  * @plane: backpointer to the plane
  * @crtc: currently bound CRTC, NULL if disabled
@@ -1762,12 +1020,33 @@
 
 /**
  * struct drm_bridge_funcs - drm_bridge control functions
- * @attach: Called during drm_bridge_attach
  */
 struct drm_bridge_funcs {
+	/**
+	 * @attach:
+	 *
+	 * This callback is invoked whenever our bridge is being attached to a
+	 * &drm_encoder.
+	 *
+	 * The attach callback is optional.
+	 *
+	 * RETURNS:
+	 *
+	 * Zero on success, error code on failure.
+	 */
 	int (*attach)(struct drm_bridge *bridge);
 
 	/**
+	 * @detach:
+	 *
+	 * This callback is invoked whenever our bridge is being detached from a
+	 * &drm_encoder.
+	 *
+	 * The detach callback is optional.
+	 */
+	void (*detach)(struct drm_bridge *bridge);
+
+	/**
 	 * @mode_fixup:
 	 *
 	 * This callback is used to validate and adjust a mode. The parameter
@@ -1781,6 +1060,8 @@
 	 * this function passes all other callbacks must succeed for this
 	 * configuration.
 	 *
+	 * The mode_fixup callback is optional.
+	 *
 	 * NOTE:
 	 *
 	 * This function is called in the check phase of atomic modesets, which
@@ -2650,12 +1931,6 @@
 	 */
 	struct drm_property *aspect_ratio_property;
 	/**
-	 * @dirty_info_property: Optional connector property to give userspace a
-	 * hint that the DIRTY_FB ioctl should be used.
-	 */
-	struct drm_property *dirty_info_property;
-
-	/**
 	 * @degamma_lut_property: Optional CRTC property to set the LUT used to
 	 * convert the framebuffer's colors to linear gamma.
 	 */
@@ -2741,19 +2016,11 @@
 		for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
 
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
-#define obj_to_connector(x) container_of(x, struct drm_connector, base)
-#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
 #define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
 #define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
-#define obj_to_property(x) container_of(x, struct drm_property, base)
 #define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
 #define obj_to_plane(x) container_of(x, struct drm_plane, base)
 
-struct drm_prop_enum_list {
-	int type;
-	char *name;
-};
-
 extern __printf(6, 7)
 int drm_crtc_init_with_planes(struct drm_device *dev,
 			      struct drm_crtc *crtc,
@@ -2787,50 +2054,6 @@
 	return 1 << drm_crtc_index(crtc);
 }
 
-int drm_connector_init(struct drm_device *dev,
-		       struct drm_connector *connector,
-		       const struct drm_connector_funcs *funcs,
-		       int connector_type);
-int drm_connector_register(struct drm_connector *connector);
-void drm_connector_unregister(struct drm_connector *connector);
-
-extern void drm_connector_cleanup(struct drm_connector *connector);
-static inline unsigned drm_connector_index(struct drm_connector *connector)
-{
-	return connector->index;
-}
-
-extern __printf(5, 6)
-int drm_encoder_init(struct drm_device *dev,
-		     struct drm_encoder *encoder,
-		     const struct drm_encoder_funcs *funcs,
-		     int encoder_type, const char *name, ...);
-
-/**
- * drm_encoder_index - find the index of a registered encoder
- * @encoder: encoder to find index for
- *
- * Given a registered encoder, return the index of that encoder within a DRM
- * device's list of encoders.
- */
-static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
-{
-	return encoder->index;
-}
-
-/**
- * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
- * @encoder: encoder to test
- * @crtc: crtc to test
- *
- * Return false if @encoder can't be driven by @crtc, true otherwise.
- */
-static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
-				       struct drm_crtc *crtc)
-{
-	return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
-}
-
 extern __printf(8, 9)
 int drm_universal_plane_init(struct drm_device *dev,
 			     struct drm_plane *plane,
@@ -2866,105 +2089,15 @@
 extern int drm_crtc_force_disable(struct drm_crtc *crtc);
 extern int drm_crtc_force_disable_all(struct drm_device *dev);
 
-extern void drm_encoder_cleanup(struct drm_encoder *encoder);
-
-extern const char *drm_get_connector_status_name(enum drm_connector_status status);
-extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
-extern const char *drm_get_dpms_name(int val);
-extern const char *drm_get_dvi_i_subconnector_name(int val);
-extern const char *drm_get_dvi_i_select_name(int val);
-extern const char *drm_get_tv_subconnector_name(int val);
-extern const char *drm_get_tv_select_name(int val);
 extern void drm_mode_config_init(struct drm_device *dev);
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 
-extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
-						const char *path);
-int drm_mode_connector_set_tile_property(struct drm_connector *connector);
-extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
-						   const struct edid *edid);
-
-extern int drm_display_info_set_bus_formats(struct drm_display_info *info,
-					    const u32 *formats,
-					    unsigned int num_formats);
-
-static inline bool drm_property_type_is(struct drm_property *property,
-		uint32_t type)
-{
-	/* instanceof for props.. handles extended type vs original types: */
-	if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
-		return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
-	return property->flags & type;
-}
-
-extern int drm_object_property_set_value(struct drm_mode_object *obj,
-					 struct drm_property *property,
-					 uint64_t val);
-extern int drm_object_property_get_value(struct drm_mode_object *obj,
-					 struct drm_property *property,
-					 uint64_t *value);
-extern int drm_framebuffer_init(struct drm_device *dev,
-				struct drm_framebuffer *fb,
-				const struct drm_framebuffer_funcs *funcs);
-extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
-						      uint32_t id);
-extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
-extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
-extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
-
-extern void drm_object_attach_property(struct drm_mode_object *obj,
-				       struct drm_property *property,
-				       uint64_t init_val);
-extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
-						const char *name, int num_values);
-extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
-					 const char *name,
-					 const struct drm_prop_enum_list *props,
-					 int num_values);
-struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
-					 int flags, const char *name,
-					 const struct drm_prop_enum_list *props,
-					 int num_props,
-					 uint64_t supported_bits);
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
-					 const char *name,
-					 uint64_t min, uint64_t max);
-struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
-					 int flags, const char *name,
-					 int64_t min, int64_t max);
-struct drm_property *drm_property_create_object(struct drm_device *dev,
-					 int flags, const char *name, uint32_t type);
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
-					 const char *name);
-struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
-                                                   size_t length,
-                                                   const void *data);
-struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
-                                                   uint32_t id);
-struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
-void drm_property_unreference_blob(struct drm_property_blob *blob);
-extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
-extern int drm_property_add_enum(struct drm_property *property, int index,
-				 uint64_t value, const char *name);
-extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
-extern int drm_mode_create_tv_properties(struct drm_device *dev,
-					 unsigned int num_modes,
-					 const char * const modes[]);
-extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
-extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
-extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
-extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
-
-extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
-					     struct drm_encoder *encoder);
 extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
 					 int gamma_size);
 
 extern int drm_mode_set_config_internal(struct drm_mode_set *set);
 
-extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
-
 extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
 							 char topology[8]);
 extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
@@ -2993,11 +2126,6 @@
 					     unsigned int zpos);
 
 /* Helpers */
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
-					     uint32_t id, uint32_t type);
-void drm_mode_object_reference(struct drm_mode_object *obj);
-void drm_mode_object_unreference(struct drm_mode_object *obj);
-
 static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
 		uint32_t id)
 {
@@ -3014,38 +2142,6 @@
 	return mo ? obj_to_crtc(mo) : NULL;
 }
 
-static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
-	uint32_t id)
-{
-	struct drm_mode_object *mo;
-	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
-	return mo ? obj_to_encoder(mo) : NULL;
-}
-
-/**
- * drm_connector_lookup - lookup connector object
- * @dev: DRM device
- * @id: connector object id
- *
- * This function looks up the connector object specified by id
- * add takes a reference to it.
- */
-static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
-		uint32_t id)
-{
-	struct drm_mode_object *mo;
-	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
-	return mo ? obj_to_connector(mo) : NULL;
-}
-
-static inline struct drm_property *drm_property_find(struct drm_device *dev,
-		uint32_t id)
-{
-	struct drm_mode_object *mo;
-	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
-	return mo ? obj_to_property(mo) : NULL;
-}
-
 /*
  * Extract a degamma/gamma LUT value provided by user and round it to the
  * precision supported by the hardware.
@@ -3065,61 +2161,6 @@
 	return clamp_val(val, 0, max);
 }
 
-/**
- * drm_framebuffer_reference - incr the fb refcnt
- * @fb: framebuffer
- *
- * This functions increments the fb's refcount.
- */
-static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
-	drm_mode_object_reference(&fb->base);
-}
-
-/**
- * drm_framebuffer_unreference - unref a framebuffer
- * @fb: framebuffer to unref
- *
- * This functions decrements the fb's refcount and frees it if it drops to zero.
- */
-static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
-	drm_mode_object_unreference(&fb->base);
-}
-
-/**
- * drm_framebuffer_read_refcount - read the framebuffer reference count.
- * @fb: framebuffer
- *
- * This functions returns the framebuffer's reference count.
- */
-static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
-{
-	return atomic_read(&fb->base.refcount.refcount);
-}
-
-/**
- * drm_connector_reference - incr the connector refcnt
- * @connector: connector
- *
- * This function increments the connector's refcount.
- */
-static inline void drm_connector_reference(struct drm_connector *connector)
-{
-	drm_mode_object_reference(&connector->base);
-}
-
-/**
- * drm_connector_unreference - unref a connector
- * @connector: connector to unref
- *
- * This function decrements the connector's refcount and frees it if it drops to zero.
- */
-static inline void drm_connector_unreference(struct drm_connector *connector)
-{
-	drm_mode_object_unreference(&connector->base);
-}
-
 /* Plane list iterator for legacy (overlay only) planes. */
 #define drm_for_each_legacy_plane(plane, dev) \
 	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
@@ -3196,6 +2237,7 @@
 extern void drm_bridge_remove(struct drm_bridge *bridge);
 extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
 extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
+extern void drm_bridge_detach(struct drm_bridge *bridge);
 
 bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
 			const struct drm_display_mode *mode,
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 4b37afa..982c299 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -41,6 +41,7 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
 
 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
 extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
@@ -53,11 +54,6 @@
 
 extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
-extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
-
-extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-					   const struct drm_mode_fb_cmd2 *mode_cmd);
-
 extern void drm_helper_resume_force_mode(struct drm_device *dev);
 
 int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
new file mode 100644
index 0000000..fce0203
--- /dev/null
+++ b/include/drm/drm_encoder.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_ENCODER_H__
+#define __DRM_ENCODER_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+/**
+ * struct drm_encoder_funcs - encoder controls
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
+struct drm_encoder_funcs {
+	/**
+	 * @reset:
+	 *
+	 * Reset encoder hardware and software state to off. This function isn't
+	 * called by the core directly, only through drm_mode_config_reset().
+	 * It's not a helper hook only for historical reasons.
+	 */
+	void (*reset)(struct drm_encoder *encoder);
+
+	/**
+	 * @destroy:
+	 *
+	 * Clean up encoder resources. This is only called at driver unload time
+	 * through drm_mode_config_cleanup() since an encoder cannot be
+	 * hotplugged in DRM.
+	 */
+	void (*destroy)(struct drm_encoder *encoder);
+
+	/**
+	 * @late_register:
+	 *
+	 * This optional hook can be used to register additional userspace
+	 * interfaces attached to the encoder like debugfs interfaces.
+	 * It is called late in the driver load sequence from drm_dev_register().
+	 * Everything added from this callback should be unregistered in
+	 * the early_unregister callback.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success, or a negative error code on failure.
+	 */
+	int (*late_register)(struct drm_encoder *encoder);
+
+	/**
+	 * @early_unregister:
+	 *
+	 * This optional hook should be used to unregister the additional
+	 * userspace interfaces attached to the encoder from
+	 * @late_register. It is called from drm_dev_unregister(),
+	 * early in the driver unload sequence, to disable userspace access
+	 * before data structures are torn down.
+	 */
+	void (*early_unregister)(struct drm_encoder *encoder);
+};
+
+/**
+ * struct drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @name: human readable name, can be overwritten by the driver
+ * @crtc: currently bound CRTC
+ * @bridge: bridge associated to the encoder
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
+ */
+struct drm_encoder {
+	struct drm_device *dev;
+	struct list_head head;
+
+	struct drm_mode_object base;
+	char *name;
+	/**
+	 * @encoder_type:
+	 *
+	 * One of the DRM_MODE_ENCODER_<foo> types in drm_mode.h. The following
+	 * encoder types are defined thus far:
+	 *
+	 * - DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A.
+	 *
+	 * - DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort.
+	 *
+	 * - DRM_MODE_ENCODER_LVDS for display panels, or in general any panel
+	 *   with a proprietary parallel connector.
+	 *
+	 * - DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video,
+	 *   Component, SCART).
+	 *
+	 * - DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
+	 *
+	 * - DRM_MODE_ENCODER_DSI for panels connected using the DSI serial bus.
+	 *
+	 * - DRM_MODE_ENCODER_DPI for panels connected using the DPI parallel
+	 *   bus.
+	 *
+	 * - DRM_MODE_ENCODER_DPMST for special fake encoders used to allow
+	 *   multiple DP MST streams to share one physical encoder.
+	 */
+	int encoder_type;
+
+	/**
+	 * @index: Position inside the mode_config.encoder_list, can be used
+	 * as an array index. It is invariant over the lifetime of the encoder.
+	 */
+	unsigned index;
+
+	/**
+	 * @possible_crtcs: Bitmask of potential CRTC bindings, using
+	 * drm_crtc_index() as the index into the bitfield. The driver must set
+	 * the bits for all &drm_crtc objects this encoder can be connected to
+	 * before calling drm_encoder_init().
+	 *
+	 * In reality almost every driver gets this wrong.
+	 *
+	 * Note that since CRTC objects can't be hotplugged the assigned indices
+	 * are stable and hence known before registering all objects.
+	 */
+	uint32_t possible_crtcs;
+
+	/**
+	 * @possible_clones: Bitmask of potential sibling encoders for cloning,
+	 * using drm_encoder_index() as the index into the bitfield. The driver
+	 * must set the bits for all &drm_encoder objects which can clone a
+	 * &drm_crtc together with this encoder before calling
+	 * drm_encoder_init(). Drivers should set the bit representing the
+	 * encoder itself, too. Cloning bits should be set such that when two
+	 * encoders can be used in a cloned configuration, each one has the
+	 * other's bit set.
+	 *
+	 * In reality almost every driver gets this wrong.
+	 *
+	 * Note that since encoder objects can't be hotplugged the assigned indices
+	 * are stable and hence known before registering all objects.
+	 */
+	uint32_t possible_clones;
+
+	struct drm_crtc *crtc;
+	struct drm_bridge *bridge;
+	const struct drm_encoder_funcs *funcs;
+	const struct drm_encoder_helper_funcs *helper_private;
+};
+
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+
+__printf(5, 6)
+int drm_encoder_init(struct drm_device *dev,
+		     struct drm_encoder *encoder,
+		     const struct drm_encoder_funcs *funcs,
+		     int encoder_type, const char *name, ...);
+
+/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+	return encoder->index;
+}
+
+/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
+static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc);
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Returns false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+				       struct drm_crtc *crtc)
+{
+	return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
+}
+
+/**
+ * drm_encoder_find - find a &drm_encoder
+ * @dev: DRM device
+ * @id: encoder id
+ *
+ * Returns the encoder with @id, NULL if it doesn't exist. Simple wrapper around
+ * drm_mode_object_find().
+ */
+static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
+						   uint32_t id)
+{
+	struct drm_mode_object *mo;
+
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+
+	return mo ? obj_to_encoder(mo) : NULL;
+}
+
+void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+#endif
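As a usage sketch for the new header (hypothetical foo_* driver, not part of
this series), wiring up @possible_crtcs and @possible_clones as the kerneldoc
above asks:

	static const struct drm_encoder_funcs foo_encoder_funcs = {
		.destroy = drm_encoder_cleanup,
	};

	static int foo_encoder_create(struct drm_device *dev,
				      struct drm_encoder *enc)
	{
		int ret;

		/* Per the kerneldoc, fill @possible_crtcs before init:
		 * this encoder can be driven by CRTC 0 or CRTC 1. */
		enc->possible_crtcs = (1 << 0) | (1 << 1);

		ret = drm_encoder_init(dev, enc, &foo_encoder_funcs,
				       DRM_MODE_ENCODER_TMDS, "foo-tmds");
		if (ret)
			return ret;

		/* No cloning, but include the encoder's own bit. */
		enc->possible_clones = 1 << drm_encoder_index(enc);
		return 0;
	}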
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 130c324..797fb5f 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -177,6 +177,7 @@
  *              the screen buffer
  * @dirty_lock: spinlock protecting @dirty_clip
  * @dirty_work: worker used to flush the framebuffer
+ * @resume_work: worker used during resume if the console lock is already taken
  *
  * This is the main structure used by the fbdev helpers. Drivers supporting
  * fbdev emulation should embed this into their overall driver structure.
@@ -197,6 +198,7 @@
 	struct drm_clip_rect dirty_clip;
 	spinlock_t dirty_lock;
 	struct work_struct dirty_work;
+	struct work_struct resume_work;
 
 	/**
 	 * @kernel_fb_list:
@@ -216,7 +218,6 @@
 };
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
 			   const struct drm_fb_helper_funcs *funcs);
 int drm_fb_helper_init(struct drm_device *dev,
@@ -264,7 +265,9 @@
 void drm_fb_helper_cfb_imageblit(struct fb_info *info,
 				 const struct fb_image *image);
 
-void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend);
+void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
+					bool suspend);
 
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
@@ -283,12 +286,6 @@
 int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector);
-static inline int
-drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
-					      const char *name, bool primary)
-{
-	return remove_conflicting_framebuffers(a, name, primary);
-}
 #else
 static inline int drm_fb_helper_modinit(void)
 {
@@ -424,7 +421,12 @@
 }
 
 static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
-					     int state)
+					     bool suspend)
+{
+}
+
+static inline void
+drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend)
 {
 }
 
@@ -483,11 +485,17 @@
 	return 0;
 }
 
+#endif
+
 static inline int
 drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
 					      const char *name, bool primary)
 {
+#if IS_REACHABLE(CONFIG_FB)
+	return remove_conflicting_framebuffers(a, name, primary);
+#else
 	return 0;
-}
 #endif
+}
+
 #endif
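A minimal sketch of the new suspend helper pair (hypothetical foo_device
embedding a drm_fb_helper): the _unlocked variant defers to @resume_work when
the console lock is contended, so it is usable from a resume path.

	static int foo_pm_suspend(struct device *dev)
	{
		struct foo_device *foo = dev_get_drvdata(dev);

		drm_fb_helper_set_suspend_unlocked(&foo->fb_helper, true);
		return 0;
	}

	static int foo_pm_resume(struct device *dev)
	{
		struct foo_device *foo = dev_get_drvdata(dev);

		drm_fb_helper_set_suspend_unlocked(&foo->fb_helper, false);
		return 0;
	}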
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 7f90a39..30c30fa 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,6 +25,7 @@
 #include <linux/types.h>
 #include <uapi/drm/drm_fourcc.h>
 
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
 void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
 int drm_format_num_planes(uint32_t format);
 int drm_format_plane_cpp(uint32_t format, int plane);
@@ -32,6 +33,6 @@
 int drm_format_vert_chroma_subsampling(uint32_t format);
 int drm_format_plane_width(int width, uint32_t format, int plane);
 int drm_format_plane_height(int height, uint32_t format, int plane);
-const char *drm_get_format_name(uint32_t format);
+char *drm_get_format_name(uint32_t format) __malloc;
 
 #endif /* __DRM_FOURCC_H__ */
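Caller-side sketch for the reworked drm_get_format_name(): with the __malloc
annotation the returned string is now allocated and must be freed by the
caller.

	char *name = drm_get_format_name(fb->pixel_format);

	DRM_DEBUG_KMS("framebuffer format %s\n", name);
	kfree(name);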
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
new file mode 100644
index 0000000..b2554c5
--- /dev/null
+++ b/include/drm/drm_framebuffer.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_FRAMEBUFFER_H__
+#define __DRM_FRAMEBUFFER_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+struct drm_framebuffer;
+struct drm_file;
+struct drm_device;
+
+/**
+ * struct drm_framebuffer_funcs - framebuffer hooks
+ */
+struct drm_framebuffer_funcs {
+	/**
+	 * @destroy:
+	 *
+	 * Clean up framebuffer resources, specifically also unreference the
+	 * backing storage. The core guarantees to call this function for every
+	 * framebuffer successfully created by ->fb_create() in
+	 * &drm_mode_config_funcs. Drivers must also call
+	 * drm_framebuffer_cleanup() to release DRM core resources for this
+	 * framebuffer.
+	 */
+	void (*destroy)(struct drm_framebuffer *framebuffer);
+
+	/**
+	 * @create_handle:
+	 *
+	 * Create a buffer handle in the driver-specific buffer manager (either
+	 * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
+	 * the core to implement the GETFB IOCTL, which for sufficiently
+	 * privileged users also returns a native buffer handle. This can
+	 * be used for seamless transitions between modesetting clients by
+	 * copying the current screen contents to a private buffer and blending
+	 * between that and the new contents.
+	 *
+	 * GEM based drivers should call drm_gem_handle_create() to create the
+	 * handle.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*create_handle)(struct drm_framebuffer *fb,
+			     struct drm_file *file_priv,
+			     unsigned int *handle);
+	/**
+	 * @dirty:
+	 *
+	 * Optional callback for the dirty fb IOCTL.
+	 *
+	 * Userspace can notify the driver via this callback that an area of the
+	 * framebuffer has changed and should be flushed to the display
+	 * hardware. This can also be used internally, e.g. by the fbdev
+	 * emulation, though that's not the case currently.
+	 *
+	 * See the documentation of struct drm_mode_fb_dirty_cmd in drm_mode.h
+	 * for more information, as the semantics and arguments have a
+	 * one-to-one mapping onto this function.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*dirty)(struct drm_framebuffer *framebuffer,
+		     struct drm_file *file_priv, unsigned flags,
+		     unsigned color, struct drm_clip_rect *clips,
+		     unsigned num_clips);
+};
+
+/**
+ * struct drm_framebuffer - frame buffer object
+ *
+ * Note that the fb is refcounted for the benefit of driver internals,
+ * for example on some hardware disabling a CRTC/plane is asynchronous, and
+ * scanout does not actually complete until the next vblank.  So some
+ * cleanup (like releasing the reference(s) on the backing GEM bo(s))
+ * should be deferred.  In cases like this, the driver would like to
+ * hold a ref to the fb even though it has already been removed from
+ * userspace perspective. See drm_framebuffer_reference() and
+ * drm_framebuffer_unreference().
+ *
+ * The refcount is stored inside the mode object @base.
+ */
+struct drm_framebuffer {
+	/**
+	 * @dev: DRM device this framebuffer belongs to
+	 */
+	struct drm_device *dev;
+	/**
+	 * @head: Place on the dev->mode_config.fb_list, access protected by
+	 * dev->mode_config.fb_lock.
+	 */
+	struct list_head head;
+
+	/**
+	 * @base: base modeset object structure, contains the reference count.
+	 */
+	struct drm_mode_object base;
+	/**
+	 * @funcs: framebuffer vfunc table
+	 */
+	const struct drm_framebuffer_funcs *funcs;
+	/**
+	 * @pitches: Line stride per buffer. For userspace-created objects this
+	 * is copied from drm_mode_fb_cmd2.
+	 */
+	unsigned int pitches[4];
+	/**
+	 * @offsets: Offset from buffer start to the actual pixel data in bytes,
+	 * per buffer. For userspace-created objects this is copied from
+	 * drm_mode_fb_cmd2.
+	 *
+	 * Note that this is a linear offset and does not take into account
+	 * tiling or the buffer layout per @modifier. It is meant to be used
+	 * when the actual pixel data for this framebuffer plane starts at an
+	 * offset, e.g. when multiple planes are allocated within the same
+	 * backing storage buffer object. For tiled layouts this generally means
+	 * @offsets must at least be tile-size aligned, but hardware often has
+	 * stricter requirements.
+	 *
+	 * This should not be used to specify x/y pixel offsets into the buffer
+	 * data (even for linear buffers). Specifying an x/y pixel offset is
+	 * instead done through the source rectangle in struct &drm_plane_state.
+	 */
+	unsigned int offsets[4];
+	/**
+	 * @modifier: Data layout modifier, per buffer. This is used to describe
+	 * tiling, or special layouts (like compression) of auxiliary
+	 * buffers. For userspace-created objects this is copied from
+	 * drm_mode_fb_cmd2.
+	 */
+	uint64_t modifier[4];
+	/**
+	 * @width: Logical width of the visible area of the framebuffer, in
+	 * pixels.
+	 */
+	unsigned int width;
+	/**
+	 * @height: Logical height of the visible area of the framebuffer, in
+	 * pixels.
+	 */
+	unsigned int height;
+	/**
+	 * @depth: Depth in bits per pixel for RGB formats. 0 for everything
+	 * else. Legacy information derived from @pixel_format; it is suggested to use
+	 * the DRM FOURCC codes and helper functions directly instead.
+	 */
+	unsigned int depth;
+	/**
+	 * @bits_per_pixel: Storage used bits per pixel for RGB formats. 0 for
+	 * everything else. Legacy information derived from @pixel_format; it is
+	 * suggested to use the DRM FOURCC codes and helper functions directly
+	 * instead.
+	 */
+	int bits_per_pixel;
+	/**
+	 * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or
+	 * DRM_MODE_FB_MODIFIERS.
+	 */
+	int flags;
+	/**
+	 * @pixel_format: DRM FOURCC code describing the pixel format.
+	 */
+	uint32_t pixel_format; /* fourcc format */
+	/**
+	 * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
+	 * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
+	 * universal plane.
+	 */
+	int hot_x;
+	/**
+	 * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
+	 * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
+	 * universal plane.
+	 */
+	int hot_y;
+	/**
+	 * @filp_head: Placed on struct &drm_file fbs list_head, protected by
+	 * fbs_lock in the same structure.
+	 */
+	struct list_head filp_head;
+};
+
+int drm_framebuffer_init(struct drm_device *dev,
+			 struct drm_framebuffer *fb,
+			 const struct drm_framebuffer_funcs *funcs);
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+					       uint32_t id);
+void drm_framebuffer_remove(struct drm_framebuffer *fb);
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
+ *
+ * This function increments the fb's refcount.
+ */
+static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+	drm_mode_object_reference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
+ *
+ * This function decrements the fb's refcount and frees it if it drops to zero.
+ */
+static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+	drm_mode_object_unreference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_read_refcount - read the framebuffer reference count.
+ * @fb: framebuffer
+ *
+ * This function returns the framebuffer's reference count.
+ */
+static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
+{
+	return atomic_read(&fb->base.refcount.refcount);
+}
+#endif
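A sketch of the refcounting pattern described in the struct comment above
(foo_flip_work is hypothetical): hold a reference across an asynchronous flip
so the backing storage outlives the userspace-visible removal.

	static void foo_queue_flip(struct foo_flip_work *work,
				   struct drm_framebuffer *fb)
	{
		drm_framebuffer_reference(fb);
		work->fb = fb;
	}

	static void foo_flip_complete(struct foo_flip_work *work)
	{
		drm_framebuffer_unreference(work->fb);
		work->fb = NULL;
	}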
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index fca1cd1..9f63736 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -210,8 +210,8 @@
  * drm_gem_object_unreference_unlocked().
  *
  * Drivers should never call this directly in their code. Instead they should
- * wrap it up into a driver_gem_object_unreference(struct driver_gem_object
- * *obj) wrapper function, and use that. Shared code should never call this, to
+ * wrap it up into a ``driver_gem_object_unreference(struct driver_gem_object
+ * *obj)`` wrapper function, and use that. Shared code should never call this, to
  * avoid breaking drivers by accident which still depend upon dev->struct_mutex
  * locking.
  */
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
new file mode 100644
index 0000000..be3d938
--- /dev/null
+++ b/include/drm/drm_mode_object.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_MODESET_H__
+#define __DRM_MODESET_H__
+
+#include <linux/kref.h>
+struct drm_object_properties;
+struct drm_property;
+
+/**
+ * struct drm_mode_object - base structure for modeset objects
+ * @id: userspace visible identifier
+ * @type: type of the object, one of DRM_MODE_OBJECT\_\*
+ * @properties: properties attached to this object, including values
+ * @refcount: reference count for objects with a dynamic lifetime
+ * @free_cb: free function callback, only set for objects with dynamic lifetime
+ *
+ * Base structure for modeset objects visible to userspace. Objects can be
+ * looked up using drm_mode_object_find(). Besides basic uapi interface
+ * properties like @id and @type it provides two services:
+ *
+ * - It tracks attached properties and their values. This is used by &drm_crtc,
+ *   &drm_plane and &drm_connector. Properties are attached by calling
+ *   drm_object_attach_property() before the object is visible to userspace.
+ *
+ * - For objects with dynamic lifetimes (as indicated by a non-NULL @free_cb) it
+ *   provides reference counting through drm_mode_object_reference() and
+ *   drm_mode_object_unreference(). This is used by &drm_framebuffer,
+ *   &drm_connector and &drm_property_blob. These objects provide specialized
+ *   reference counting wrappers.
+ */
+struct drm_mode_object {
+	uint32_t id;
+	uint32_t type;
+	struct drm_object_properties *properties;
+	struct kref refcount;
+	void (*free_cb)(struct kref *kref);
+};
+
+#define DRM_OBJECT_MAX_PROPERTY 24
+/**
+ * struct drm_object_properties - property tracking for &drm_mode_object
+ */
+struct drm_object_properties {
+	/**
+	 * @count: number of valid properties, must be less than or equal to
+	 * DRM_OBJECT_MAX_PROPERTY.
+	 */
+
+	int count;
+	/**
+	 * @properties: Array of pointers to &drm_property.
+	 *
+	 * NOTE: if we ever start dynamically destroying properties (i.e.
+	 * not at drm_mode_config_cleanup() time), then we'd have to do
+	 * a better job of detaching property from mode objects to avoid
+	 * dangling property pointers:
+	 */
+	struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
+
+	/**
+	 * @values: Array to store the property values, matching @properties. Do
+	 * not read/write values directly, but use
+	 * drm_object_property_get_value() and drm_object_property_set_value().
+	 *
+	 * Note that atomic drivers do not store mutable properties in this
+	 * array, but only the decoded values in the corresponding state
+	 * structure. The decoding is done using the ->atomic_get_property and
+	 * ->atomic_set_property hooks of the corresponding object. Hence atomic
+	 * drivers should not use drm_object_property_set_value() and
+	 * drm_object_property_get_value() on mutable properties, i.e. those
+	 * without the DRM_MODE_PROP_IMMUTABLE flag set.
+	 */
+	uint64_t values[DRM_OBJECT_MAX_PROPERTY];
+};
+
+/* Avoid boilerplate.  I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list)				\
+	const char *fnname(int val)				\
+	{							\
+		int i;						\
+		for (i = 0; i < ARRAY_SIZE(list); i++) {	\
+			if (list[i].type == val)		\
+				return list[i].name;		\
+		}						\
+		return "(unknown)";				\
+	}
+
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+					     uint32_t id, uint32_t type);
+void drm_mode_object_reference(struct drm_mode_object *obj);
+void drm_mode_object_unreference(struct drm_mode_object *obj);
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+				  struct drm_property *property,
+				  uint64_t val);
+int drm_object_property_get_value(struct drm_mode_object *obj,
+				  struct drm_property *property,
+				  uint64_t *value);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+				struct drm_property *property,
+				uint64_t init_val);
+#endif
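Attachment sketch (hypothetical property name): properties must be attached
with drm_object_attach_property() before the object is visible to userspace.

	struct drm_property *prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE,
					 "hypothetical-max-lanes", 0, 4);
	if (!prop)
		return -ENOMEM;
	drm_object_attach_property(&connector->base, prop, 2);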
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 48e1a56..011f199 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -27,6 +27,9 @@
 #ifndef __DRM_MODES_H__
 #define __DRM_MODES_H__
 
+#include <drm/drm_mode_object.h>
+#include <drm/drm_connector.h>
+
 /*
  * Note on terminology:  here, for brevity and convenience, we refer to connector
  * control chips as 'CRTCs'.  They can control any type of connector, VGA, LVDS,
@@ -400,21 +403,6 @@
 	enum hdmi_picture_aspect picture_aspect_ratio;
 };
 
-/* mode specified on the command line */
-struct drm_cmdline_mode {
-	bool specified;
-	bool refresh_specified;
-	bool bpp_specified;
-	int xres, yres;
-	int bpp;
-	int refresh;
-	bool rb;
-	bool interlace;
-	bool cvt;
-	bool margins;
-	enum drm_connector_force force;
-};
-
 /**
  * drm_mode_is_stereo - check for stereo mode flags
  * @mode: drm_display_mode to check
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
new file mode 100644
index 0000000..b8051d5
--- /dev/null
+++ b/include/drm/drm_modeset_helper.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_KMS_HELPER_H__
+#define __DRM_KMS_HELPER_H__
+
+#include <drm/drmP.h>
+
+void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+				    const struct drm_mode_fb_cmd2 *mode_cmd);
+
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+		  const struct drm_crtc_funcs *funcs);
+
+#endif
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 686feec..10e449c 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -266,6 +266,8 @@
 	 * disable anything at the CRTC level. To ensure that runtime PM
 	 * handling (using either DPMS or the new "ACTIVE" property) works
 	 * @disable must be the inverse of @enable for atomic drivers.
+	 * Atomic drivers should consider using @atomic_disable instead of
+	 * this hook.
 	 *
 	 * NOTE:
 	 *
@@ -391,6 +393,28 @@
 	 */
 	void (*atomic_flush)(struct drm_crtc *crtc,
 			     struct drm_crtc_state *old_crtc_state);
+
+	/**
+	 * @atomic_disable:
+	 *
+	 * This callback should be used to disable the CRTC. With the atomic
+	 * drivers it is called after all encoders connected to this CRTC have
+	 * been shut off already using their own ->disable hook. If that
+	 * sequence is too simple, drivers can just add their own hooks and call
+	 * it from this CRTC callback here by looping over all encoders
+	 * connected to it using for_each_encoder_on_crtc().
+	 *
+	 * This hook is used only by atomic helpers. Atomic drivers don't
+	 * need to implement it if there's no need to disable anything at the
+	 * CRTC level.
+	 *
+	 * Compared to @disable, this hook provides the additional input
+	 * parameter @old_crtc_state which can be used to access the old
+	 * state. Atomic drivers should consider using this hook instead
+	 * of @disable.
+	 */
+	void (*atomic_disable)(struct drm_crtc *crtc,
+			       struct drm_crtc_state *old_crtc_state);
 };
 
 /**
@@ -855,7 +879,7 @@
 	 * everything else must complete successfully.
 	 */
 	int (*prepare_fb)(struct drm_plane *plane,
-			  const struct drm_plane_state *new_state);
+			  struct drm_plane_state *new_state);
 	/**
 	 * @cleanup_fb:
 	 *
@@ -866,7 +890,7 @@
 	 * transitional plane helpers, but it is optional.
 	 */
 	void (*cleanup_fb)(struct drm_plane *plane,
-			   const struct drm_plane_state *old_state);
+			   struct drm_plane_state *old_state);
 
 	/**
 	 * @atomic_check:
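A sketch of the new @atomic_disable hook (foo_* names hypothetical), showing
the extra old-state parameter compared to @disable:

	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_crtc_state *old_crtc_state)
	{
		/* old_crtc_state can be inspected to see what was running. */
		foo_hw_stop_scanout(crtc);
		drm_crtc_vblank_off(crtc);
	}

	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		.atomic_flush = foo_crtc_atomic_flush,
		.atomic_disable = foo_crtc_atomic_disable,
	};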
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index fbc8ecb..c189596 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -27,6 +27,7 @@
 #include <drm/drm_rect.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
 
 /*
  * Drivers that don't allow primary plane scaling may pass this macro in place
@@ -37,9 +38,6 @@
  */
 #define DRM_PLANE_HELPER_NO_SCALING (1<<16)
 
-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
-		  const struct drm_crtc_funcs *funcs);
-
 int drm_plane_helper_check_state(struct drm_plane_state *state,
 				 const struct drm_rect *clip,
 				 int min_scale, int max_scale,
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
new file mode 100644
index 0000000..30ab289
--- /dev/null
+++ b/include/drm/drm_property.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_PROPERTY_H__
+#define __DRM_PROPERTY_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+/**
+ * struct drm_property_enum - symbolic values for enumerations
+ * @value: numeric property value for this enum entry
+ * @head: list of enum values, linked to enum_list in &drm_property
+ * @name: symbolic name for the enum
+ *
+ * For enumeration and bitmask properties this structure stores the symbolic
+ * decoding for each value. This is used for example for the rotation property.
+ */
+struct drm_property_enum {
+	uint64_t value;
+	struct list_head head;
+	char name[DRM_PROP_NAME_LEN];
+};
+
+/**
+ * struct drm_property - modeset object property
+ *
+ * This structure represents a modeset object property. It combines the name
+ * of the property with the set of permissible values. This means that when a
+ * driver wants to use a property with the same name on different objects, but
+ * with different value ranges, it must create a property for each one. An
+ * example would be rotation of &drm_plane, when e.g. the primary plane cannot
+ * be rotated. But if both the name and the value range match, then the same
+ * property structure can be instantiated multiple times for the same object.
+ * Userspace must be able to cope with this and cannot assume that the same
+ * symbolic property will have the same modeset object ID on all modeset
+ * objects.
+ *
+ * Properties are created by one of the special functions, as explained in
+ * detail in the @flags structure member.
+ *
+ * To actually expose a property it must be attached to each object using
+ * drm_object_attach_property(). Currently properties can only be attached to
+ * &drm_connector, &drm_crtc and &drm_plane.
+ *
+ * Properties are also used as the generic metadata transport for the atomic
+ * IOCTL. Everything that was set directly in structures in the legacy modeset
+ * IOCTLs (like the plane source or destination windows, or e.g. the links to
+ * the CRTC) is exposed as a property with the DRM_MODE_PROP_ATOMIC flag set.
+ */
+struct drm_property {
+	/**
+	 * @head: per-device list of properties, for cleanup.
+	 */
+	struct list_head head;
+
+	/**
+	 * @base: base KMS object
+	 */
+	struct drm_mode_object base;
+
+	/**
+	 * @flags:
+	 *
+	 * Property flags and type. A property needs to be one of the following
+	 * types:
+	 *
+	 * DRM_MODE_PROP_RANGE
+	 *     Range properties report their minimum and maximum admissible unsigned values.
+	 *     The KMS core verifies that values set by application fit in that
+	 *     range. The range is unsigned. Range properties are created using
+	 *     drm_property_create_range().
+	 *
+	 * DRM_MODE_PROP_SIGNED_RANGE
+	 *     Range properties report their minimum and maximum admissible signed values.
+	 *     The KMS core verifies that values set by application fit in that
+	 *     range. The range is signed. Range properties are created using
+	 *     drm_property_create_signed_range().
+	 *
+	 * DRM_MODE_PROP_ENUM
+	 *     Enumerated properties take a numerical value that ranges from 0 to
+	 *     the number of enumerated values defined by the property minus one,
+	 *     and associate a free-formed string name to each value. Applications
+	 *     can retrieve the list of defined value-name pairs and use the
+	 *     numerical value to get and set property instance values. Enum
+	 *     properties are created using drm_property_create_enum().
+	 *
+	 * DRM_MODE_PROP_BITMASK
+	 *     Bitmask properties are enumeration properties that additionally
+	 *     restrict all enumerated values to the 0..63 range. Bitmask property
+	 *     instance values combine one or more of the enumerated bits defined
+	 *     by the property. Bitmask properties are created using
+	 *     drm_property_create_bitmask().
+	 *
+	 * DRM_MODE_PROP_OBJECT
+	 *     Object properties are used to link modeset objects. This is used
+	 *     extensively in the atomic support to create the display pipeline,
+	 *     by linking &drm_framebuffer to &drm_plane, &drm_plane to
+	 *     &drm_crtc and &drm_connector to &drm_crtc. An object property can
+	 *     only link to a specific type of &drm_mode_object, this limit is
+	 *     enforced by the core. Object properties are created using
+	 *     drm_property_create_object().
+	 *
+	 *     Object properties work like blob properties, but in a more
+	 *     general fashion. They are limited to atomic drivers and must have
+	 *     the DRM_MODE_PROP_ATOMIC flag set.
+	 *
+	 * DRM_MODE_PROP_BLOB
+	 *     Blob properties store a binary blob without any format restriction.
+	 *     The binary blobs are created as KMS standalone objects, and blob
+	 *     property instance values store the ID of their associated blob
+	 *     object. Blob properties are created by calling
+	 *     drm_property_create() with DRM_MODE_PROP_BLOB as the type.
+	 *
+	 *     Actual blob objects to contain blob data are created using
+	 *     drm_property_create_blob(), or through the corresponding IOCTL.
+	 *
+	 *     Besides the built-in limit to only accept blob objects, blob
+	 *     properties work exactly like object properties. The only reason
+	 *     blob properties exist is backwards compatibility with existing
+	 *     userspace.
+	 *
+	 * In addition a property can have any combination of the below flags:
+	 *
+	 * DRM_MODE_PROP_ATOMIC
+	 *     Set for properties which encode atomic modeset state. Such
+	 *     properties are not exposed to legacy userspace.
+	 *
+	 * DRM_MODE_PROP_IMMUTABLE
+	 *     Set for properties whose values cannot be changed by
+	 *     userspace. The kernel is allowed to update the value of these
+	 *     properties. This is generally used to expose probe state to
+	 *     userspace, e.g. the EDID, or the connector path property on DP
+	 *     MST sinks.
+	 */
+	uint32_t flags;
+
+	/**
+	 * @name: symbolic name of the property
+	 */
+	char name[DRM_PROP_NAME_LEN];
+
+	/**
+	 * @num_values: size of the @values array.
+	 */
+	uint32_t num_values;
+
+	/**
+	 * @values:
+	 *
+	 * Array with limits and values for the property. The
+	 * interpretation of these limits is dependent upon the type per @flags.
+	 */
+	uint64_t *values;
+
+	/**
+	 * @dev: DRM device
+	 */
+	struct drm_device *dev;
+
+	/**
+	 * @enum_list:
+	 *
+	 * List of &drm_prop_enum_list structures with the symbolic names for
+	 * enum and bitmask values.
+	 */
+	struct list_head enum_list;
+};
+
+/**
+ * struct drm_property_blob - Blob data for &drm_property
+ * @base: base KMS object
+ * @dev: DRM device
+ * @head_global: entry on the global blob list in &drm_mode_config
+ *	property_blob_list.
+ * @head_file: entry on the per-file blob list in &drm_file blobs list.
+ * @length: size of the blob in bytes, invariant over the lifetime of the object
+ * @data: actual data, embedded at the end of this structure
+ *
+ * Blobs are used to store bigger values than what fits directly into the 64
+ * bits available for a &drm_property.
+ *
+ * Blobs are reference counted using drm_property_reference_blob() and
+ * drm_property_unreference_blob(). They are created using
+ * drm_property_create_blob().
+ */
+struct drm_property_blob {
+	struct drm_mode_object base;
+	struct drm_device *dev;
+	struct list_head head_global;
+	struct list_head head_file;
+	size_t length;
+	unsigned char data[];
+};
+
+struct drm_prop_enum_list {
+	int type;
+	char *name;
+};
+
+#define obj_to_property(x) container_of(x, struct drm_property, base)
+
+/**
+ * drm_property_type_is - check the type of a property
+ * @property: property to check
+ * @type: property type to compare with
+ *
+ * This is a helper function because the uapi encoding of property types is
+ * a bit special for historical reasons.
+ */
+static inline bool drm_property_type_is(struct drm_property *property,
+					uint32_t type)
+{
+	/* instanceof for props.. handles extended type vs original types: */
+	if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+		return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
+	return property->flags & type;
+}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+					 const char *name, int num_values);
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+					      const char *name,
+					      const struct drm_prop_enum_list *props,
+					      int num_values);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+						 int flags, const char *name,
+						 const struct drm_prop_enum_list *props,
+						 int num_props,
+						 uint64_t supported_bits);
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+					       const char *name,
+					       uint64_t min, uint64_t max);
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+						      int flags, const char *name,
+						      int64_t min, int64_t max);
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+						int flags, const char *name, uint32_t type);
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+					      const char *name);
+int drm_property_add_enum(struct drm_property *property, int index,
+			  uint64_t value, const char *name);
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+
+struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
+						   size_t length,
+						   const void *data);
+struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
+						   uint32_t id);
+int drm_property_replace_global_blob(struct drm_device *dev,
+				     struct drm_property_blob **replace,
+				     size_t length,
+				     const void *data,
+				     struct drm_mode_object *obj_holds_id,
+				     struct drm_property *prop_holds_id);
+struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
+void drm_property_unreference_blob(struct drm_property_blob *blob);
+
+/**
+ * drm_property_find - find property object
+ * @dev: DRM device
+ * @id: property object id
+ *
+ * This function looks up the property object specified by id and returns it.
+ */
+static inline struct drm_property *drm_property_find(struct drm_device *dev,
+						     uint32_t id)
+{
+	struct drm_mode_object *mo;
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
+	return mo ? obj_to_property(mo) : NULL;
+}
+
+#endif
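Enum-property sketch (hypothetical names), combining drm_property_create_enum()
with the attach step from drm_mode_object.h:

	static const struct drm_prop_enum_list foo_scaling_names[] = {
		{ 0, "none" },
		{ 1, "full" },
		{ 2, "aspect" },
	};

	prop = drm_property_create_enum(dev, 0, "hypothetical-scaling",
					foo_scaling_names,
					ARRAY_SIZE(foo_scaling_names));
	if (!prop)
		return -ENOMEM;
	drm_object_attach_property(&plane->base, prop, 0);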
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 2690397..5d112f7 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -60,6 +60,12 @@
 	 *
 	 * This function is called when the underlying plane state is updated.
 	 * This hook is optional.
+	 *
+	 * This is the function drivers should submit the
+	 * &drm_pending_vblank_event from, using either
+	 * drm_crtc_arm_vblank_event(), when the driver supports vblank
+	 * interrupt handling, or drm_crtc_send_vblank_event() directly in case
+	 * the hardware lacks vblank support entirely.
 	 */
 	void (*update)(struct drm_simple_display_pipe *pipe,
 		       struct drm_plane_state *plane_state);
@@ -85,6 +91,11 @@
 	const struct drm_simple_display_pipe_funcs *funcs;
 };
 
+int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
+					  struct drm_bridge *bridge);
+
+void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe);
+
 int drm_simple_display_pipe_init(struct drm_device *dev,
 			struct drm_simple_display_pipe *pipe,
 			const struct drm_simple_display_pipe_funcs *funcs,
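An @update sketch following the vblank-event note above (foo_hw_commit is
hypothetical); drivers without working vblank interrupts would call
drm_crtc_send_vblank_event() instead:

	static void foo_pipe_update(struct drm_simple_display_pipe *pipe,
				    struct drm_plane_state *plane_state)
	{
		struct drm_crtc *crtc = &pipe->crtc;

		foo_hw_commit(pipe, plane_state);

		if (crtc->state->event) {
			spin_lock_irq(&crtc->dev->event_lock);
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);
			spin_unlock_irq(&crtc->dev->event_lock);
			crtc->state->event = NULL;
		}
	}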
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
index 3e419d9..a254830 100644
--- a/include/drm/i2c/tda998x.h
+++ b/include/drm/i2c/tda998x.h
@@ -1,6 +1,24 @@
 #ifndef __DRM_I2C_TDA998X_H__
 #define __DRM_I2C_TDA998X_H__
 
+#include <linux/hdmi.h>
+#include <dt-bindings/display/tda998x.h>
+
+enum {
+	AFMT_UNUSED =	0,
+	AFMT_SPDIF =	TDA998x_SPDIF,
+	AFMT_I2S =	TDA998x_I2S,
+};
+
+struct tda998x_audio_params {
+	u8 config;
+	u8 format;
+	unsigned sample_width;
+	unsigned sample_rate;
+	struct hdmi_audio_infoframe cea;
+	u8 status[5];
+};
+
 struct tda998x_encoder_params {
 	u8 swap_b:3;
 	u8 mirr_b:1;
@@ -15,16 +33,7 @@
 	u8 swap_e:3;
 	u8 mirr_e:1;
 
-	u8 audio_cfg;
-	u8 audio_clk_cfg;
-	u8 audio_frame[6];
-
-	enum {
-		AFMT_SPDIF,
-		AFMT_I2S
-	} audio_format;
-
-	unsigned audio_sample_rate;
+	struct tda998x_audio_params audio_params;
 };
 
 #endif
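Board-code sketch for the reshuffled platform data (values illustrative): the
audio fields that previously lived directly in tda998x_encoder_params now go
through struct tda998x_audio_params.

	static struct tda998x_encoder_params foo_tda998x_pdata = {
		.audio_params = {
			.format = AFMT_I2S,
			.config = 0x03,	/* AP_ENA value, board specific */
			.sample_width = 24,
			.sample_rate = 48000,
		},
	};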
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 6f2c598..9eb940d 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -45,37 +45,7 @@
 
 struct drm_mm_node;
 
-/**
- * struct ttm_place
- *
- * @fpfn:	first valid page frame number to put the object
- * @lpfn:	last valid page frame number to put the object
- * @flags:	memory domain and caching flags for the object
- *
- * Structure indicating a possible place to put an object.
- */
-struct ttm_place {
-	unsigned	fpfn;
-	unsigned	lpfn;
-	uint32_t	flags;
-};
-
-/**
- * struct ttm_placement
- *
- * @num_placement:	number of preferred placements
- * @placement:		preferred placements
- * @num_busy_placement:	number of preferred placements when need to evict buffer
- * @busy_placement:	preferred placements when need to evict buffer
- *
- * Structure indicating the placement you request for an object.
- */
-struct ttm_placement {
-	unsigned		num_placement;
-	const struct ttm_place	*placement;
-	unsigned		num_busy_placement;
-	const struct ttm_place	*busy_placement;
-};
+struct ttm_placement;
 
 /**
  * struct ttm_bus_placement
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 99c6d01..4f0a921 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -133,7 +133,6 @@
  * struct ttm_dma_tt
  *
  * @ttm: Base ttm_tt struct.
- * @cpu_address: The CPU address of the pages
  * @dma_address: The DMA (bus) addresses of the pages
  * @pages_list: used by some page allocation backend
  *
@@ -143,7 +142,6 @@
  */
 struct ttm_dma_tt {
 	struct ttm_tt ttm;
-	void **cpu_address;
 	dma_addr_t *dma_address;
 	struct list_head pages_list;
 };
@@ -961,7 +959,6 @@
  * ttm_bo_move_ttm
  *
  * @bo: A pointer to a struct ttm_buffer_object.
- * @evict: 1: This is an eviction. Don't try to pipeline.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
@@ -977,14 +974,13 @@
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-			   bool evict, bool interruptible, bool no_wait_gpu,
+			   bool interruptible, bool no_wait_gpu,
 			   struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
- * @evict: 1: This is an eviction. Don't try to pipeline.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
@@ -1000,8 +996,7 @@
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict, bool interruptible,
-			      bool no_wait_gpu,
+			      bool interruptible, bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem);
 
 /**
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 72dcbe8..c452089 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -155,4 +155,5 @@
 extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
 				     struct page *page);
 extern size_t ttm_round_pot(size_t size);
+extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
 #endif
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index 8ed44f9..932be0c 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -30,6 +30,9 @@
 
 #ifndef _TTM_PLACEMENT_H_
 #define _TTM_PLACEMENT_H_
+
+#include <linux/types.h>
+
 /*
  * Memory regions for data placement.
  */
@@ -37,24 +40,12 @@
 #define TTM_PL_SYSTEM           0
 #define TTM_PL_TT               1
 #define TTM_PL_VRAM             2
-#define TTM_PL_PRIV0            3
-#define TTM_PL_PRIV1            4
-#define TTM_PL_PRIV2            5
-#define TTM_PL_PRIV3            6
-#define TTM_PL_PRIV4            7
-#define TTM_PL_PRIV5            8
-#define TTM_PL_SWAPPED          15
+#define TTM_PL_PRIV             3
 
 #define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
 #define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
 #define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
-#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
-#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
-#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
-#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
-#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
-#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
-#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
+#define TTM_PL_FLAG_PRIV        (1 << TTM_PL_PRIV)
 #define TTM_PL_MASK_MEM         0x0000FFFF
 
 /*
@@ -72,7 +63,6 @@
 #define TTM_PL_FLAG_CACHED      (1 << 16)
 #define TTM_PL_FLAG_UNCACHED    (1 << 17)
 #define TTM_PL_FLAG_WC          (1 << 18)
-#define TTM_PL_FLAG_SHARED      (1 << 20)
 #define TTM_PL_FLAG_NO_EVICT    (1 << 21)
 #define TTM_PL_FLAG_TOPDOWN     (1 << 22)
 
@@ -82,14 +72,36 @@
 
 #define TTM_PL_MASK_MEMTYPE     (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
 
-/*
- * Access flags to be used for CPU- and GPU- mappings.
- * The idea is that the TTM synchronization mechanism will
- * allow concurrent READ access and exclusive write access.
- * Currently GPU- and CPU accesses are exclusive.
+/**
+ * struct ttm_place
+ *
+ * @fpfn:	first valid page frame number to put the object
+ * @lpfn:	last valid page frame number to put the object
+ * @flags:	memory domain and caching flags for the object
+ *
+ * Structure indicating a possible place to put an object.
  */
+struct ttm_place {
+	unsigned	fpfn;
+	unsigned	lpfn;
+	uint32_t	flags;
+};
 
-#define TTM_ACCESS_READ         (1 << 0)
-#define TTM_ACCESS_WRITE        (1 << 1)
+/**
+ * struct ttm_placement
+ *
+ * @num_placement:	number of preferred placements
+ * @placement:		preferred placements
+ * @num_busy_placement:	number of preferred placements when need to evict buffer
+ * @busy_placement:	preferred placements when need to evict buffer
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+	unsigned		num_placement;
+	const struct ttm_place	*placement;
+	unsigned		num_busy_placement;
+	const struct ttm_place	*busy_placement;
+};
 
 #endif
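Placement sketch with the structs now living in ttm_placement.h (foo_* names
hypothetical): prefer write-combined VRAM, fall back to cached GTT under
eviction pressure.

	static const struct ttm_place foo_vram_place = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
	};

	static const struct ttm_place foo_busy_place = {
		.flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
	};

	static const struct ttm_placement foo_placement = {
		.num_placement = 1,
		.placement = &foo_vram_place,
		.num_busy_placement = 1,
		.busy_placement = &foo_busy_place,
	};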
diff --git a/include/dt-bindings/display/tda998x.h b/include/dt-bindings/display/tda998x.h
new file mode 100644
index 0000000..34757a3
--- /dev/null
+++ b/include/dt-bindings/display/tda998x.h
@@ -0,0 +1,7 @@
+#ifndef _DT_BINDINGS_TDA998X_H
+#define _DT_BINDINGS_TDA998X_H
+
+#define TDA998x_SPDIF	1
+#define TDA998x_I2S	2
+
+#endif /*_DT_BINDINGS_TDA998X_H */
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 8c3b412..ee162e3 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -73,34 +73,6 @@
 					   unsigned int decodes) { };
 #endif
 
-/**
- *     vga_get         - acquire & locks VGA resources
- *
- *     @pdev: pci device of the VGA card or NULL for the system default
- *     @rsrc: bit mask of resources to acquire and lock
- *     @interruptible: blocking should be interruptible by signals ?
- *
- *     This function acquires VGA resources for the given
- *     card and mark those resources locked. If the resource requested
- *     are "normal" (and not legacy) resources, the arbiter will first check
- *     whether the card is doing legacy decoding for that type of resource. If
- *     yes, the lock is "converted" into a legacy resource lock.
- *     The arbiter will first look for all VGA cards that might conflict
- *     and disable their IOs and/or Memory access, including VGA forwarding
- *     on P2P bridges if necessary, so that the requested resources can
- *     be used. Then, the card is marked as locking these resources and
- *     the IO and/or Memory accesse are enabled on the card (including
- *     VGA forwarding on parent P2P bridges if any).
- *     This function will block if some conflicting card is already locking
- *     one of the required resources (or any resource on a different bus
- *     segment, since P2P bridges don't differenciate VGA memory and IO
- *     afaik). You can indicate whether this blocking should be interruptible
- *     by a signal (for userland interface) or not.
- *     Must not be called at interrupt time or in atomic context.
- *     If the card already owns the resources, the function succeeds.
- *     Nested calls are supported (a per-resource counter is maintained)
- */
-
 #if defined(CONFIG_VGA_ARB)
 extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
 #else
@@ -108,11 +80,14 @@
 #endif
 
 /**
- *     vga_get_interruptible
+ * vga_get_interruptible - shortcut to vga_get()
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
  *
- *     Shortcut to vga_get
+ * Shortcut to vga_get() with interruptible set to true.
+ *
+ * On success, release the VGA resource again with vga_put().
  */
-
 static inline int vga_get_interruptible(struct pci_dev *pdev,
 					unsigned int rsrc)
 {
@@ -120,47 +95,26 @@
 }
 
 /**
- *     vga_get_uninterruptible
+ * vga_get_uninterruptible - shortcut to vga_get()
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
  *
- *     Shortcut to vga_get
+ * Shortcut to vga_get() with interruptible set to false.
+ *
+ * On success, release the VGA resource again with vga_put().
  */
-
 static inline int vga_get_uninterruptible(struct pci_dev *pdev,
 					  unsigned int rsrc)
 {
        return vga_get(pdev, rsrc, 0);
 }
 
-/**
- *     vga_tryget      - try to acquire & lock legacy VGA resources
- *
- *     @pdev: pci devivce of VGA card or NULL for system default
- *     @rsrc: bit mask of resources to acquire and lock
- *
- *     This function performs the same operation as vga_get(), but
- *     will return an error (-EBUSY) instead of blocking if the resources
- *     are already locked by another card. It can be called in any context
- */
-
 #if defined(CONFIG_VGA_ARB)
 extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
 #else
 static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
 #endif
 
-/**
- *     vga_put         - release lock on legacy VGA resources
- *
- *     @pdev: pci device of VGA card or NULL for system default
- *     @rsrc: but mask of resource to release
- *
- *     This function releases resources previously locked by vga_get()
- *     or vga_tryget(). The resources aren't disabled right away, so
- *     that a subsequence vga_get() on the same card will succeed
- *     immediately. Resources have a counter, so locks are only
- *     released if the counter reaches 0.
- */
-
 #if defined(CONFIG_VGA_ARB)
 extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
 #else
@@ -168,25 +122,6 @@
 #endif
 
 
-/**
- *     vga_default_device
- *
- *     This can be defined by the platform. The default implementation
- *     is rather dumb and will probably only work properly on single
- *     vga card setups and/or x86 platforms.
- *
- *     If your VGA default device is not PCI, you'll have to return
- *     NULL here. In this case, I assume it will not conflict with
- *     any PCI card. If this is not true, I'll have to define two archs
- *     hooks for enabling/disabling the VGA default device if that is
- *     possible. This may be a problem with real _ISA_ VGA cards, in
- *     addition to a PCI one. I don't know at this point how to deal
- *     with that card. Can theirs IOs be disabled at all ? If not, then
- *     I suppose it's a matter of having the proper arch hook telling
- *     us about it, so we basically never allow anybody to succeed a
- *     vga_get()...
- */
-
 #ifdef CONFIG_VGA_ARB
 extern struct pci_dev *vga_default_device(void);
 extern void vga_set_default_device(struct pci_dev *pdev);
@@ -195,14 +130,11 @@
 static inline void vga_set_default_device(struct pci_dev *pdev) { };
 #endif
 
-/**
- *     vga_conflicts
- *
- *     Architectures should define this if they have several
- *     independent PCI domains that can afford concurrent VGA
- *     decoding
+/*
+ * Architectures should define this if they have several
+ * independent PCI domains that can afford concurrent VGA
+ * decoding
  */
-
 #ifndef __ARCH_HAS_VGA_CONFLICT
 static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
 {
@@ -210,34 +142,6 @@
 }
 #endif
 
-/**
- *	vga_client_register
- *
- *	@pdev: pci device of the VGA client
- *	@cookie: client cookie to be used in callbacks
- *	@irq_set_state: irq state change callback
- *	@set_vga_decode: vga decode change callback
- *
- * 	return value: 0 on success, -1 on failure
- * 	Register a client with the VGA arbitration logic
- *
- *	Clients have two callback mechanisms they can use.
- *	irq enable/disable callback -
- *		If a client can't disable its GPUs VGA resources, then we
- *		need to be able to ask it to turn off its irqs when we
- *		turn off its mem and io decoding.
- *	set_vga_decode
- *		If a client can disable its GPU VGA resource, it will
- *		get a callback from this to set the encode/decode state
- *
- * Rationale: we cannot disable VGA decode resources unconditionally
- * some single GPU laptops seem to require ACPI or BIOS access to the
- * VGA registers to control things like backlights etc.
- * Hopefully newer multi-GPU laptops do something saner, and desktops
- * won't have any special ACPI for this.
- * The driver will get a callback when VGA arbitration is first used
- * by userspace, since some older X servers have issues.
- */
 #if defined(CONFIG_VGA_ARB)
 int vga_client_register(struct pci_dev *pdev, void *cookie,
 			void (*irq_set_state)(void *cookie, bool state),
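The declaration continues with the set_vga_decode callback, whose tail is not
visible in this hunk. A hedged sketch of the registration flow the removed
comment describes (the mydrv_* callback names are hypothetical):

	static void mydrv_irq_set_state(void *cookie, bool enable)
	{
		/* mask/unmask the GPU interrupt while decode is disabled */
	}

	static unsigned int mydrv_set_vga_decode(void *cookie, bool decode)
	{
		/* switch mem/io decode and report what the card still decodes */
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	}

	/* returns 0 on success, -1 on failure, per the comment above */
	vga_client_register(pdev, dev_priv, mydrv_irq_set_state,
			    mydrv_set_vga_decode);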
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 462246a..d6b5a21 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -77,6 +77,10 @@
 #define AMDGPU_GEM_CREATE_NO_CPU_ACCESS		(1 << 1)
 /* Flag that USWC attributes should be used for GTT */
 #define AMDGPU_GEM_CREATE_CPU_GTT_USWC		(1 << 2)
+/* Flag that the memory should be in VRAM and cleared */
+#define AMDGPU_GEM_CREATE_VRAM_CLEARED		(1 << 3)
+/* Flag to create a shadow BO (in GTT) while allocating the VRAM BO */
+#define AMDGPU_GEM_CREATE_SHADOW		(1 << 4)
 
 struct drm_amdgpu_gem_create_in  {
 	/** the requested memory size */
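From user space, the two new flags go into the domain_flags field of the
create ioctl. A hedged sketch, assuming libdrm's drmIoctl() and an
already-open device fd:

	union drm_amdgpu_gem_create args = {0};

	args.in.bo_size = 4096;
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	/* zeroed VRAM BO, shadowed by a GTT copy */
	args.in.domain_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED |
			       AMDGPU_GEM_CREATE_SHADOW;
	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args) == 0)
		bo_handle = args.out.handle;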
@@ -481,6 +485,8 @@
 #define AMDGPU_INFO_DEV_INFO			0x16
 /* visible vram usage */
 #define AMDGPU_INFO_VIS_VRAM_USAGE		0x17
+/* number of TTM buffer evictions */
+#define AMDGPU_INFO_NUM_EVICTIONS		0x18
 
 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT	0
 #define AMDGPU_INFO_MMR_SE_INDEX_MASK	0xff
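The new query follows the usual INFO pattern: the kernel writes a __u64
eviction count through return_pointer. A sketch, again assuming drmIoctl()
on an open fd:

	__u64 evictions = 0;
	struct drm_amdgpu_info request = {0};

	request.return_pointer = (__u64)(uintptr_t)&evictions;
	request.return_size = sizeof(evictions);
	request.query = AMDGPU_INFO_NUM_EVICTIONS;
	drmIoctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);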
@@ -643,6 +649,7 @@
  * Supported GPU families
  */
 #define AMDGPU_FAMILY_UNKNOWN			0
+#define AMDGPU_FAMILY_SI			110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
 #define AMDGPU_FAMILY_CI			120 /* Bonaire, Hawaii */
 #define AMDGPU_FAMILY_KV			125 /* Kaveri, Kabini, Mullins */
 #define AMDGPU_FAMILY_VI			130 /* Iceland, Tonga */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 452675f..b2c5284 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -646,6 +646,7 @@
 #define DRM_CAP_CURSOR_WIDTH		0x8
 #define DRM_CAP_CURSOR_HEIGHT		0x9
 #define DRM_CAP_ADDFB2_MODIFIERS	0x10
+#define DRM_CAP_PAGE_FLIP_TARGET	0x11
 
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
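User space would probe the new capability before using the target flags; a
sketch using libdrm's drmGetCap() wrapper around DRM_IOCTL_GET_CAP:

	uint64_t value = 0;

	if (drmGetCap(fd, DRM_CAP_PAGE_FLIP_TARGET, &value) == 0 && value) {
		/* driver accepts the DRM_MODE_PAGE_FLIP_TARGET_* flags */
	}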
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 49a7265..df0e350 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -520,7 +520,13 @@
 
 #define DRM_MODE_PAGE_FLIP_EVENT 0x01
 #define DRM_MODE_PAGE_FLIP_ASYNC 0x02
-#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
+#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
+#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
+#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
+				   DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
+#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
+				  DRM_MODE_PAGE_FLIP_ASYNC | \
+				  DRM_MODE_PAGE_FLIP_TARGET)
 
 /*
  * Request a page flip on the specified crtc.
@@ -543,8 +549,7 @@
 * 'as soon as possible', meaning that it will not delay waiting for vblank.
  * This may cause tearing on the screen.
  *
- * The reserved field must be zero until we figure out something
- * clever to use it for.
+ * The reserved field must be zero.
  */
 
 struct drm_mode_crtc_page_flip {
@@ -555,6 +560,34 @@
 	__u64 user_data;
 };
 
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * Same as struct drm_mode_crtc_page_flip, but supports new flags and
+ * re-purposes the reserved field:
+ *
+ * The sequence field must be zero unless either of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When
+ * the ABSOLUTE flag is specified, the sequence field denotes the absolute
+ * vblank sequence when the flip should take effect. When the RELATIVE
+ * flag is specified, the sequence field denotes the relative (to the
+ * current one when the ioctl is called) vblank sequence when the flip
+ * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to
+ * make sure the vblank sequence before the target one has passed before
+ * calling this ioctl. The purpose of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify
+ * the target for when code dealing with a page flip runs during a
+ * vertical blank period.
+ */
+
+struct drm_mode_crtc_page_flip_target {
+	__u32 crtc_id;
+	__u32 fb_id;
+	__u32 flags;
+	__u32 sequence;
+	__u64 user_data;
+};
+
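Since the new struct only re-purposes the reserved field, it is passed
through the existing page-flip ioctl. A hedged user-space sketch (crtc_id,
fb_id and user_ctx are assumed to already exist):

	struct drm_mode_crtc_page_flip_target flip = {0};

	flip.crtc_id = crtc_id;
	flip.fb_id = fb_id;
	/* complete two vblanks from now and send a completion event */
	flip.flags = DRM_MODE_PAGE_FLIP_EVENT |
		     DRM_MODE_PAGE_FLIP_TARGET_RELATIVE;
	flip.sequence = 2;
	flip.user_data = (__u64)(uintptr_t)user_ctx;
	drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);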
 /* create a dumb scanout buffer */
 struct drm_mode_create_dumb {
 	__u32 height;